• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7 
/* In-memory layout of a single hardware STE: control words, the
 * match tag, and the per-byte bit mask. Field order and sizes mirror
 * the device format — do not reorder.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
13 
dr_ste_crc32_calc(const void * input_data,size_t length)14 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15 {
16 	u32 crc = crc32(0, input_data, length);
17 
18 	return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
19 			    ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
20 }
21 
mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps * caps)22 bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
23 {
24 	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
25 }
26 
/* Compute the hash-table index for an STE: mask its tag with the
 * table's byte mask (one mask bit per tag byte, MSB first), CRC32 the
 * result, and fold the CRC into the table size.
 */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	u8 masked[DR_STE_SIZE_TAG] = {};
	u32 crc32, index;
	u16 bit;
	int i;

	/* Don't calculate CRC if the result is predicted */
	if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
		return 0;

	/* Mask tag using byte mask, bit per byte */
	bit = 1 << (DR_STE_SIZE_TAG - 1);
	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
		if (htbl->byte_mask & bit)
			masked[i] = hw_ste->tag[i];

		bit = bit >> 1;
	}

	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
	/* num_of_entries is a power of two, so this is crc32 % table size */
	index = crc32 & (htbl->chunk->num_of_entries - 1);

	return index;
}
53 
mlx5dr_ste_conv_bit_to_byte_mask(u8 * bit_mask)54 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
55 {
56 	u16 byte_mask = 0;
57 	int i;
58 
59 	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
60 		byte_mask = byte_mask << 1;
61 		if (bit_mask[i] == 0xff)
62 			byte_mask |= 1;
63 	}
64 	return byte_mask;
65 }
66 
/* Return a pointer to the tag area inside a raw hardware STE buffer. */
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	return ((struct dr_hw_ste_format *)hw_ste_p)->tag;
}
73 
/* Copy a builder's bit mask into the mask area of a raw hardware STE. */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste;

	hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
80 
dr_ste_set_always_hit(struct dr_hw_ste_format * hw_ste)81 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
82 {
83 	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
84 	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
85 }
86 
/* Make the STE never match: a non-zero tag byte combined with a zero
 * mask byte for that position cannot be matched by any packet.
 * NOTE(review): 0xdc looks like an arbitrary non-zero marker value —
 * confirm against the STE format documentation.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
92 
/* Set the STE's miss address via the format-specific STE context op. */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
98 
/* Turn an STE into an "always miss" entry that forwards to miss_addr:
 * point the next lookup type at don't-care, set the miss address, then
 * invalidate the tag/mask so no packet matches.
 */
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste *ste, u64 miss_addr)
{
	u8 *hw_ste_p = ste->hw_ste;

	ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
108 
/* Set the STE's hit address (next table ICM address + size) via the
 * format-specific STE context op.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
114 
mlx5dr_ste_get_icm_addr(struct mlx5dr_ste * ste)115 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
116 {
117 	u32 index = ste - ste->htbl->ste_arr;
118 
119 	return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
120 }
121 
mlx5dr_ste_get_mr_addr(struct mlx5dr_ste * ste)122 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
123 {
124 	u32 index = ste - ste->htbl->ste_arr;
125 
126 	return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
127 }
128 
mlx5dr_ste_get_miss_list(struct mlx5dr_ste * ste)129 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
130 {
131 	u32 index = ste - ste->htbl->ste_arr;
132 
133 	return &ste->htbl->miss_list[index];
134 }
135 
/* Turn an STE into an "always hit" entry pointing at next_htbl:
 * program the next table's byte mask, lookup type and hit address,
 * then clear the tag/mask so every packet matches.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste *ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
	u8 *hw_ste = ste->hw_ste;

	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
	ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);

	dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
149 
mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx * nic_matcher,u8 ste_location)150 bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
151 				u8 ste_location)
152 {
153 	return ste_location == nic_matcher->num_of_builders;
154 }
155 
/* Replace relevant fields, except of:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * While the data that was in b copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	/* Only the reduced STE (no mask area) is stored per entry */
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;
}
177 
/* Free ste which is the head and the only one in miss_list:
 * rewrite it as an "always miss" entry pointing at the matcher's
 * end-anchor, and queue the full-size STE write for the HW.
 */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
	/* Mirror the reduced part back into the per-entry copy */
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
212 
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 * next_ste's contents are copied into the head slot, next_ste is
 * unlinked, and the rebuilt full-size STE is queued for the HW.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Rebuild the full-size STE: reduced part from the entry, mask
	 * area from the matching builder, so all DR_STE_SIZE bytes sent
	 * to HW are valid.
	 */
	memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
260 
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 * prev_ste's miss address is redirected past ste, the control-word
 * update is queued for the HW, and ste is unlinked.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Bypass ste: prev now misses to whatever ste missed to */
	miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
	ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);

	/* Only the control area changed, so send DR_STE_SIZE_CTRL */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
289 
/* Free an STE: fix up its miss list (head-only, head-with-tail, or
 * middle cases), flush the queued STE writes to HW, then drop the
 * hash-table reference unless the head-replace path already consumed it.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			/* The replace path already put the collision htbl */
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
355 
mlx5dr_ste_equal_tag(void * src,void * dst)356 bool mlx5dr_ste_equal_tag(void *src, void *dst)
357 {
358 	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
359 	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
360 
361 	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
362 }
363 
/* Point hw_ste's hit address at next_htbl's ICM chunk. */
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	ste_ctx->set_hit_addr(hw_ste, next_htbl->chunk->icm_addr,
			      next_htbl->chunk->num_of_entries);
}
372 
/* Give the STE format a chance to fix up the buffer before it is sent
 * to the device; a no-op for formats without a prepare hook.
 */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
379 
/* Init one ste as a pattern for ste data array: initialize the raw
 * STE for the table's lookup type, then make it either an always-hit
 * entry toward the next table or an always-miss entry toward the
 * given miss address, per connect_info.
 */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_ste ste = {};

	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
	/* Wrap the raw buffer so the always-hit/miss helpers can use it */
	ste.hw_ste = formatted_ste;

	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
399 
/* Build the table's formatted pattern STE and write the whole table
 * to HW. Returns the postsend result (0 on success).
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
417 
/* Allocate and program the next hash table for an STE in the middle
 * of a rule chain, and link cur_hw_ste's hit address to it. Does
 * nothing for the last STE in the rule. Returns 0 on success,
 * -ENOMEM on allocation failure, -ENOENT if the HW write fails.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* The new table inherits lookup type/mask from cur_hw_ste */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
467 
/* Allocate an STE hash table backed by an ICM chunk from the pool and
 * initialize every entry's SW state (hw_ste pointer, back-pointer,
 * refcount, miss list). Returns NULL on allocation failure; the
 * caller owns the table and frees it with mlx5dr_ste_htbl_free().
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		/* Per-entry SW copies are stored reduced (no mask area) */
		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
	}

	htbl->chunk_size = chunk_size;
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
509 
mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl * htbl)510 int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
511 {
512 	if (htbl->refcount)
513 		return -EBUSY;
514 
515 	mlx5dr_icm_free_chunk(htbl->chunk);
516 	kfree(htbl);
517 	return 0;
518 }
519 
/* Program TX actions into the STE array via the format-specific op;
 * *added_stes is updated with any extra STEs the actions required.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
530 
/* Program RX actions into the STE array via the format-specific op;
 * *added_stes is updated with any extra STEs the actions required.
 */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
				attr, added_stes);
}
541 
542 const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx * ste_ctx,u16 sw_field)543 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
544 {
545 	const struct mlx5dr_ste_action_modify_field *hw_field;
546 
547 	if (sw_field >= ste_ctx->modify_field_arr_sz)
548 		return NULL;
549 
550 	hw_field = &ste_ctx->modify_field_arr[sw_field];
551 	if (!hw_field->end && !hw_field->start)
552 		return NULL;
553 
554 	return hw_field;
555 }
556 
/* Encode a modify-header SET action into hw_action via the
 * format-specific op.
 */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}
567 
/* Encode a modify-header ADD action into hw_action via the
 * format-specific op.
 */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}
578 
/* Encode a modify-header COPY action (src field -> dst field) into
 * hw_action via the format-specific op.
 */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
591 
/* Build the HW action list for an L3 decap with re-insert.
 * Only Ethernet frame is supported, with VLAN (18) or without (14).
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	if (!(data_sz == HDR_LEN_L2 || data_sz == HDR_LEN_L2_W_VLAN))
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}
605 
dr_ste_build_pre_check_spec(struct mlx5dr_domain * dmn,struct mlx5dr_match_spec * spec)606 static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
607 				       struct mlx5dr_match_spec *spec)
608 {
609 	if (spec->ip_version) {
610 		if (spec->ip_version != 0xf) {
611 			mlx5dr_err(dmn,
612 				   "Partial ip_version mask with src/dst IP is not supported\n");
613 			return -EINVAL;
614 		}
615 	} else if (spec->ethertype != 0xffff &&
616 		   (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
617 		mlx5dr_err(dmn,
618 			   "Partial/no ethertype mask with src/dst IP is not supported\n");
619 		return -EINVAL;
620 	}
621 
622 	return 0;
623 }
624 
/* Validate the matcher mask before building STEs: source port and
 * eswitch owner vhca id must be fully masked, and outer/inner specs
 * must pass dr_ste_build_pre_check_spec(). Checks are mask-only, so
 * a non-NULL value (building for a concrete rule) skips them.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value)
		return 0;

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
		return -EINVAL;

	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
		return -EINVAL;

	return 0;
}
657 
/* Build the full chain of STEs for a rule into ste_arr: for each
 * builder, init the STE, set its bit mask, fill its tag from the
 * match value, and link it to the next STE's lookup type/byte mask.
 * ste_arr must hold num_of_builders * DR_STE_SIZE bytes.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
701 
/* Unpack the fte_match_set_misc mask from its packed PRM layout into
 * the SW-steering misc match structure, field by field.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
	spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
						      source_eswitch_owner_vhca_id);

	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

	spec->outer_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
	spec->inner_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
	spec->outer_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
	spec->inner_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

	spec->outer_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

	spec->inner_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
	spec->geneve_protocol_type =
		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
}
752 
/* Unpack the fte_match_set_lyr_2_4 (L2-L4) mask from its packed PRM
 * layout into the SW-steering match spec. IPv6 addresses are copied
 * as four big-endian words and converted to host order.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
				    sizeof(raw_ip));

	/* raw_ip[0] holds the most significant 32 bits of the address */
	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
				    sizeof(raw_ip));

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
803 
/* Unpack the fte_match_set_misc2 mask (MPLS headers and metadata
 * registers) from its packed PRM layout into the SW-steering misc2
 * match structure.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
{
	spec->outer_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
	spec->outer_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
	spec->outer_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
	spec->outer_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
	spec->inner_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
	spec->inner_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
	spec->inner_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
	spec->inner_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
	spec->outer_first_mpls_over_gre_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
	spec->outer_first_mpls_over_gre_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
	spec->outer_first_mpls_over_gre_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
	spec->outer_first_mpls_over_gre_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
	spec->outer_first_mpls_over_udp_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
	spec->outer_first_mpls_over_udp_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
	spec->outer_first_mpls_over_udp_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
	spec->outer_first_mpls_over_udp_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
	spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
	spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
	spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
	spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
	spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
	spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
	spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
	spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
	spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
}
848 
/* Copy the misc3 match parameters from the FTE match mask layout
 * (fte_match_set_misc3) into the SW steering representation:
 * TCP sequence/ack numbers, VXLAN-GPE, ICMPv4/v6, GENEVE TLV option
 * data and GTP-U fields.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
{
	spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
	spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
	spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
	spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
	spec->outer_vxlan_gpe_vni =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
	spec->outer_vxlan_gpe_next_protocol =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
	spec->outer_vxlan_gpe_flags =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
	/* ICMPv4 and ICMPv6 use separate fields in the FTE layout */
	spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
	spec->icmpv6_header_data =
		MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
	spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
	spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
	spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
	spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
	spec->geneve_tlv_option_0_data =
		MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
	/* GTP-U header fields and flex-parser extracted dwords */
	spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
	spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
	spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
	spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
	spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
	spec->gtpu_first_ext_dw_0 =
		MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
}
878 
dr_ste_copy_mask_misc4(char * mask,struct mlx5dr_match_misc4 * spec)879 static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
880 {
881 	spec->prog_sample_field_id_0 =
882 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
883 	spec->prog_sample_field_value_0 =
884 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
885 	spec->prog_sample_field_id_1 =
886 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
887 	spec->prog_sample_field_value_1 =
888 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
889 	spec->prog_sample_field_id_2 =
890 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
891 	spec->prog_sample_field_value_2 =
892 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
893 	spec->prog_sample_field_id_3 =
894 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
895 	spec->prog_sample_field_value_3 =
896 		MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
897 }
898 
mlx5dr_ste_copy_param(u8 match_criteria,struct mlx5dr_match_param * set_param,struct mlx5dr_match_parameters * mask)899 void mlx5dr_ste_copy_param(u8 match_criteria,
900 			   struct mlx5dr_match_param *set_param,
901 			   struct mlx5dr_match_parameters *mask)
902 {
903 	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
904 	u8 *data = (u8 *)mask->match_buf;
905 	size_t param_location;
906 	void *buff;
907 
908 	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
909 		if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
910 			memcpy(tail_param, data, mask->match_sz);
911 			buff = tail_param;
912 		} else {
913 			buff = mask->match_buf;
914 		}
915 		dr_ste_copy_mask_spec(buff, &set_param->outer);
916 	}
917 	param_location = sizeof(struct mlx5dr_match_spec);
918 
919 	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
920 		if (mask->match_sz < param_location +
921 		    sizeof(struct mlx5dr_match_misc)) {
922 			memcpy(tail_param, data + param_location,
923 			       mask->match_sz - param_location);
924 			buff = tail_param;
925 		} else {
926 			buff = data + param_location;
927 		}
928 		dr_ste_copy_mask_misc(buff, &set_param->misc);
929 	}
930 	param_location += sizeof(struct mlx5dr_match_misc);
931 
932 	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
933 		if (mask->match_sz < param_location +
934 		    sizeof(struct mlx5dr_match_spec)) {
935 			memcpy(tail_param, data + param_location,
936 			       mask->match_sz - param_location);
937 			buff = tail_param;
938 		} else {
939 			buff = data + param_location;
940 		}
941 		dr_ste_copy_mask_spec(buff, &set_param->inner);
942 	}
943 	param_location += sizeof(struct mlx5dr_match_spec);
944 
945 	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
946 		if (mask->match_sz < param_location +
947 		    sizeof(struct mlx5dr_match_misc2)) {
948 			memcpy(tail_param, data + param_location,
949 			       mask->match_sz - param_location);
950 			buff = tail_param;
951 		} else {
952 			buff = data + param_location;
953 		}
954 		dr_ste_copy_mask_misc2(buff, &set_param->misc2);
955 	}
956 
957 	param_location += sizeof(struct mlx5dr_match_misc2);
958 
959 	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
960 		if (mask->match_sz < param_location +
961 		    sizeof(struct mlx5dr_match_misc3)) {
962 			memcpy(tail_param, data + param_location,
963 			       mask->match_sz - param_location);
964 			buff = tail_param;
965 		} else {
966 			buff = data + param_location;
967 		}
968 		dr_ste_copy_mask_misc3(buff, &set_param->misc3);
969 	}
970 
971 	param_location += sizeof(struct mlx5dr_match_misc3);
972 
973 	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
974 		if (mask->match_sz < param_location +
975 		    sizeof(struct mlx5dr_match_misc4)) {
976 			memcpy(tail_param, data + param_location,
977 			       mask->match_sz - param_location);
978 			buff = tail_param;
979 		} else {
980 			buff = data + param_location;
981 		}
982 		dr_ste_copy_mask_misc4(buff, &set_param->misc4);
983 	}
984 }
985 
/* Set up an STE builder matching on L2 source + destination MAC. */
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
995 
/* Set up an STE builder matching on the IPv6 destination address. */
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
1005 
/* Set up an STE builder matching on the IPv6 source address. */
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
1015 
/* Set up an STE builder matching on the IPv4 5-tuple. */
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
1025 
/* Set up an STE builder matching on L2 source fields. */
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}
1035 
/* Set up an STE builder matching on L2 destination fields. */
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}
1045 
/* Set up an STE builder matching on L2 tunneling fields. */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
1054 
/* Set up an STE builder matching on miscellaneous IPv4 L3 fields. */
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
1064 
/* Set up an STE builder matching on IPv6 L3/L4 fields. */
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
1074 
/* Tag-build callback for the "always hit" STE: it matches everything,
 * so there is nothing to encode into the tag.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1081 
/* Set up an STE builder that matches any packet (don't-care lookup,
 * empty byte mask, no-op tag builder).
 */
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->byte_mask = 0;
	sb->rx = rx;
	sb->ste_build_tag_func = dr_ste_build_empty_always_hit_tag;
}
1089 
/* Set up an STE builder matching on MPLS labels. */
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_mpls_init(sb, mask);
}
1099 
/* Set up an STE builder matching on GRE tunnel headers. */
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gre_init(sb, mask);
}
1109 
/* Set up an STE builder matching on MPLS-over-GRE tunnel headers.
 * The device caps are stored so the per-format init can select the
 * proper parser.
 */
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	/* Dropped the bogus "return <void expr>;" - not valid ISO C in a
	 * void function and inconsistent with the other builders here.
	 */
	ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
}
1121 
/* Set up an STE builder matching on MPLS-over-UDP tunnel headers.
 * The device caps are stored so the per-format init can select the
 * proper parser.
 */
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	/* Dropped the bogus "return <void expr>;" - not valid ISO C in a
	 * void function and inconsistent with the other builders here.
	 */
	ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}
1133 
/* Set up an STE builder matching on ICMPv4/v6 fields; caps are needed
 * by the per-format init.
 */
void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   struct mlx5dr_cmd_caps *caps,
			   bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->caps = caps;
	ste_ctx->build_icmp_init(sb, mask);
}
1145 
/* Set up an STE builder matching on the general-purpose lookup fields. */
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_general_purpose_init(sb, mask);
}
1155 
/* Set up an STE builder matching on miscellaneous L4 fields. */
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l4_misc_init(sb, mask);
}
1165 
/* Set up an STE builder matching on VXLAN-GPE tunnel headers. */
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
1175 
/* Set up an STE builder matching on GENEVE tunnel headers. */
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_init(sb, mask);
}
1185 
/* Set up an STE builder matching on a GENEVE TLV option; caps are
 * needed by the per-format init to locate the flex parser.
 */
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
					 struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask,
					 struct mlx5dr_cmd_caps *caps,
					 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->caps = caps;
	ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
1197 
/* Set up an STE builder matching on GTP-U tunnel headers. */
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_init(sb, mask);
}
1207 
/* Set up an STE builder matching GTP-U fields via flex parser 0. */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->caps = caps;
	ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
}
1219 
/* Set up an STE builder matching GTP-U fields via flex parser 1. */
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->caps = caps;
	ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
}
1231 
/* Set up an STE builder matching on metadata registers (set 0). */
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_0_init(sb, mask);
}
1241 
/* Set up an STE builder matching on metadata registers (set 1). */
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_1_init(sb, mask);
}
1251 
/* Set up an STE builder matching on the source GVMI/QPN. */
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Must be captured before the init callback resets
	 * source_eswitch_owner_vhca_id in the mask.
	 */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	sb->inner = inner;
	sb->rx = rx;
	sb->dmn = dmn;
	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
1266 
/* Set up an STE builder matching on flex parser 0 extracted fields. */
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_0_init(sb, mask);
}
1276 
/* Set up an STE builder matching on flex parser 1 extracted fields. */
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_1_init(sb, mask);
}
1286 
/* Per-HW-steering-format STE context, indexed by the device's
 * MLX5_STEERING_FORMAT_* version.
 */
static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
	[MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
	[MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
};
1291 
/* Return the STE context for the device's steering format version,
 * or NULL if the version is unsupported.
 */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	/* Bound by the table itself, so adding a new format only
	 * requires a new mlx5dr_ste_ctx_arr entry - no change here.
	 */
	if (version >= ARRAY_SIZE(mlx5dr_ste_ctx_arr))
		return NULL;

	return mlx5dr_ste_ctx_arr[version];
}
1299