• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_ste.h"
7 
/* Byte layout of a full-sized HW STE as seen by SW steering:
 * control section, match tag, and the tag's bit mask, back to back.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];	/* control section */
	u8 tag[DR_STE_SIZE_TAG];	/* match tag (value to match on) */
	u8 mask[DR_STE_SIZE_MASK];	/* bit mask applied to the tag */
};
13 
dr_ste_crc32_calc(const void * input_data,size_t length)14 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
15 {
16 	u32 crc = crc32(0, input_data, length);
17 
18 	return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
19 			    ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
20 }
21 
/* TTL checksum recalculation is supported only on devices whose
 * SW steering format version is newer than ConnectX-5.
 */
bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
{
	return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
}
26 
mlx5dr_ste_calc_hash_index(u8 * hw_ste_p,struct mlx5dr_ste_htbl * htbl)27 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
28 {
29 	u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
30 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
31 	u8 masked[DR_STE_SIZE_TAG] = {};
32 	u32 crc32, index;
33 	u16 bit;
34 	int i;
35 
36 	/* Don't calculate CRC if the result is predicted */
37 	if (num_entries == 1 || htbl->byte_mask == 0)
38 		return 0;
39 
40 	/* Mask tag using byte mask, bit per byte */
41 	bit = 1 << (DR_STE_SIZE_TAG - 1);
42 	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
43 		if (htbl->byte_mask & bit)
44 			masked[i] = hw_ste->tag[i];
45 
46 		bit = bit >> 1;
47 	}
48 
49 	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
50 	index = crc32 & (num_entries - 1);
51 
52 	return index;
53 }
54 
mlx5dr_ste_conv_bit_to_byte_mask(u8 * bit_mask)55 u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
56 {
57 	u16 byte_mask = 0;
58 	int i;
59 
60 	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
61 		byte_mask = byte_mask << 1;
62 		if (bit_mask[i] == 0xff)
63 			byte_mask |= 1;
64 	}
65 	return byte_mask;
66 }
67 
/* Return a pointer to the tag section of a raw full-sized STE buffer. */
static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	return hw_ste->tag;
}

/* Copy a builder's bit mask into the mask section of a full-sized STE. */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;

	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}

/* Zero both tag and mask so the STE matches (hits) on any packet. */
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
}

/* Make the STE never match: non-zero tag byte with its mask byte
 * cleared, so no packet can produce a hit.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;	/* any non-zero value would do here */
	hw_ste->mask[0] = 0;
}
93 
/* Program the STE's miss address via the device-specific STE ops. */
void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
			      u8 *hw_ste_p, u64 miss_addr)
{
	ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}

/* Turn the STE into an "always miss" entry: lookup is don't-care and
 * every packet is forwarded to miss_addr.
 */
static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
				    u8 *hw_ste, u64 miss_addr)
{
	ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
	ste_ctx->set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
}

/* Program the STE's hit address (next table) and table size via the
 * device-specific STE ops.
 */
void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
			     u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
113 
/* ICM address of this STE: chunk base address plus the STE's index
 * (derived by pointer arithmetic) times the full STE size.
 */
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
{
	u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return base_icm_addr + DR_STE_SIZE * index;
}

/* MR address of this STE within its hosting chunk. */
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
}

/* Shadow copy of this STE's data. Note the shadow array holds
 * reduced-size entries (DR_STE_SIZE_REDUCED, i.e. without the bit
 * mask section), hence the different stride.
 */
u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
{
	u64 index = ste - ste->htbl->chunk->ste_arr;

	return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
}

/* Miss list of this STE, indexed by its position within the chunk. */
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
{
	u32 index = ste - ste->htbl->chunk->ste_arr;

	return &ste->htbl->chunk->miss_list[index];
}
142 
/* Make the STE a catch-all that always hits and jumps to next_htbl. */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
				   u8 *hw_ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
	ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
	ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
			      mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));

	/* Zero tag and mask so any packet matches */
	dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
}

/* True if ste_location is the last builder in the matcher's STE chain. */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
162 
/* Replace relevant fields, except of:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * While the data that was in b copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	/* Copy the reduced-size shadow STE data from src into dst */
	memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
	       DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	/* The next table (if any) must now point back at its new owner */
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;
}
185 
/* Free ste which is the head and the only one in miss_list */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
		       struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	u64 miss_addr;

	/* Redirect the freed slot to the matcher's end anchor */
	miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
	 */
	memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
	dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
	/* Sync the reduced-size shadow copy back */
	memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
219 
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *ste,
			struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste_htbl *next_miss_htbl;
	u8 hw_ste[DR_STE_SIZE] = {};
	int sb_idx;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Update the rule on STE change */
	mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);

	/* Build a full-sized STE: copy the reduced-size shadow copy and
	 * re-apply the builder's bit mask, which the shadow doesn't hold.
	 */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
	sb_idx = ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste,
				nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
267 
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Unlink ste from the chain: prev inherits ste's miss address */
	miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
	ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);

	/* Only the control section of prev needs to reach HW */
	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
						  mlx5dr_ste_get_hw_ste(prev_ste),
						  ste_info, send_ste_list,
						  true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
297 
/* Release an STE: fix up its miss list and the hosting table's
 * bookkeeping, push the resulting STE updates to HW, and drop the
 * origin hash table reference when applicable.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste_ctx, ste,
					       nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list.
			 * The head slot is reused for next_ste's data, so
			 * the origin htbl reference must not be dropped.
			 */
			dr_ste_replace_head_ste(nic_matcher, ste,
						next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste_ctx, ste,
					 &ste_info_head, &send_ste_list,
					 stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
363 
mlx5dr_ste_equal_tag(void * src,void * dst)364 bool mlx5dr_ste_equal_tag(void *src, void *dst)
365 {
366 	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
367 	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
368 
369 	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
370 }
371 
/* Point the STE's hit address at next_htbl's chunk (address + size). */
void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
					  u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
	u32 num_entries =
		mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);

	ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
}

/* Give the device-specific layer a chance to massage the STE before it
 * is written to the device; optional op, not all formats implement it.
 */
void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
				     u8 *hw_ste_p, u32 ste_size)
{
	if (ste_ctx->prepare_for_postsend)
		ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
389 
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
				  u16 gvmi,
				  enum mlx5dr_domain_nic_type nic_type,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
	u8 tmp_hw_ste[DR_STE_SIZE] = {0};

	ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);

	/* Use temp ste because dr_ste_always_miss_addr/hit_htbl
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 * Need to use a full-sized (DR_STE_SIZE) hw_ste.
	 */
	memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
	/* Connect the pattern either to a next table (hit) or to a
	 * miss address, per the requested connect type.
	 */
	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
				       connect_info->hit_next_htbl);
	else
		dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
					connect_info->miss_icm_addr);
	memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
}
416 
/* Build the pattern STE for htbl and write the whole (formatted) table
 * to the device. Returns 0 on success or a negative errno from the
 * send path.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
434 
/* Allocate and connect the next hash table in the STE chain, unless
 * ste is the last STE of the rule (then nothing is done). The new
 * table is first written to HW connected to the end anchor (miss),
 * and only then linked from cur_hw_ste.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT if
 * writing the new table to HW failed.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u16 next_lu_type;
		u16 byte_mask;

		/* The new table inherits lookup type and byte mask that
		 * the current STE already advertises for its successor.
		 */
		next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
		byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr =
			mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
						     cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
485 
/* Allocate an STE hash table backed by an ICM chunk of chunk_size.
 * Each entry is linked back to the table and its miss list is
 * initialized empty. Returns NULL on allocation failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u16 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	u32 num_entries;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->refcount = 0;	/* caller takes references explicitly */
	num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);

	for (i = 0; i < num_entries; i++) {
		struct mlx5dr_ste *ste = &chunk->ste_arr[i];

		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&chunk->miss_list[i]);
	}

	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
524 
/* Free an STE hash table and its ICM chunk.
 * Returns -EBUSY (and frees nothing) if the table is still referenced.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}
534 
/* Build the TX action part of the STE array via the device-specific
 * ops; added_stes is updated with the number of extra STEs consumed.
 */
void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}

/* RX counterpart of mlx5dr_ste_set_actions_tx(). */
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_domain *dmn,
			       u8 *action_type_set,
			       u8 *hw_ste_arr,
			       struct mlx5dr_ste_actions_attr *attr,
			       u32 *added_stes)
{
	ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
				hw_ste_arr, attr, added_stes);
}
556 
557 const struct mlx5dr_ste_action_modify_field *
mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx * ste_ctx,u16 sw_field)558 mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
559 {
560 	const struct mlx5dr_ste_action_modify_field *hw_field;
561 
562 	if (sw_field >= ste_ctx->modify_field_arr_sz)
563 		return NULL;
564 
565 	hw_field = &ste_ctx->modify_field_arr[sw_field];
566 	if (!hw_field->end && !hw_field->start)
567 		return NULL;
568 
569 	return hw_field;
570 }
571 
/* Encode a modify-header SET action into hw_action via the
 * device-specific ops.
 */
void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_set((u8 *)hw_action,
				hw_field, shifter, length, data);
}

/* Encode a modify-header ADD action into hw_action. */
void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
			       __be64 *hw_action,
			       u8 hw_field,
			       u8 shifter,
			       u8 length,
			       u32 data)
{
	ste_ctx->set_action_add((u8 *)hw_action,
				hw_field, shifter, length, data);
}

/* Encode a modify-header COPY action (src field -> dst field). */
void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
				__be64 *hw_action,
				u8 dst_hw_field,
				u8 dst_shifter,
				u8 dst_len,
				u8 src_hw_field,
				u8 src_shifter)
{
	ste_ctx->set_action_copy((u8 *)hw_action,
				 dst_hw_field, dst_shifter, dst_len,
				 src_hw_field, src_shifter);
}
606 
/* Build the HW action list for an L3 decap with re-encap of the given
 * header. Returns -EINVAL for unsupported header sizes, otherwise the
 * result of the device-specific builder.
 */
int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
					void *data, u32 data_sz,
					u8 *hw_action, u32 hw_action_sz,
					u16 *used_hw_action_num)
{
	/* Only Ethernet frame is supported, with VLAN (18) or without (14) */
	if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
		return -EINVAL;

	return ste_ctx->set_action_decap_l3_list(data, data_sz,
						 hw_action, hw_action_sz,
						 used_hw_action_num);
}
620 
dr_ste_build_pre_check_spec(struct mlx5dr_domain * dmn,struct mlx5dr_match_spec * spec)621 static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
622 				       struct mlx5dr_match_spec *spec)
623 {
624 	if (spec->ip_version) {
625 		if (spec->ip_version != 0xf) {
626 			mlx5dr_err(dmn,
627 				   "Partial ip_version mask with src/dst IP is not supported\n");
628 			return -EINVAL;
629 		}
630 	} else if (spec->ethertype != 0xffff &&
631 		   (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
632 		mlx5dr_err(dmn,
633 			   "Partial/no ethertype mask with src/dst IP is not supported\n");
634 		return -EINVAL;
635 	}
636 
637 	return 0;
638 }
639 
/* Sanity-check a matcher mask before building STEs. Only the mask is
 * validated (when value is passed, the check is skipped). Rejects
 * partially-masked source_port / source_eswitch_owner_vhca_id and
 * invalid outer/inner IP specs. Returns 0 or -EINVAL.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value)
		return 0;

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_port is not supported\n");
			return -EINVAL;
		}
		if (mask->misc.source_eswitch_owner_vhca_id &&
		    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
			mlx5dr_err(dmn,
				   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
			return -EINVAL;
		}
	}

	if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->outer))
		return -EINVAL;

	if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
	    dr_ste_build_pre_check_spec(dmn, &mask->inner))
		return -EINVAL;

	return 0;
}
672 
/* Build the chain of match STEs for a rule into ste_arr: one STE per
 * matcher builder, each initialized, given its bit mask and tag, and
 * linked to the next builder's lookup type / byte mask.
 * Returns 0 on success or a negative errno from pre-check / tag build.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		ste_ctx->ste_init(ste_arr,
				  sb->lu_type,
				  is_rx,
				  dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		/* Fill the tag section from the match values */
		ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
			ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
716 
/* Read a field from a match buffer and, when 'clear' is set, zero it
 * there as well — so fields consumed by the copy helpers below can be
 * detected later as already handled.
 */
#define IFC_GET_CLR(typ, p, fld, clear) ({ \
	void *__p = (p); \
	u32 __t = MLX5_GET(typ, __p, fld); \
	if (clear) \
		MLX5_SET(typ, __p, fld, 0); \
	__t; \
})

/* memcpy that optionally zeroes the source afterwards (same rationale
 * as IFC_GET_CLR, for byte-array fields).
 */
#define memcpy_and_clear(to, from, len, clear) ({ \
	void *__to = (to), *__from = (from); \
	size_t __len = (len); \
	memcpy(__to, __from, __len); \
	if (clear) \
		memset(__from, 0, __len); \
})
732 
/* Copy the fte_match_set_misc fields from the raw PRM mask buffer into
 * the SW steering misc spec; when clr is set, consumed fields are
 * zeroed in the source buffer (see IFC_GET_CLR).
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
{
	spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
	spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
	spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
	spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
	spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);

	spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
	spec->source_eswitch_owner_vhca_id =
		IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);

	spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
	spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
	spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
	spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
	spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
	spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);

	spec->outer_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
	spec->inner_second_cvlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
	spec->outer_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
	spec->inner_second_svlan_tag =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
	spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);

	spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
	spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);

	spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);

	spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
	spec->geneve_tlv_option_0_exist =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
	spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);

	spec->outer_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);

	spec->inner_ipv6_flow_label =
		IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);

	spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
	spec->geneve_protocol_type =
		IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);

	spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
784 
/* Copy the fte_match_set_lyr_2_4 (L2-L4) fields from the raw PRM mask
 * buffer into the SW steering spec. IPv6 addresses are copied as four
 * big-endian words and stored as host-endian u32 quarters. When clr is
 * set, consumed fields are zeroed in the source buffer.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);

	spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
	spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);

	spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);

	spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
	spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
	spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
	spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);

	spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
	spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
	spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
	spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
	spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
	spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
	spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
	spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
	spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
	spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);

	spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
	spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);

	spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
	spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      src_ipv4_src_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
					      dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
			 sizeof(raw_ip), clr);

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
836 
/* Copy the misc2 match fields (MPLS headers, metadata registers) from the
 * device-format mask buffer (fte_match_set_misc2 layout) into the
 * SW-steering mlx5dr_match_misc2 representation. When @clr is true, each
 * field is cleared in @mask as it is read.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
	spec->outer_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
	spec->outer_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
	spec->outer_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
	spec->outer_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
	spec->inner_first_mpls_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
	spec->inner_first_mpls_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
	spec->inner_first_mpls_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
	spec->inner_first_mpls_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
	spec->outer_first_mpls_over_gre_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
	spec->outer_first_mpls_over_gre_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
	spec->outer_first_mpls_over_gre_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
	spec->outer_first_mpls_over_gre_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
	spec->outer_first_mpls_over_udp_label =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
	spec->outer_first_mpls_over_udp_exp =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
	spec->outer_first_mpls_over_udp_s_bos =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
	spec->outer_first_mpls_over_udp_ttl =
		IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
	spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
	spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
	spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
	spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
	spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
	spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
	spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
	spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
	spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
}
881 
/* Copy the misc3 match fields (TCP seq/ack, VXLAN-GPE, ICMP, Geneve TLV
 * data, GTP-U) from the device-format mask buffer (fte_match_set_misc3
 * layout) into the SW-steering mlx5dr_match_misc3 representation.
 * When @clr is true, each field is cleared in @mask as it is read.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
{
	spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
	spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
	spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
	spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
	spec->outer_vxlan_gpe_vni =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
	spec->outer_vxlan_gpe_next_protocol =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
	spec->outer_vxlan_gpe_flags =
		IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
	/* Note: the device fields for ICMPv4 are named icmp_* (no "v4") */
	spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
	spec->icmpv6_header_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
	spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
	spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
	spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
	spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
	spec->geneve_tlv_option_0_data =
		IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
	spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
	spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
	spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
	spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
	spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
	spec->gtpu_first_ext_dw_0 =
		IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
911 
/* Copy the misc4 match fields (programmable flex-parser sample id/value
 * pairs 0-3) from the device-format mask buffer (fte_match_set_misc4
 * layout) into the SW-steering mlx5dr_match_misc4 representation.
 * When @clr is true, each field is cleared in @mask as it is read.
 */
static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
	spec->prog_sample_field_id_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
	spec->prog_sample_field_value_0 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
	spec->prog_sample_field_id_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
	spec->prog_sample_field_value_1 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
	spec->prog_sample_field_id_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
	spec->prog_sample_field_value_2 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
	spec->prog_sample_field_id_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
	spec->prog_sample_field_value_3 =
		IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
931 
/* Copy the misc5 match fields (MACsec tags, generic tunnel header words)
 * from the device-format mask buffer (fte_match_set_misc5 layout) into
 * the SW-steering mlx5dr_match_misc5 representation. When @clr is true,
 * each field is cleared in @mask as it is read.
 */
static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
{
	spec->macsec_tag_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
	spec->macsec_tag_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
	spec->macsec_tag_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
	spec->macsec_tag_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
	spec->tunnel_header_0 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
	spec->tunnel_header_1 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
	spec->tunnel_header_2 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
	spec->tunnel_header_3 =
		IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
}
951 
/* Pick the source buffer for one match sub-structure: if the caller's
 * buffer ends inside the sub-structure at @offset, copy the available
 * bytes into the zero-initialized @tail_param scratch so the per-section
 * copy helpers always read a full-sized structure; otherwise point
 * directly into the caller's buffer.
 */
static void *dr_ste_get_param_buff(u8 *tail_param, u8 *data,
				   size_t offset, size_t sz,
				   size_t match_sz)
{
	if (match_sz < offset + sz) {
		memcpy(tail_param, data + offset, match_sz - offset);
		return tail_param;
	}

	return data + offset;
}

/* Convert the FW/device-format match mask in @mask into the SW-steering
 * mlx5dr_match_param layout, section by section, according to the bits
 * set in @match_criteria. When @clr is set, fields are cleared from the
 * source mask as they are consumed.
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask,
			   bool clr)
{
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	u8 *data = (u8 *)mask->match_buf;
	size_t param_location = 0;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		buff = dr_ste_get_param_buff(tail_param, data, param_location,
					     sizeof(struct mlx5dr_match_spec),
					     mask->match_sz);
		dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
	}
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		buff = dr_ste_get_param_buff(tail_param, data, param_location,
					     sizeof(struct mlx5dr_match_misc),
					     mask->match_sz);
		dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		buff = dr_ste_get_param_buff(tail_param, data, param_location,
					     sizeof(struct mlx5dr_match_spec),
					     mask->match_sz);
		dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		buff = dr_ste_get_param_buff(tail_param, data, param_location,
					     sizeof(struct mlx5dr_match_misc2),
					     mask->match_sz);
		dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		buff = dr_ste_get_param_buff(tail_param, data, param_location,
					     sizeof(struct mlx5dr_match_misc3),
					     mask->match_sz);
		dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc3);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		buff = dr_ste_get_param_buff(tail_param, data, param_location,
					     sizeof(struct mlx5dr_match_misc4),
					     mask->match_sz);
		dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
	}
	param_location += sizeof(struct mlx5dr_match_misc4);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
		buff = dr_ste_get_param_buff(tail_param, data, param_location,
					     sizeof(struct mlx5dr_match_misc5),
					     mask->match_sz);
		dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
	}
}
1053 
void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	/* Record header scope/direction, then let the HW-specific STE
	 * context initialize the L2 src+dst builder.
	 */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
1063 
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	/* Delegate IPv6 destination-address builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
1073 
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	/* Delegate IPv6 source-address builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
1083 
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
					  struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	/* Delegate IPv4 5-tuple builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
1093 
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	/* Delegate L2 source builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_src_init(sb, mask);
}
1103 
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	/* Delegate L2 destination builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_dst_init(sb, mask);
}
1113 
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	/* Delegate L2 tunneling builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
1122 
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	/* Delegate IPv4 misc-fields builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
1132 
void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	/* Delegate IPv6 L3/L4 builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
1142 
/* Tag builder for the "always hit" STE: writes nothing into @tag and
 * always succeeds. @value and @sb are intentionally unused.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1149 
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	/* Don't-care lookup with an all-zero byte mask: every packet
	 * matches, and the tag builder has nothing to write.
	 */
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->byte_mask = 0;
	sb->rx = rx;
	sb->ste_build_tag_func = dr_ste_build_empty_always_hit_tag;
}
1157 
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	/* Delegate MPLS builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_mpls_init(sb, mask);
}
1167 
void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
			      struct mlx5dr_ste_build *sb,
			      struct mlx5dr_match_param *mask,
			      bool inner, bool rx)
{
	/* Delegate GRE tunnel builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gre_init(sb, mask);
}
1177 
/* Set up the MPLS-over-GRE tunnel builder: record direction/scope and
 * device caps, then delegate to the HW-specific STE context.
 *
 * Fix: the original had "return ste_ctx->...init(sb, mask);" — returning
 * the value of a void expression from a void function is a C constraint
 * violation (rejected with -Wpedantic) and inconsistent with every other
 * builder wrapper in this file. Drop the spurious "return".
 */
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
}
1189 
/* Set up the MPLS-over-UDP tunnel builder: record direction/scope and
 * device caps, then delegate to the HW-specific STE context.
 *
 * Fix: drop the spurious "return" before the void init call — returning
 * a void expression from a void function is a C constraint violation
 * (rejected with -Wpedantic) and inconsistent with the sibling wrappers.
 */
void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
					struct mlx5dr_ste_build *sb,
					struct mlx5dr_match_param *mask,
					struct mlx5dr_cmd_caps *caps,
					bool inner, bool rx)
{
	sb->rx = rx;
	sb->inner = inner;
	sb->caps = caps;
	ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
}
1201 
void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
			   struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   struct mlx5dr_cmd_caps *caps,
			   bool inner, bool rx)
{
	/* ICMP matching needs device caps (flex parser info) in sb->caps */
	sb->caps = caps;
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_icmp_init(sb, mask);
}
1213 
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
				      struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	/* Delegate general-purpose lookup builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_general_purpose_init(sb, mask);
}
1223 
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
				  struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	/* Delegate L4 misc-fields builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_eth_l4_misc_init(sb, mask);
}
1233 
void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	/* Delegate VXLAN-GPE tunnel builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
1243 
void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	/* Delegate Geneve tunnel builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_init(sb, mask);
}
1253 
void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
					 struct mlx5dr_ste_build *sb,
					 struct mlx5dr_match_param *mask,
					 struct mlx5dr_cmd_caps *caps,
					 bool inner, bool rx)
{
	/* Geneve TLV option matching needs device caps in sb->caps */
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
}
1265 
void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
					       struct mlx5dr_ste_build *sb,
					       struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       bool inner, bool rx)
{
	/* Not every STE context version implements this builder; bail out
	 * quietly when the hook is absent.
	 */
	if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
		return;

	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
}
1280 
void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
			       struct mlx5dr_ste_build *sb,
			       struct mlx5dr_match_param *mask,
			       bool inner, bool rx)
{
	/* Delegate GTP-U tunnel builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_init(sb, mask);
}
1290 
void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	/* GTP-U via flex parser 0 needs device caps in sb->caps */
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
}
1302 
void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
					     struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     struct mlx5dr_cmd_caps *caps,
					     bool inner, bool rx)
{
	/* GTP-U via flex parser 1 needs device caps in sb->caps */
	sb->inner = inner;
	sb->caps = caps;
	sb->rx = rx;
	ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
}
1314 
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	/* Delegate metadata register set 0 builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_0_init(sb, mask);
}
1324 
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
				 struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	/* Delegate metadata register set 1 builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_register_1_init(sb, mask);
}
1334 
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
				   struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Latch vhca_id_valid first: the init callback may reset
	 * source_eswitch_owner_vhca_id in @mask, so the flag must be
	 * captured before delegating.
	 */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	sb->dmn = dmn;
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_src_gvmi_qpn_init(sb, mask);
}
1349 
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	/* Delegate flex parser 0 builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_0_init(sb, mask);
}
1359 
void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
				    struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	/* Delegate flex parser 1 builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_flex_parser_1_init(sb, mask);
}
1369 
void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
				     struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	/* Delegate tunnel header words 0/1 builder setup to the STE context */
	sb->inner = inner;
	sb->rx = rx;
	ste_ctx->build_tnl_header_0_1_init(sb, mask);
}
1379 
/* Map the device steering format version to its STE context
 * implementation; returns NULL for unsupported versions.
 */
struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
{
	switch (version) {
	case MLX5_STEERING_FORMAT_CONNECTX_5:
		return mlx5dr_ste_get_ctx_v0();
	case MLX5_STEERING_FORMAT_CONNECTX_6DX:
		return mlx5dr_ste_get_ctx_v1();
	case MLX5_STEERING_FORMAT_CONNECTX_7:
		return mlx5dr_ste_get_ctx_v2();
	default:
		return NULL;
	}
}
1391