• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include <linux/types.h>
5 #include <linux/crc32.h>
6 #include "dr_types.h"
7 
/* Reflected CRC-32 polynomial (matches the crc32() library function) */
#define DR_STE_CRC_POLY 0xEDB88320L
/* L3/L4 protocol selector values programmed into STE fields */
#define STE_IPV4 0x1
#define STE_IPV6 0x2
#define STE_TCP 0x1
#define STE_UDP 0x2
#define STE_SPI 0x3
/* IP header version numbers */
#define IP_VERSION_IPV4 0x4
#define IP_VERSION_IPV6 0x6
/* VLAN qualifier encodings */
#define STE_SVLAN 0x1
#define STE_CVLAN 0x2

/* OR'ed into qp_list_pointer to mark the flow tag as valid */
#define DR_STE_ENABLE_FLOW_TAG BIT(31)
20 
/* If the matcher spec field s_fname is in use, write 'value' into the STE
 * field t_fname and clear s_fname to mark it as consumed.
 */
#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
	if ((spec)->s_fname) { \
		MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
		(spec)->s_fname = 0; \
	} \
} while (0)

/* Copy spec->s_fname into tag->t_fname and mark spec->s_fname as used */
#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)

/* Set bit_mask->bm_fname to all-ones (-1) and mark spec->s_fname as used */
#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)

/* Copy spec->s_fname into bit_mask->bm_fname and mark spec->s_fname as used */
#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
	DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
40 
/* Split the 9 TCP flag bits packed in spec->tcp_flags (bit 8 = NS down to
 * bit 0 = FIN) into the individual per-flag STE fields.
 */
#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
	MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
	MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
} while (0)
52 
/* Build the bit mask for the outermost (first) MPLS label; 'in_out' is
 * token-pasted to select the inner_/outer_ spec field namespace.
 */
#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
			  in_out##_first_mpls_label);\
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
			  in_out##_first_mpls_s_bos); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
			  in_out##_first_mpls_exp); \
	DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
			  in_out##_first_mpls_ttl); \
} while (0)

/* Same field set as DR_STE_SET_MPLS_MASK, but writes the match value (tag) */
#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
		       in_out##_first_mpls_label);\
	DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
		       in_out##_first_mpls_s_bos); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
		       in_out##_first_mpls_exp); \
	DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
		       in_out##_first_mpls_ttl); \
} while (0)
74 
/* True if any outer MPLS-over-GRE field is being matched */
#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
	(_misc)->outer_first_mpls_over_gre_label || \
	(_misc)->outer_first_mpls_over_gre_exp || \
	(_misc)->outer_first_mpls_over_gre_s_bos || \
	(_misc)->outer_first_mpls_over_gre_ttl)
/* True if any outer MPLS-over-UDP field is being matched */
#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
	(_misc)->outer_first_mpls_over_udp_label || \
	(_misc)->outer_first_mpls_over_udp_exp || \
	(_misc)->outer_first_mpls_over_udp_s_bos || \
	(_misc)->outer_first_mpls_over_udp_ttl)
85 
/* Select the lookup-type variant: _I for inner headers, otherwise _D when
 * on the RX side and _O when not.
 */
#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
	((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
		   (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
			  MLX5DR_STE_LU_TYPE_##lookup_type##_O)
90 
/* Encodings written into the STE tunneling_action field */
enum dr_ste_tunl_action {
	DR_STE_TUNL_ACTION_NONE		= 0,
	DR_STE_TUNL_ACTION_ENABLE	= 1,
	DR_STE_TUNL_ACTION_DECAP	= 2,
	DR_STE_TUNL_ACTION_L3_DECAP	= 3,
	DR_STE_TUNL_ACTION_POP_VLAN	= 4,
};

/* Encodings written into the TX STE action_type field */
enum dr_ste_action_type {
	DR_STE_ACTION_TYPE_PUSH_VLAN	= 1,
	DR_STE_ACTION_TYPE_ENCAP_L3	= 3,
	DR_STE_ACTION_TYPE_ENCAP	= 4,
};
104 
/* In-memory layout of a full hardware STE: control area, match value (tag)
 * and the bit mask applied to the tag. Sizes come from dr_types.h.
 */
struct dr_hw_ste_format {
	u8 ctrl[DR_STE_SIZE_CTRL];
	u8 tag[DR_STE_SIZE_TAG];
	u8 mask[DR_STE_SIZE_MASK];
};
110 
dr_ste_crc32_calc(const void * input_data,size_t length)111 static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
112 {
113 	u32 crc = crc32(0, input_data, length);
114 
115 	return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
116 			    ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
117 }
118 
mlx5dr_ste_calc_hash_index(u8 * hw_ste_p,struct mlx5dr_ste_htbl * htbl)119 u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
120 {
121 	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
122 	u8 masked[DR_STE_SIZE_TAG] = {};
123 	u32 crc32, index;
124 	u16 bit;
125 	int i;
126 
127 	/* Don't calculate CRC if the result is predicted */
128 	if (htbl->chunk->num_of_entries == 1 || htbl->byte_mask == 0)
129 		return 0;
130 
131 	/* Mask tag using byte mask, bit per byte */
132 	bit = 1 << (DR_STE_SIZE_TAG - 1);
133 	for (i = 0; i < DR_STE_SIZE_TAG; i++) {
134 		if (htbl->byte_mask & bit)
135 			masked[i] = hw_ste->tag[i];
136 
137 		bit = bit >> 1;
138 	}
139 
140 	crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
141 	index = crc32 & (htbl->chunk->num_of_entries - 1);
142 
143 	return index;
144 }
145 
dr_ste_conv_bit_to_byte_mask(u8 * bit_mask)146 static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
147 {
148 	u16 byte_mask = 0;
149 	int i;
150 
151 	for (i = 0; i < DR_STE_SIZE_MASK; i++) {
152 		byte_mask = byte_mask << 1;
153 		if (bit_mask[i] == 0xff)
154 			byte_mask |= 1;
155 	}
156 	return byte_mask;
157 }
158 
/* Return a pointer to the tag (match value) area within a raw STE buffer */
static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
{
	return ((struct dr_hw_ste_format *)hw_ste_p)->tag;
}
165 
/* Copy a prepared bit mask into the mask area of a raw STE buffer */
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
	struct dr_hw_ste_format *hw_ste;

	hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
	memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
172 
/* Attach a flow tag to an RX STE; DR_STE_ENABLE_FLOW_TAG marks it valid */
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
		 DR_STE_ENABLE_FLOW_TAG | flow_tag);
}
178 
/* Bind a flow counter to the STE; the 24-bit id is split across two fields */
void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
{
	/* This can be used for both rx_steering_mult and for sx_transmit */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
}
185 
/* Set the go_back bit on a TX STE (required e.g. for reformat + push vlan) */
void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
}
190 
/* Program a TX STE to push the VLAN header given in vlan_hdr */
void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
				 bool go_back)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 DR_STE_ACTION_TYPE_PUSH_VLAN);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
	/* Due to HW limitation we need to set this bit, otherwise reformat +
	 * push vlan will not work.
	 */
	if (go_back)
		mlx5dr_ste_set_go_back_bit(hw_ste_p);
}
203 
/* Program a TX STE to encapsulate (L2 or L3) using reformat entry
 * @reformat_id; @size is the reformat header length in bytes.
 */
void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
{
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
		 encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
	/* The hardware expects here size in words (2 byte) */
	MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
	MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
}
212 
/* Program an RX STE to decapsulate the tunnel header */
void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_DECAP);
}
218 
/* Program an RX STE to pop the outer VLAN header */
void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_POP_VLAN);
}
224 
/* Program an RX STE for L3 decap; @vlan tells HW whether the packet
 * carries a VLAN header (encoded via action_description).
 */
void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
{
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
		 DR_STE_TUNL_ACTION_L3_DECAP);
	MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
}
231 
/* Write the STE entry_type field (RX/TX/modify encoding) */
void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
}
236 
/* Read back the STE entry_type field */
u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
{
	return MLX5_GET(ste_general, hw_ste_p, entry_type);
}
241 
/* Point a modify-packet STE at a header-rewrite action list:
 * @num_of_actions entries starting at index @re_write_index.
 */
void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
				    u32 re_write_index)
{
	MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
		 num_of_actions);
	MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
		 re_write_index);
}
250 
/* Encode the hit GVMI in bits 63..48 of the next table base */
void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
}
255 
/* Initialize a raw STE buffer: entry/lookup types, a don't-care next
 * lookup, and the GVMI stamped into all address-carrying fields.
 */
void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
		     u16 gvmi)
{
	MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
	MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
	MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);

	/* Set GVMI once, this is the same for RX/TX
	 * bits 63_48 of next table base / miss address encode the next GVMI
	 */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
}
270 
dr_ste_set_always_hit(struct dr_hw_ste_format * hw_ste)271 static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
272 {
273 	memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
274 	memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
275 }
276 
/* Write a non-zero sentinel tag with a cleared mask byte — the always-miss
 * pattern used for anchor/connect entries.
 */
static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
{
	hw_ste->tag[0] = 0xdc;
	hw_ste->mask[0] = 0;
}
282 
/* Reassemble the miss address from its split 31_6 / 39_32 fields; the
 * address is stored shifted right by 6, hence the final << 6.
 */
u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
{
	u64 index =
		(MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
		 MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);

	return index << 6;
}
291 
/* Program the next-table (hit) address: icm_addr >> 5 combined with the
 * table-size bits, split across the two next_table_base fields.
 */
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
{
	u64 index = (icm_addr >> 5) | ht_size;

	MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
	MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
}
299 
mlx5dr_ste_get_icm_addr(struct mlx5dr_ste * ste)300 u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
301 {
302 	u32 index = ste - ste->htbl->ste_arr;
303 
304 	return ste->htbl->chunk->icm_addr + DR_STE_SIZE * index;
305 }
306 
mlx5dr_ste_get_mr_addr(struct mlx5dr_ste * ste)307 u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
308 {
309 	u32 index = ste - ste->htbl->ste_arr;
310 
311 	return ste->htbl->chunk->mr_addr + DR_STE_SIZE * index;
312 }
313 
mlx5dr_ste_get_miss_list(struct mlx5dr_ste * ste)314 struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
315 {
316 	u32 index = ste - ste->htbl->ste_arr;
317 
318 	return &ste->htbl->miss_list[index];
319 }
320 
/* Chain @ste to @next_htbl and make it an always-hit entry, so every
 * packet continues into the next table.
 */
static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
				   struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
	u8 *hw_ste = ste->hw_ste;

	MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
	MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
	mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);

	dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
333 
/* An STE is last in its rule when its chain location equals the number
 * of builders in the matcher.
 */
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
				u8 ste_location)
{
	return ste_location == nic_matcher->num_of_builders;
}
339 
/* Replace relevant fields, except of:
 * htbl - keep the origin htbl
 * miss_list + list - already took the src from the list.
 * icm_addr/mr_addr - depends on the hosting table.
 *
 * Before:
 * | a | -> | b | -> | c | ->
 *
 * After:
 * | a | -> | c | ->
 * While the data that was in b copied to a.
 */
static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
{
	memcpy(dst->hw_ste, src->hw_ste, DR_STE_SIZE_REDUCED);
	dst->next_htbl = src->next_htbl;
	if (dst->next_htbl)
		dst->next_htbl->pointing_ste = dst;

	dst->refcount = src->refcount;

	/* Move src's rule list over to dst */
	INIT_LIST_HEAD(&dst->rule_list);
	list_splice_tail_init(&src->rule_list, &dst->rule_list);
}
364 
/* Free ste which is the head and the only one in miss_list:
 * turn it into an always-miss entry pointing at the matcher's end anchor
 * and queue the full-size STE for a HW write.
 */
static void
dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
		       struct mlx5dr_matcher_rx_tx *nic_matcher,
		       struct mlx5dr_ste_send_info *ste_info_head,
		       struct list_head *send_ste_list,
		       struct mlx5dr_ste_htbl *stats_tbl)
{
	u8 tmp_data_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste tmp_ste = {};
	u64 miss_addr;

	tmp_ste.hw_ste = tmp_data_ste;

	/* Use temp ste because dr_ste_always_miss_addr
	 * touches bit_mask area which doesn't exist at ste->hw_ste.
	 */
	memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
	miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
	memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);

	list_del_init(&ste->miss_list_node);

	/* Write full STE size in order to have "always_miss" */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
						  0, tmp_data_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_valid_entries--;
}
398 
/* Free ste which is the head but NOT the only one in miss_list:
 * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
 * The data of next_ste is copied into ste (which stays the list head),
 * and next_ste's hosting table reference is dropped.
 */
static void
dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
			struct mlx5dr_ste_send_info *ste_info_head,
			struct list_head *send_ste_list,
			struct mlx5dr_ste_htbl *stats_tbl)

{
	struct mlx5dr_ste_htbl *next_miss_htbl;

	next_miss_htbl = next_ste->htbl;

	/* Remove from the miss_list the next_ste before copy */
	list_del_init(&next_ste->miss_list_node);

	/* All rule-members that use next_ste should know about that */
	mlx5dr_rule_update_rule_member(next_ste, ste);

	/* Move data from next into ste */
	dr_ste_replace(ste, next_ste);

	/* Del the htbl that contains the next_ste.
	 * The origin htbl stay with the same number of entries.
	 */
	mlx5dr_htbl_put(next_miss_htbl);

	/* Queue the updated head (reduced size is enough, mask unchanged) */
	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
						  0, ste->hw_ste,
						  ste_info_head,
						  send_ste_list,
						  true /* Copy data */);

	stats_tbl->ctrl.num_of_collisions--;
	stats_tbl->ctrl.num_of_valid_entries--;
}
436 
/* Free ste that is located in the middle of the miss list:
 * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
 * prev_ste's miss address is redirected past ste, then prev_ste is
 * queued for a HW update and ste is unlinked.
 */
static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
				     struct mlx5dr_ste_send_info *ste_info,
				     struct list_head *send_ste_list,
				     struct mlx5dr_ste_htbl *stats_tbl)
{
	struct mlx5dr_ste *prev_ste;
	u64 miss_addr;

	prev_ste = list_prev_entry(ste, miss_list_node);
	if (WARN_ON(!prev_ste))
		return;

	/* Bypass ste: prev now misses to whatever ste missed to */
	miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
	mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);

	mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
						  prev_ste->hw_ste, ste_info,
						  send_ste_list, true /* Copy data*/);

	list_del_init(&ste->miss_list_node);

	stats_tbl->ctrl.num_of_valid_entries--;
	stats_tbl->ctrl.num_of_collisions--;
}
464 
/* Unlink @ste from its miss list (head-only, head-with-successors, or
 * middle case), flush the queued STE rewrites to HW, and drop the hash
 * table reference unless the head was replaced in place.
 */
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
		     struct mlx5dr_matcher *matcher,
		     struct mlx5dr_matcher_rx_tx *nic_matcher)
{
	struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info ste_info_head;
	struct mlx5dr_ste *next_ste, *first_ste;
	bool put_on_origin_table = true;
	struct mlx5dr_ste_htbl *stats_tbl;
	LIST_HEAD(send_ste_list);

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
				     struct mlx5dr_ste, miss_list_node);
	stats_tbl = first_ste->htbl;

	/* Two options:
	 * 1. ste is head:
	 *	a. head ste is the only ste in the miss list
	 *	b. head ste is not the only ste in the miss-list
	 * 2. ste is not head
	 */
	if (first_ste == ste) { /* Ste is the head */
		struct mlx5dr_ste *last_ste;

		last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
					   struct mlx5dr_ste, miss_list_node);
		if (last_ste == first_ste)
			next_ste = NULL;
		else
			next_ste = list_next_entry(ste, miss_list_node);

		if (!next_ste) {
			/* One and only entry in the list */
			dr_ste_remove_head_ste(ste, nic_matcher,
					       &ste_info_head,
					       &send_ste_list,
					       stats_tbl);
		} else {
			/* First but not only entry in the list */
			dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
						&send_ste_list, stats_tbl);
			put_on_origin_table = false;
		}
	} else { /* Ste in the middle of the list */
		dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
	}

	/* Update HW */
	list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
				 &send_ste_list, send_list) {
		list_del(&cur_ste_info->send_list);
		mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
					 cur_ste_info->data, cur_ste_info->size,
					 cur_ste_info->offset);
	}

	if (put_on_origin_table)
		mlx5dr_htbl_put(ste->htbl);
}
525 
mlx5dr_ste_equal_tag(void * src,void * dst)526 bool mlx5dr_ste_equal_tag(void *src, void *dst)
527 {
528 	struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
529 	struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
530 
531 	return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
532 }
533 
/* Point the STE's hit address at @next_htbl's ICM chunk */
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
					  struct mlx5dr_ste_htbl *next_htbl)
{
	struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;

	mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
541 
/* Program the miss address, stored >> 6 and split across two fields
 * (inverse of mlx5dr_ste_get_miss_addr()).
 */
void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
{
	u64 index = miss_addr >> 6;

	/* Miss address for TX and RX STEs located in the same offsets */
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
	MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
}
550 
/* Turn @ste into an always-miss entry whose miss path goes to @miss_addr */
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
{
	u8 *hw_ste = ste->hw_ste;

	MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
	mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
	dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
559 
/* Init one ste as a pattern for ste data array: either always-hit into
 * connect_info->hit_next_htbl or always-miss to connect_info->miss_icm_addr.
 */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
				  struct mlx5dr_domain_rx_tx *nic_dmn,
				  struct mlx5dr_ste_htbl *htbl,
				  u8 *formatted_ste,
				  struct mlx5dr_htbl_connect_info *connect_info)
{
	struct mlx5dr_ste ste = {};

	mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
	ste.hw_ste = formatted_ste;

	if (connect_info->type == CONNECT_HIT)
		dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
	else
		mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
}
577 
/* Build the pattern STE for @htbl and post it to HW for every entry of
 * the table. Returns the postsend result.
 */
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn,
				      struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_htbl_connect_info *connect_info,
				      bool update_hw_ste)
{
	u8 formatted_ste[DR_STE_SIZE] = {};

	mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
				     nic_dmn,
				     htbl,
				     formatted_ste,
				     connect_info);

	return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
}
594 
/* Allocate and HW-initialize the next hash table for @ste (unless it is
 * the last STE in the rule), then chain ste -> next_htbl.
 * Returns 0 on success, -ENOMEM on allocation failure, -ENOENT if the
 * table could not be written to HW.
 */
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct mlx5dr_ste *ste,
				u8 *cur_hw_ste,
				enum mlx5dr_icm_chunk_size log_table_size)
{
	struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_ste_htbl *next_htbl;

	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
		u8 next_lu_type;
		u16 byte_mask;

		/* The next table inherits lookup type and mask already
		 * programmed into the current STE by the builder chain.
		 */
		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);

		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
						  log_table_size,
						  next_lu_type,
						  byte_mask);
		if (!next_htbl) {
			mlx5dr_dbg(dmn, "Failed allocating table\n");
			return -ENOMEM;
		}

		/* Write new table to HW */
		info.type = CONNECT_MISS;
		info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
		if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
						      &info, false)) {
			mlx5dr_info(dmn, "Failed writing table to HW\n");
			goto free_table;
		}

		mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
		ste->next_htbl = next_htbl;
		next_htbl->pointing_ste = ste;
	}

	return 0;

free_table:
	mlx5dr_ste_htbl_free(next_htbl);
	return -ENOENT;
}
643 
dr_ste_set_ctrl(struct mlx5dr_ste_htbl * htbl)644 static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
645 {
646 	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
647 	int num_of_entries;
648 
649 	htbl->ctrl.may_grow = true;
650 
651 	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
652 		htbl->ctrl.may_grow = false;
653 
654 	/* Threshold is 50%, one is added to table of size 1 */
655 	num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size);
656 	ctrl->increase_threshold = (num_of_entries + 1) / 2;
657 }
658 
/* Allocate an STE hash table backed by an ICM chunk from @pool and
 * initialize every entry's back-pointers and lists.
 * Returns the table, or NULL on allocation failure.
 */
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
					      enum mlx5dr_icm_chunk_size chunk_size,
					      u8 lu_type, u16 byte_mask)
{
	struct mlx5dr_icm_chunk *chunk;
	struct mlx5dr_ste_htbl *htbl;
	int i;

	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return NULL;

	chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
	if (!chunk)
		goto out_free_htbl;

	htbl->chunk = chunk;
	htbl->lu_type = lu_type;
	htbl->byte_mask = byte_mask;
	htbl->ste_arr = chunk->ste_arr;
	htbl->hw_ste_arr = chunk->hw_ste_arr;
	htbl->miss_list = chunk->miss_list;
	htbl->refcount = 0;

	/* Wire each SW STE to its reduced HW copy and reset its lists */
	for (i = 0; i < chunk->num_of_entries; i++) {
		struct mlx5dr_ste *ste = &htbl->ste_arr[i];

		ste->hw_ste = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
		ste->htbl = htbl;
		ste->refcount = 0;
		INIT_LIST_HEAD(&ste->miss_list_node);
		INIT_LIST_HEAD(&htbl->miss_list[i]);
		INIT_LIST_HEAD(&ste->rule_list);
	}

	htbl->chunk_size = chunk_size;
	dr_ste_set_ctrl(htbl);
	return htbl;

out_free_htbl:
	kfree(htbl);
	return NULL;
}
702 
/* Free a hash table and its ICM chunk; fails with -EBUSY while the
 * table is still referenced.
 */
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
{
	if (htbl->refcount)
		return -EBUSY;

	mlx5dr_icm_free_chunk(htbl->chunk);
	kfree(htbl);
	return 0;
}
712 
/* Validate the matcher mask before building STEs. When building the mask
 * itself (value == NULL) and MISC criteria are used, source_port and
 * source_eswitch_owner_vhca_id only support exact (0xffff) or empty masks.
 * Returns 0 when valid, -EINVAL otherwise.
 */
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
			       u8 match_criteria,
			       struct mlx5dr_match_param *mask,
			       struct mlx5dr_match_param *value)
{
	if (value || !(match_criteria & DR_MATCHER_CRITERIA_MISC))
		return 0;

	if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
		mlx5dr_err(dmn,
			   "Partial mask source_port is not supported\n");
		return -EINVAL;
	}

	if (mask->misc.source_eswitch_owner_vhca_id &&
	    mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
		mlx5dr_err(dmn,
			   "Partial mask source_eswitch_owner_vhca_id is not supported\n");
		return -EINVAL;
	}

	return 0;
}
734 
/* Build the chain of STEs (one per matcher builder) encoding @value into
 * @ste_arr. Each STE gets its bit mask, its tag via the builder callback,
 * and a link (next_lu_type/byte_mask) to the following STE.
 * Returns 0 on success or the first builder/pre-check error.
 */
int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
			     struct mlx5dr_matcher_rx_tx *nic_matcher,
			     struct mlx5dr_match_param *value,
			     u8 *ste_arr)
{
	struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_build *sb;
	int ret, i;

	ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
					 &matcher->mask, value);
	if (ret)
		return ret;

	sb = nic_matcher->ste_builder;
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		mlx5dr_ste_init(ste_arr,
				sb->lu_type,
				nic_dmn->ste_type,
				dmn->info.caps.gvmi);

		mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);

		ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
		if (ret)
			return ret;

		/* Connect the STEs */
		if (i < (nic_matcher->num_of_builders - 1)) {
			/* Need the next builder for these fields,
			 * not relevant for the last ste in the chain.
			 */
			sb++;
			MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
			MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
		}
		ste_arr += DR_STE_SIZE;
	}
	return 0;
}
776 
/* Build the eth_l2_src_dst bit mask from the L2 spec fields, clearing
 * each spec field as it is consumed.
 */
static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
						 bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);

	/* smac is repacked: the 48 bits are split 16/32 across the STE's
	 * smac_47_32 / smac_31_0 fields, unlike the spec's 32/16 split.
	 */
	if (mask->smac_47_16 || mask->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
			 mask->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
			 mask->smac_47_16 << 16 | mask->smac_15_0);
		mask->smac_47_16 = 0;
		mask->smac_15_0 = 0;
	}

	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);

	/* cvlan and svlan share one qualifier field; cvlan wins if both set */
	if (mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
	} else if (mask->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
		mask->svlan_tag = 0;
	}
}
807 
/* Unpack the misc match-parameter mask from the FW fte_match_set_misc
 * layout into the SW mlx5dr_match_misc representation.
 */
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
	spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
	spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
	spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
	spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
	spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);

	spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
	spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
						      source_eswitch_owner_vhca_id);

	/* Second (QinQ) VLAN headers, outer and inner */
	spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
	spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
	spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
	spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
	spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
	spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);

	spec->outer_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
	spec->inner_second_cvlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
	spec->outer_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
	spec->inner_second_svlan_tag =
		MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);

	/* Tunnel headers: GRE, VXLAN, GENEVE */
	spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);

	spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
	spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);

	spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);

	spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
	spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);

	spec->outer_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);

	spec->inner_ipv6_flow_label =
		MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);

	spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
	spec->geneve_protocol_type =
		MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);

	spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
}
858 
/* Extract the L2-L4 header mask from the FW "fte_match_set_lyr_2_4" buffer
 * layout into the SW-steering match-spec structure.
 * Scalar fields are a direct MLX5_GET copy; the 128-bit IPv6 addresses are
 * read as four big-endian 32-bit words and converted to host order.
 */
static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
{
	__be32 raw_ip[4];

	spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);

	spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
	spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);

	spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);

	spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
	spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
	spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
	spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);

	spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
	spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
	spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
	spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
	spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
	spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
	spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
	spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
	spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
	spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);

	spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);

	spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
	spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);

	/* Source IP: the ipv6_layout holds 16 bytes; for IPv4 the address
	 * occupies the last word (31:0).
	 */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
				    sizeof(raw_ip));

	spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);

	/* Destination IP, same layout as the source address above */
	memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
				    sizeof(raw_ip));

	spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
	spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
	spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
	spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
909 
/* Extract the misc2-parameters mask from the FW "fte_match_set_misc2"
 * buffer layout into the SW-steering misc2 match structure.
 * Pure field-by-field copy via MLX5_GET; @mask is not modified.
 */
static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
{
	/* Outermost and innermost MPLS label stack entries */
	spec->outer_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
	spec->outer_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
	spec->outer_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
	spec->outer_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
	spec->inner_first_mpls_label =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
	spec->inner_first_mpls_exp =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
	spec->inner_first_mpls_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
	spec->inner_first_mpls_ttl =
		MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
	/* MPLS-over-GRE and MPLS-over-UDP encapsulated label entries */
	spec->outer_first_mpls_over_gre_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
	spec->outer_first_mpls_over_gre_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
	spec->outer_first_mpls_over_gre_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
	spec->outer_first_mpls_over_gre_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
	spec->outer_first_mpls_over_udp_label =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
	spec->outer_first_mpls_over_udp_exp =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
	spec->outer_first_mpls_over_udp_s_bos =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
	spec->outer_first_mpls_over_udp_ttl =
		MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
	/* Software-defined metadata registers (reg_c 0-7 and reg_a) */
	spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
	spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
	spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
	spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
	spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
	spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
	spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
	spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
	spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
}
954 
/* Extract the misc3-parameters mask from the FW "fte_match_set_misc3"
 * buffer layout into the SW-steering misc3 match structure.
 * Pure field-by-field copy via MLX5_GET; @mask is not modified.
 */
static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
{
	/* TCP sequence/acknowledgement numbers */
	spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
	spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
	spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
	spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
	/* VXLAN-GPE header fields */
	spec->outer_vxlan_gpe_vni =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
	spec->outer_vxlan_gpe_next_protocol =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
	spec->outer_vxlan_gpe_flags =
		MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
	/* ICMP: FW fields without a version suffix (icmp_*) are the v4 ones */
	spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
	spec->icmpv6_header_data =
		MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
	spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
	spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
	spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
	spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
}
975 
/* Return a pointer to the @sec_sz-byte mask section starting at byte offset
 * @off inside the caller-supplied match buffer.  When the caller's buffer is
 * too short to contain the whole section, the available bytes are copied into
 * the scratch buffer @tail (zero-padded to @tail_sz) and @tail is returned,
 * so the per-section copy routines always read a full-sized section.
 */
static void *dr_ste_copy_param_get_buf(struct mlx5dr_match_parameters *mask,
				       u8 *tail, size_t tail_sz,
				       size_t off, size_t sec_sz)
{
	u8 *data = (u8 *)mask->match_buf;

	if (mask->match_sz >= off + sec_sz)
		return data + off;

	/* Re-zero the scratch buffer: it may still hold bytes from an earlier
	 * truncated section, and the tail beyond the copied bytes must read
	 * as "no mask bits set".
	 */
	memset(tail, 0, tail_sz);

	/* Guard the subtraction: if the buffer ends before this section even
	 * starts, "match_sz - off" would underflow (size_t) and the memcpy
	 * would read far out of bounds.
	 */
	if (mask->match_sz > off)
		memcpy(tail, data + off, mask->match_sz - off);

	return tail;
}

/* Scatter the flat match-mask buffer supplied by the caller into the
 * per-criteria structures of @set_param.  Only the criteria selected in
 * @match_criteria are parsed; a short @mask buffer is tolerated, with the
 * missing tail treated as an all-zero mask.
 *
 * NOTE(review): tail_param is sized by fte_match_set_lyr_2_4; this relies on
 * every mlx5dr_match_* section fitting in that size — confirm against
 * dr_types.h if the match-param layout changes.
 */
void mlx5dr_ste_copy_param(u8 match_criteria,
			   struct mlx5dr_match_param *set_param,
			   struct mlx5dr_match_parameters *mask)
{
	u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
	size_t param_location = 0;
	void *buff;

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		buff = dr_ste_copy_param_get_buf(mask, tail_param,
						 sizeof(tail_param),
						 param_location,
						 sizeof(struct mlx5dr_match_spec));
		dr_ste_copy_mask_spec(buff, &set_param->outer);
	}
	param_location = sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		buff = dr_ste_copy_param_get_buf(mask, tail_param,
						 sizeof(tail_param),
						 param_location,
						 sizeof(struct mlx5dr_match_misc));
		dr_ste_copy_mask_misc(buff, &set_param->misc);
	}
	param_location += sizeof(struct mlx5dr_match_misc);

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		buff = dr_ste_copy_param_get_buf(mask, tail_param,
						 sizeof(tail_param),
						 param_location,
						 sizeof(struct mlx5dr_match_spec));
		dr_ste_copy_mask_spec(buff, &set_param->inner);
	}
	param_location += sizeof(struct mlx5dr_match_spec);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		buff = dr_ste_copy_param_get_buf(mask, tail_param,
						 sizeof(tail_param),
						 param_location,
						 sizeof(struct mlx5dr_match_misc2));
		dr_ste_copy_mask_misc2(buff, &set_param->misc2);
	}
	param_location += sizeof(struct mlx5dr_match_misc2);

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		buff = dr_ste_copy_param_get_buf(mask, tail_param,
						 sizeof(tail_param),
						 param_location,
						 sizeof(struct mlx5dr_match_misc3));
		dr_ste_copy_mask_misc3(buff, &set_param->misc3);
	}
}
1048 
/* Fill the ETHL2_SRC_DST STE tag from the inner or outer match spec.
 * Every field written into the tag is consumed (zeroed) in the spec.
 * Returns 0 on success, -EINVAL on an ip_version the STE cannot encode.
 */
static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
					   struct mlx5dr_ste_build *sb,
					   u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	/* SMAC is split 47:32 / 31:0 in this STE layout, unlike the
	 * 47:16 / 15:0 split used by the SW match spec, so repack it.
	 */
	if (spec->smac_47_16 || spec->smac_15_0) {
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
			 spec->smac_47_16 >> 16);
		MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
			 spec->smac_47_16 << 16 | spec->smac_15_0);
		spec->smac_47_16 = 0;
		spec->smac_15_0 = 0;
	}

	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);

	switch (spec->ip_version) {
	case 0:
		/* ip_version not masked - nothing to encode */
		break;
	case IP_VERSION_IPV4:
		MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
		spec->ip_version = 0;
		break;
	case IP_VERSION_IPV6:
		MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
		spec->ip_version = 0;
		break;
	default:
		pr_info("Unsupported ip_version value\n");
		return -EINVAL;
	}

	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);

	/* C-VLAN wins when both tag kinds are requested */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	return 0;
}
1093 
/* Prepare an ETHL2_SRC_DST STE builder: record lookup metadata and
 * populate the bit mask from the caller's match mask.
 */
void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
				     struct mlx5dr_match_param *mask,
				     bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;

	/* The bit mask must be filled before the byte mask is derived */
	dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1106 
/* Build the ETHL3_IPV6_DST bit mask: copy the four 32-bit words of the
 * IPv6 destination address mask into the STE mask and consume them.
 */
static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
	DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
}
1117 
/* Fill the ETHL3_IPV6_DST STE tag with the IPv6 destination address words
 * from the inner or outer spec; consumed fields are zeroed. Always succeeds.
 */
static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);

	return 0;
}
1131 
/* Prepare an ETHL3_IPV6_DST STE builder: record lookup metadata and
 * populate the bit mask from the caller's match mask.
 */
void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;

	/* The bit mask must be filled before the byte mask is derived */
	dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1144 
/* Build the ETHL3_IPV6_SRC bit mask: copy the four 32-bit words of the
 * IPv6 source address mask into the STE mask and consume them.
 */
static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
	DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
}
1155 
/* Fill the ETHL3_IPV6_SRC STE tag with the IPv6 source address words
 * from the inner or outer spec; consumed fields are zeroed. Always succeeds.
 */
static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
	DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);

	return 0;
}
1169 
/* Prepare an ETHL3_IPV6_SRC STE builder: record lookup metadata and
 * populate the bit mask from the caller's match mask.
 */
void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;

	/* The bit mask must be filled before the byte mask is derived */
	dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1182 
/* Build the ETHL3_IPV4_5_TUPLE bit mask: addresses, L4 ports, protocol,
 * fragmentation, DSCP/ECN and TCP flags. Consumed spec fields are zeroed.
 * Note: tcp_dport/udp_dport both map onto the single destination_port STE
 * field (likewise for the source ports), so either L4 mask lands there.
 */
static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
						      bool inner,
						      u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_address, mask, dst_ip_31_0);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_address, mask, src_ip_31_0);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_port, mask, tcp_dport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  destination_port, mask, udp_dport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_port, mask, tcp_sport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  source_port, mask, udp_sport);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  protocol, mask, ip_protocol);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  dscp, mask, ip_dscp);
	DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
			  ecn, mask, ip_ecn);

	/* Expand the packed tcp_flags word into the per-flag STE bits */
	if (mask->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
		mask->tcp_flags = 0;
	}
}
1215 
/* Fill the ETHL3_IPV4_5_TUPLE STE tag from the inner or outer spec.
 * Consumed spec fields are zeroed; TCP and UDP ports share the same STE
 * port fields. Always succeeds.
 */
static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
						struct mlx5dr_ste_build *sb,
						u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);

	/* Expand the packed tcp_flags word into the per-flag STE bits */
	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1240 
/* Prepare an ETHL3_IPV4_5_TUPLE STE builder: record lookup metadata and
 * populate the bit mask from the caller's match mask.
 */
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
					  struct mlx5dr_match_param *mask,
					  bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;

	/* The bit mask must be filled before the byte mask is derived */
	dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1253 
/* Build the mask bits shared by the ETHL2_SRC and ETHL2_DST lookup types:
 * first VLAN, fragmentation, ethertype/L3 type, and the second (QinQ) VLAN
 * taken from the misc parameters. Consumed mask fields are zeroed.
 */
static void
dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
					bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
	/* l3_type mask is all-ones whenever ip_version is masked at all */
	DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);

	/* A single qualifier field covers both C-VLAN and S-VLAN */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}

	/* Second VLAN comes from the misc parameters, selected by header */
	if (inner) {
		if (misc_mask->inner_second_cvlan_tag ||
		    misc_mask->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->inner_second_cvlan_tag = 0;
			misc_mask->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_vlan_id, misc_mask, inner_second_vid);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_cfi, misc_mask, inner_second_cfi);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_priority, misc_mask, inner_second_prio);
	} else {
		if (misc_mask->outer_second_cvlan_tag ||
		    misc_mask->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
			misc_mask->outer_second_cvlan_tag = 0;
			misc_mask->outer_second_svlan_tag = 0;
		}

		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_vlan_id, misc_mask, outer_second_vid);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_cfi, misc_mask, outer_second_cfi);
		DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
				  second_priority, misc_mask, outer_second_prio);
	}
}
1304 
/* Fill the tag fields shared by the ETHL2_SRC and ETHL2_DST lookup types:
 * first VLAN, fragmentation, ethertype/L3 type, and the second (QinQ) VLAN
 * from the misc parameters. Consumed fields are zeroed in the spec/misc.
 * Returns 0 on success, -EINVAL on an ip_version the STE cannot encode.
 */
static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
					      bool inner, u8 *tag)
{
	struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);

	/* Only IPv4/IPv6 can be encoded in the STE l3_type field */
	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			pr_info("Unsupported ip_version value\n");
			return -EINVAL;
		}
	}

	/* C-VLAN wins when both tag kinds are requested */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	/* Second VLAN comes from the misc parameters, selected by header */
	if (inner) {
		if (misc_spec->inner_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->inner_second_cvlan_tag = 0;
		} else if (misc_spec->inner_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->inner_second_svlan_tag = 0;
		}

		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
	} else {
		if (misc_spec->outer_second_cvlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
			misc_spec->outer_second_cvlan_tag = 0;
		} else if (misc_spec->outer_second_svlan_tag) {
			MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
			misc_spec->outer_second_svlan_tag = 0;
		}
		DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
		DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
		DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
	}

	return 0;
}
1365 
/* Build the ETHL2_SRC bit mask: the SMAC words specific to this lookup,
 * plus the bits shared with ETHL2_DST.
 */
static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	/* Bits common to the L2 src/dst lookup types */
	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);

	/* SMAC is the field unique to the ETHL2_SRC lookup */
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
	DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
}
1376 
/* Fill the ETHL2_SRC STE tag: the SMAC words unique to this lookup, then
 * delegate the fields shared with ETHL2_DST. Error code passes through.
 */
static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec;

	spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
	DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);

	/* Remaining fields are common with the L2 dst lookup */
	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1388 
/* Prepare an ETHL2_SRC STE builder: record lookup metadata and populate
 * the bit mask from the caller's match mask.
 */
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;

	/* The bit mask must be filled before the byte mask is derived */
	dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1400 
/* Build the ETHL2_DST bit mask: the DMAC words specific to this lookup,
 * plus the bits shared with ETHL2_SRC.
 */
static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	/* Bits common to the L2 src/dst lookup types */
	dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);

	/* DMAC is the field unique to the ETHL2_DST lookup */
	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
}
1411 
/* Fill the ETHL2_DST STE tag: the DMAC words unique to this lookup, then
 * delegate the fields shared with ETHL2_SRC. Error code passes through.
 */
static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec;

	spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);

	/* Remaining fields are common with the L2 src lookup */
	return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
1423 
/* Prepare an ETHL2_DST STE builder: record lookup metadata and populate
 * the bit mask from the caller's match mask.
 */
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;

	/* The bit mask must be filled before the byte mask is derived */
	dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1436 
/* Build the ETHL2_TUNNELING bit mask: DMAC, first VLAN, fragmentation,
 * ethertype/L3 type, and the VXLAN VNI from the misc parameters.
 * Consumed mask fields are zeroed.
 */
static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
	/* l3_type mask is all-ones whenever ip_version is masked at all */
	DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);

	if (misc->vxlan_vni) {
		/* VNI is placed shifted left by 8 inside the tunneling-id
		 * field, matching the tag-side placement below.
		 */
		MLX5_SET(ste_eth_l2_tnl, bit_mask,
			 l2_tunneling_network_id, (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* A single qualifier field covers both C-VLAN and S-VLAN */
	if (mask->svlan_tag || mask->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
		mask->cvlan_tag = 0;
		mask->svlan_tag = 0;
	}
}
1464 
/* Fill the ETHL2_TUNNELING STE tag: DMAC, first VLAN, fragmentation,
 * ethertype/L3 type, and the VXLAN VNI from the misc parameters.
 * Consumed fields are zeroed in the spec/misc.
 * Returns 0 on success, -EINVAL on an ip_version the STE cannot encode.
 */
static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
	DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
	DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
	DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);

	if (misc->vxlan_vni) {
		/* VNI is placed shifted left by 8 inside the tunneling-id
		 * field, matching the bit-mask-side placement.
		 */
		MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
			 (misc->vxlan_vni << 8));
		misc->vxlan_vni = 0;
	}

	/* C-VLAN wins when both tag kinds are requested */
	if (spec->cvlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
		spec->cvlan_tag = 0;
	} else if (spec->svlan_tag) {
		MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
		spec->svlan_tag = 0;
	}

	if (spec->ip_version) {
		if (spec->ip_version == IP_VERSION_IPV4) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
			spec->ip_version = 0;
		} else if (spec->ip_version == IP_VERSION_IPV6) {
			MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
			spec->ip_version = 0;
		} else {
			/* Log like the sibling tag builders do on this path */
			pr_info("Unsupported ip_version value\n");
			return -EINVAL;
		}
	}

	return 0;
}
1508 
/* Prepare an ETHL2_TUNNELING STE builder: record lookup metadata and
 * populate the bit mask from the caller's match mask.
 * The lookup type is the fixed ETHL2_TUNNELING_I value rather than being
 * computed via DR_STE_CALC_LU_TYPE like the other builders.
 */
void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
	sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;

	/* The bit mask must be filled before the byte mask is derived */
	dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1520 
/* Build the ETHL3_IPV4_MISC bit mask: only TTL/hop-limit is matched by
 * this STE.  DR_STE_SET_MASK_V also clears the consumed field in @value.
 */
static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
						   bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
}
1528 
/* Build the ETHL3_IPV4_MISC tag (TTL/hop-limit only) from the match
 * value.  DR_STE_SET_TAG clears the consumed spec field.  Always
 * succeeds.
 */
static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);

	return 0;
}
1539 
/* Initialize an STE builder matching IPv4 misc fields (TTL/hop-limit).
 * The lookup type is derived from the rx/inner combination.
 */
void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
				       struct mlx5dr_match_param *mask,
				       bool inner, bool rx)
{
	dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1552 
/* Build the ETHL4 bit mask covering the shared L3/L4 fields.
 *
 * Note: TCP and UDP deliberately map onto the same dst_port/src_port
 * STE fields; the protocol field disambiguates at match time.  Each
 * DR_STE_SET_MASK_V (and DR_STE_SET_TCP_FLAGS) clears the consumed
 * mask field in @value.
 */
static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
					     bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;

	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
	DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);

	/* TCP flag bits are expanded into individual STE fields */
	if (mask->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
		mask->tcp_flags = 0;
	}
}
1573 
/* Build the ETHL4 tag from the match value.
 *
 * Mirrors dr_ste_build_ipv6_l3_l4_bit_mask(): TCP and UDP ports share
 * the same STE fields, and every DR_STE_SET_TAG clears the spec field
 * it consumes.  Always succeeds.
 */
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;

	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
	DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
	DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
	DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
	DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
	DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
	DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
	DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);

	/* Expand requested TCP flag bits into their STE fields */
	if (spec->tcp_flags) {
		DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
		spec->tcp_flags = 0;
	}

	return 0;
}
1597 
/* Initialize an STE builder for the shared L3/L4 (ETHL4) match fields. */
void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1610 
/* Tag builder for the "always hit" STE: a DONT_CARE match with nothing
 * to write into @tag, so this is intentionally a no-op.
 */
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
					     struct mlx5dr_ste_build *sb,
					     u8 *tag)
{
	return 0;
}
1617 
/* Initialize a builder for an STE that matches every packet: DONT_CARE
 * lookup type with a zero byte mask.
 */
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
{
	sb->rx = rx;
	sb->byte_mask = 0;
	sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
	sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}
1625 
/* Build the first-MPLS-label bit mask; the inner or outer label field
 * set is selected by @inner.
 */
static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
				       bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;

	if (!inner)
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
	else
		DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
}
1636 
/* Build the first-MPLS-label tag; the inner or outer label field set is
 * selected by the builder's direction.  Always succeeds.
 */
static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
				 struct mlx5dr_ste_build *sb,
				 u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;

	if (!sb->inner)
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
	else
		DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);

	return 0;
}
1650 
/* Initialize an STE builder matching on the first MPLS label. */
void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
			   struct mlx5dr_match_param *mask,
			   bool inner, bool rx)
{
	dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1663 
/* Build the GRE bit mask: protocol, key halves and the C/K/S present
 * flags.  Consumed mask fields are cleared in @value.
 */
static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
				      bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc = &value->misc;

	DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc, gre_protocol);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc, gre_k_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc, gre_key_h);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc, gre_key_l);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc, gre_c_present);
	DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc, gre_s_present);
}
1677 
/* Build the GRE tag from the match value; each DR_STE_SET_TAG clears
 * the consumed misc field.  Always succeeds.
 */
static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
				struct mlx5dr_ste_build *sb,
				u8 *tag)
{
	struct mlx5dr_match_misc *misc_spec = &value->misc;

	DR_STE_SET_TAG(gre, tag, gre_protocol, misc_spec, gre_protocol);
	DR_STE_SET_TAG(gre, tag, gre_k_present, misc_spec, gre_k_present);
	DR_STE_SET_TAG(gre, tag, gre_key_h, misc_spec, gre_key_h);
	DR_STE_SET_TAG(gre, tag, gre_key_l, misc_spec, gre_key_l);
	DR_STE_SET_TAG(gre, tag, gre_c_present, misc_spec, gre_c_present);
	DR_STE_SET_TAG(gre, tag, gre_s_present, misc_spec, gre_s_present);

	return 0;
}
1696 
/* Initialize an STE builder matching on GRE header fields. */
void mlx5dr_ste_build_gre(struct mlx5dr_ste_build *sb,
			  struct mlx5dr_match_param *mask, bool inner, bool rx)
{
	dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
	sb->ste_build_tag_func = &dr_ste_build_gre_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1708 
/* Build the flex parser 0 bit mask for the outer first MPLS label.
 *
 * The label can arrive via MPLS-over-GRE or MPLS-over-UDP; both variants
 * land in the same parser_3_* STE fields, so the source field set is
 * chosen by DR_STE_IS_OUTER_MPLS_OVER_GRE_SET.  Consumed mask fields
 * are cleared in @value.
 */
static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
						bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_gre_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_gre_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_gre_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
				  misc_2_mask, outer_first_mpls_over_udp_label);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
				  misc_2_mask, outer_first_mpls_over_udp_exp);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
				  misc_2_mask, outer_first_mpls_over_udp_s_bos);

		DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
				  misc_2_mask, outer_first_mpls_over_udp_ttl);
	}
}
1740 
/* Build the flex parser 0 tag for the outer first MPLS label.
 *
 * Mirrors the bit-mask builder: MPLS-over-GRE and MPLS-over-UDP share
 * the parser_3_* STE fields, selected by DR_STE_IS_OUTER_MPLS_OVER_GRE_SET.
 * Each DR_STE_SET_TAG clears the consumed field.  Always succeeds.
 */
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc2, outer_first_mpls_over_gre_label);
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc2, outer_first_mpls_over_gre_exp);
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc2, outer_first_mpls_over_gre_s_bos);
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc2, outer_first_mpls_over_gre_ttl);
	} else {
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
			       misc2, outer_first_mpls_over_udp_label);
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
			       misc2, outer_first_mpls_over_udp_exp);
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
			       misc2, outer_first_mpls_over_udp_s_bos);
		DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
			       misc2, outer_first_mpls_over_udp_ttl);
	}

	return 0;
}
1774 
/* Initialize an STE builder using flex parser 0 (outer first MPLS). */
void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_build *sb,
				    struct mlx5dr_match_param *mask,
				    bool inner, bool rx)
{
	dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1787 
1788 #define ICMP_TYPE_OFFSET_FIRST_DW		24
1789 #define ICMP_CODE_OFFSET_FIRST_DW		16
1790 #define ICMP_HEADER_DATA_OFFSET_SECOND_DW	0
1791 
/* Build the flex parser 1 bit mask for ICMPv4 or ICMPv6 matching.
 *
 * ICMP type and code are packed into flex parser dword 4 (type at bit
 * offset 24, code at bit offset 16) and the ICMP header data into
 * dword 5 (offset 0).  Only the fixed parser locations 4 and 5, as
 * reported by the FW caps, are supported; any other location fails
 * with -EINVAL.  Consumed mask fields are cleared in @mask.
 */
static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
					       struct mlx5dr_cmd_caps *caps,
					       u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
	bool is_ipv4_mask = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3_mask);
	u32 icmp_header_data_mask;
	u32 icmp_type_mask;
	u32 icmp_code_mask;
	int dw0_location;
	int dw1_location;

	/* Pick the v4 or v6 source fields; both share the same STE layout */
	if (is_ipv4_mask) {
		icmp_header_data_mask	= misc_3_mask->icmpv4_header_data;
		icmp_type_mask		= misc_3_mask->icmpv4_type;
		icmp_code_mask		= misc_3_mask->icmpv4_code;
		dw0_location		= caps->flex_parser_id_icmp_dw0;
		dw1_location		= caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data_mask	= misc_3_mask->icmpv6_header_data;
		icmp_type_mask		= misc_3_mask->icmpv6_type;
		icmp_code_mask		= misc_3_mask->icmpv6_code;
		dw0_location		= caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_type = 0;
			else
				misc_3_mask->icmpv6_type = 0;
		}
		if (icmp_code_mask) {
			/* OR the code bits in so the type mask is preserved */
			u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
				 cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_code = 0;
			else
				misc_3_mask->icmpv6_code = 0;
		}
		break;
	default:
		/* Unsupported parser location for dword 0 */
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data_mask) {
			MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
				 (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4_mask)
				misc_3_mask->icmpv4_header_data = 0;
			else
				misc_3_mask->icmpv6_header_data = 0;
		}
		break;
	default:
		/* Unsupported parser location for dword 1 */
		return -EINVAL;
	}

	return 0;
}
1860 
/* Build the flex parser 1 tag for ICMPv4 or ICMPv6.
 *
 * Same packing as the bit-mask builder: type/code into flex parser
 * dword 4, header data into dword 5; only locations 4/5 from the FW
 * caps are supported (-EINVAL otherwise).  Consumed value fields are
 * cleared.
 *
 * NOTE(review): is_ipv4 is decided with DR_MASK_IS_FLEX_PARSER_ICMPV4_SET
 * applied to the match *value* — the macro name suggests it was written
 * for masks; confirm it is also valid on values.
 */
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
					  struct mlx5dr_ste_build *sb,
					  u8 *tag)
{
	struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
	u32 icmp_header_data;
	int dw0_location;
	int dw1_location;
	u32 icmp_type;
	u32 icmp_code;
	bool is_ipv4;

	is_ipv4 = DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc_3);
	if (is_ipv4) {
		icmp_header_data	= misc_3->icmpv4_header_data;
		icmp_type		= misc_3->icmpv4_type;
		icmp_code		= misc_3->icmpv4_code;
		dw0_location		= sb->caps->flex_parser_id_icmp_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmp_dw1;
	} else {
		icmp_header_data	= misc_3->icmpv6_header_data;
		icmp_type		= misc_3->icmpv6_type;
		icmp_code		= misc_3->icmpv6_code;
		dw0_location		= sb->caps->flex_parser_id_icmpv6_dw0;
		dw1_location		= sb->caps->flex_parser_id_icmpv6_dw1;
	}

	switch (dw0_location) {
	case 4:
		if (icmp_type) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_type = 0;
			else
				misc_3->icmpv6_type = 0;
		}

		if (icmp_code) {
			/* OR the code bits in so the type value is preserved */
			u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
					       flex_parser_4);
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
				 cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
			if (is_ipv4)
				misc_3->icmpv4_code = 0;
			else
				misc_3->icmpv6_code = 0;
		}
		break;
	default:
		/* Unsupported parser location for dword 0 */
		return -EINVAL;
	}

	switch (dw1_location) {
	case 5:
		if (icmp_header_data) {
			MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
				 (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
			if (is_ipv4)
				misc_3->icmpv4_header_data = 0;
			else
				misc_3->icmpv6_header_data = 0;
		}
		break;
	default:
		/* Unsupported parser location for dword 1 */
		return -EINVAL;
	}

	return 0;
}
1931 
/* Initialize the flex parser 1 (ICMP) builder.
 * Returns 0 on success or the error from the bit-mask builder when the
 * FW flex parser dword locations are unsupported.
 */
int mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_cmd_caps *caps,
				   bool inner, bool rx)
{
	int ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);

	if (ret)
		return ret;

	sb->caps = caps;
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);

	return 0;
}
1952 
/* Build the general purpose bit mask: metadata register A only.
 * DR_STE_SET_MASK_V clears the consumed field in @value.
 */
static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
						  bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_MASK_V(general_purpose, bit_mask,
			  general_purpose_lookup_field, misc_2_mask,
			  metadata_reg_a);
}
1962 
/* Build the general purpose tag from metadata register A.
 * DR_STE_SET_TAG clears the consumed field.  Always succeeds.
 */
static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
					    struct mlx5dr_ste_build *sb,
					    u8 *tag)
{
	struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;

	DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
		       misc_2_mask, metadata_reg_a);

	return 0;
}
1974 
/* Initialize an STE builder matching on metadata register A. */
void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
				      struct mlx5dr_match_param *mask,
				      bool inner, bool rx)
{
	dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
	sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
1987 
/* Build the ETHL4_MISC bit mask: TCP sequence/ack numbers of the
 * selected (inner/outer) header.  Consumed mask fields are cleared.
 */
static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
					      bool inner, u8 *bit_mask)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (!inner) {
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc3,
				  outer_tcp_seq_num);
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc3,
				  outer_tcp_ack_num);
	} else {
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc3,
				  inner_tcp_seq_num);
		DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc3,
				  inner_tcp_ack_num);
	}
}
2005 
/* Build the ETHL4_MISC tag: TCP sequence/ack numbers of the selected
 * header.  DR_STE_SET_TAG clears consumed fields.  Always succeeds.
 */
static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
					struct mlx5dr_ste_build *sb,
					u8 *tag)
{
	struct mlx5dr_match_misc3 *misc3 = &value->misc3;

	if (!sb->inner) {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
	} else {
		DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
		DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
	}

	return 0;
}
2022 
/* Initialize an STE builder matching on TCP sequence/ack numbers. */
void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
				  struct mlx5dr_match_param *mask,
				  bool inner, bool rx)
{
	dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
	sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
2035 
2036 static void
dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param * value,bool inner,u8 * bit_mask)2037 dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
2038 						bool inner, u8 *bit_mask)
2039 {
2040 	struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
2041 
2042 	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
2043 			  outer_vxlan_gpe_flags,
2044 			  misc_3_mask, outer_vxlan_gpe_flags);
2045 	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
2046 			  outer_vxlan_gpe_next_protocol,
2047 			  misc_3_mask, outer_vxlan_gpe_next_protocol);
2048 	DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
2049 			  outer_vxlan_gpe_vni,
2050 			  misc_3_mask, outer_vxlan_gpe_vni);
2051 }
2052 
2053 static int
dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)2054 dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
2055 					   struct mlx5dr_ste_build *sb,
2056 					   u8 *tag)
2057 {
2058 	struct mlx5dr_match_misc3 *misc3 = &value->misc3;
2059 
2060 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
2061 		       outer_vxlan_gpe_flags, misc3,
2062 		       outer_vxlan_gpe_flags);
2063 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
2064 		       outer_vxlan_gpe_next_protocol, misc3,
2065 		       outer_vxlan_gpe_next_protocol);
2066 	DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
2067 		       outer_vxlan_gpe_vni, misc3,
2068 		       outer_vxlan_gpe_vni);
2069 
2070 	return 0;
2071 }
2072 
/* Initialize an STE builder matching on VXLAN-GPE tunnel fields. */
void mlx5dr_ste_build_flex_parser_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
						struct mlx5dr_match_param *mask,
						bool inner, bool rx)
{
	dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
							sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	/* Same tunnel-header lookup type as GENEVE */
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
2086 
2087 static void
dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param * value,u8 * bit_mask)2088 dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
2089 					     u8 *bit_mask)
2090 {
2091 	struct mlx5dr_match_misc *misc_mask = &value->misc;
2092 
2093 	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2094 			  geneve_protocol_type,
2095 			  misc_mask, geneve_protocol_type);
2096 	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2097 			  geneve_oam,
2098 			  misc_mask, geneve_oam);
2099 	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2100 			  geneve_opt_len,
2101 			  misc_mask, geneve_opt_len);
2102 	DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
2103 			  geneve_vni,
2104 			  misc_mask, geneve_vni);
2105 }
2106 
2107 static int
dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param * value,struct mlx5dr_ste_build * sb,u8 * tag)2108 dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
2109 					struct mlx5dr_ste_build *sb,
2110 					u8 *tag)
2111 {
2112 	struct mlx5dr_match_misc *misc = &value->misc;
2113 
2114 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2115 		       geneve_protocol_type, misc, geneve_protocol_type);
2116 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2117 		       geneve_oam, misc, geneve_oam);
2118 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2119 		       geneve_opt_len, misc, geneve_opt_len);
2120 	DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
2121 		       geneve_vni, misc, geneve_vni);
2122 
2123 	return 0;
2124 }
2125 
/* Initialize an STE builder matching on GENEVE tunnel fields. */
void mlx5dr_ste_build_flex_parser_tnl_geneve(struct mlx5dr_ste_build *sb,
					     struct mlx5dr_match_param *mask,
					     bool inner, bool rx)
{
	dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	/* Same tunnel-header lookup type as VXLAN-GPE */
	sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
	sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
2137 
/* Build the STEERING_REGISTERS_0 bit mask from metadata registers
 * C0-C3.  Consumed mask fields are cleared in @value.
 */
static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
			  misc2, metadata_reg_c_0);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
			  misc2, metadata_reg_c_1);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
			  misc2, metadata_reg_c_2);
	DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
			  misc2, metadata_reg_c_3);
}
2152 
/* Build the STEERING_REGISTERS_0 tag from metadata registers C0-C3.
 * DR_STE_SET_TAG clears each consumed field.  Always succeeds.
 */
static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
	DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
	DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
	DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);

	return 0;
}
2166 
/* Initialize an STE builder matching metadata registers C0-C3. */
void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
	sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
2179 
/* Build the STEERING_REGISTERS_1 bit mask from metadata registers
 * C4-C7.  Consumed mask fields are cleared in @value.
 */
static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
					     u8 *bit_mask)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
			  misc2, metadata_reg_c_4);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
			  misc2, metadata_reg_c_5);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
			  misc2, metadata_reg_c_6);
	DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
			  misc2, metadata_reg_c_7);
}
2194 
/* Build the STEERING_REGISTERS_1 tag from metadata registers C4-C7.
 * DR_STE_SET_TAG clears each consumed field.  Always succeeds.
 */
static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
				       struct mlx5dr_ste_build *sb,
				       u8 *tag)
{
	struct mlx5dr_match_misc2 *misc2 = &value->misc2;

	DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
	DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
	DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
	DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);

	return 0;
}
2208 
/* Initialize an STE builder matching metadata registers C4-C7. */
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
				 struct mlx5dr_match_param *mask,
				 bool inner, bool rx)
{
	dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);

	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
	sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
2221 
/* Build the source GVMI/QP bit mask.
 *
 * DR_STE_SET_MASK writes an all-ones (-1) field mask when the spec
 * field is in use; the actual GVMI value is resolved at tag-build time
 * from the port caps.  source_eswitch_owner_vhca_id has no STE field
 * of its own — it is cleared here after mlx5dr_ste_build_src_gvmi_qpn()
 * has already sampled it into sb->vhca_id_valid.
 */
static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
					       u8 *bit_mask)
{
	struct mlx5dr_match_misc *misc_mask = &value->misc;

	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
	DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
	misc_mask->source_eswitch_owner_vhca_id = 0;
}
2231 
/* Build the source GVMI/QP tag.
 *
 * source_qp is taken directly from source_sqn.  source_gvmi is
 * translated: when the mask carried an eswitch_owner_vhca_id
 * (sb->vhca_id_valid), that id selects between the local domain caps
 * and the peer domain caps; the source_port is then looked up in the
 * selected caps to obtain the port's GVMI.  Returns -EINVAL on an
 * unknown vhca id or port.
 */
static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
					 struct mlx5dr_ste_build *sb,
					 u8 *tag)
{
	struct mlx5dr_match_misc *misc = &value->misc;
	struct mlx5dr_cmd_vport_cap *vport_cap;
	struct mlx5dr_domain *dmn = sb->dmn;
	struct mlx5dr_cmd_caps *caps;
	u8 *bit_mask = sb->bit_mask;
	bool source_gvmi_set;

	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);

	if (sb->vhca_id_valid) {
		/* Find port GVMI based on the eswitch_owner_vhca_id */
		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
			caps = &dmn->info.caps;
		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
					   dmn->peer_dmn->info.caps.gvmi))
			caps = &dmn->peer_dmn->info.caps;
		else
			return -EINVAL;
	} else {
		caps = &dmn->info.caps;
	}

	vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
	if (!vport_cap)
		return -EINVAL;

	/* Only write the GVMI if the bit mask actually requested it */
	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
	if (vport_cap->vport_gvmi && source_gvmi_set)
		MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);

	/* Mark both fields as consumed */
	misc->source_eswitch_owner_vhca_id = 0;
	misc->source_port = 0;

	return 0;
}
2271 
/* Initialize an STE builder matching on the source GVMI and QP. */
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
				   struct mlx5dr_match_param *mask,
				   struct mlx5dr_domain *dmn,
				   bool inner, bool rx)
{
	/* Sample vhca_id_valid first: the bit-mask helper below clears
	 * mask->misc.source_eswitch_owner_vhca_id.
	 */
	sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;

	dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);

	sb->dmn = dmn;
	sb->inner = inner;
	sb->rx = rx;
	sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
	sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
	sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
}
2289