// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

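/* Upper bound on the STEs a single rule may occupy: the match STEs
 * selected by the matcher plus any extra STEs needed to hold actions.
 * Used below to size the temporary hw_ste_arr during rule creation.
 */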
#define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)

struct mlx5dr_rule_action_member {
	struct mlx5dr_action *action;
	struct list_head list;
};

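/* Chain a new STE onto the tail of a hash bucket's miss list and queue
 * the updated control word of the old tail for transmission to HW.
 * Roughly, the bucket layout after the append looks like (illustrative):
 *
 *   htbl[idx] -> ste_0 -> ... -> old_last -> new_last_ste -> miss addr
 *
 * where each arrow is the per-STE miss address written via
 * mlx5dr_ste_set_miss_addr().
 */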
static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
	if (!ste_info_last)
		return -ENOMEM;

	mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, last_ste->hw_ste,
						  ste_info_last, send_list, true);

	return 0;
}

static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->ste_arr;
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
				 nic_matcher->e_anchor->chunk->icm_addr);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}

static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}

static int
dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
				      struct mlx5dr_domain *dmn)
{
	int ret;

	list_del(&ste_info->send_list);

	/* Copy data to the STE: only the reduced size or the control portion;
	 * the last 16B (mask) are already written to HW.
	 */
	if (ste_info->size == DR_STE_SIZE_CTRL)
		memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
	else
		memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);

	ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
				       ste_info->size, ste_info->offset);

	kfree(ste_info);
	return ret;
}

static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	if (list_empty(miss_list))
		return NULL;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
			return ste;
	}

	return NULL;
}

static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous entry in the list */
	ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
					  mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed to update duplicate entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this STE
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link the old STE's rule to the new STE */
	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}

static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
				 nic_matcher->e_anchor->chunk->icm_addr);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}

static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}

static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);

	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher,
						    nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl,
						    update_list);
		if (err)
			break;
	}

	return err;
}

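/* Grow a hash table that has become too collision-heavy. In broad
 * strokes the flow is:
 *   1. Allocate a larger table with the same lu_type/byte_mask.
 *   2. Re-hash every used entry (and its miss chain) into the new table.
 *   3. Write the new table to HW and flush the queued per-STE updates.
 *   4. Re-point the previous-level STE at the new table.
 * The per-STE writes queued on rehash_table_send_list are ordered so
 * that original entries land before the miss addresses of collision
 * entries that reference them.
 */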
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writes to HW are done in the regular order of rehash_table_send_list,
	 * so that the original data is written before the miss addresses of
	 * collision entries, if any exist.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor; an anchor's size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On the matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to call mlx5dr_ste_set_hit_addr on the (48B) hw_ste
		 * here, since it only touches the first 32B
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->ste_arr[0].hw_ste,
					new_htbl->chunk->icm_addr,
					new_htbl->chunk->num_of_entries);

		ste_to_update = &prev_htbl->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     cur_htbl->pointing_ste->hw_ste,
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, ste_to_update->hw_ste, ste_info,
						  update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info entries of the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		kfree(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	kfree(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}

static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size new_size;

	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);

	if (new_size == cur_htbl->chunk_size)
		return NULL; /* Skip rehash, we are already at the max size */

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, new_size);
}

static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
					miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	kfree(ste_info);
	return NULL;
}

static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	struct mlx5dr_rule_action_member *tmp;

	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
		list_del(&action_mem->list);
		refcount_dec(&action_mem->action->refcount);
		kvfree(action_mem);
	}
}

static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}

void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
				 struct mlx5dr_ste *ste,
				 bool force)
{
	/* Updating the rule member is usually done for the last STE, or during
	 * rule creation to recover from a mid-creation failure (for this
	 * purpose the force flag is used)
	 */
	if (ste->next_htbl && !force)
		return;

	/* Update is required since each rule keeps track of its last STE */
	ste->rule_rx_tx = nic_rule;
	nic_rule->last_rule_ste = ste;
}

static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
{
	struct mlx5dr_ste *first_ste;

	first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
				     struct mlx5dr_ste, miss_list_node);

	return first_ste->htbl->pointing_ste;
}

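/* Collect the rule's STEs from last to first by repeatedly hopping to
 * the STE that points at the current one's hash table. The walk relies
 * on every STE in a miss chain sharing the same pointing_ste, so any
 * member of the chain leads back to the previous chain location.
 */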
int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
					 struct mlx5dr_ste *curr_ste,
					 int *num_of_stes)
{
	bool first = false;

	*num_of_stes = 0;

	if (!curr_ste)
		return -ENOENT;

	/* Iterate from last to first */
	while (!first) {
		first = curr_ste->ste_chain_location == 1;
		ste_arr[*num_of_stes] = curr_ste;
		*num_of_stes += 1;
		curr_ste = dr_rule_get_pointed_ste(curr_ste);
	}

	return 0;
}

static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
				       struct mlx5dr_rule_rx_tx *nic_rule)
{
	struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
	struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
	int i;

	if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
		return;

	while (i--)
		mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
}

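/* Count the set bits in byte_mask using Kernighan's trick: each
 * iteration of byte_mask &= (byte_mask - 1) clears the lowest set bit,
 * so the loop runs once per set bit. For example, 0b0110 takes two
 * iterations and yields 2.
 */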
static u16 dr_get_bits_per_mask(u16 byte_mask)
{
	u16 bits = 0;

	while (byte_mask) {
		byte_mask = byte_mask & (byte_mask - 1);
		bits++;
	}

	return bits;
}

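/* Decide whether a hash table is worth growing. Roughly: the table must
 * be below both the domain's ICM limit and its own growth cap, the byte
 * mask must select more bits than the current (log) table size can
 * spread, and the collision count must have crossed the increase
 * threshold while enough non-colliding entries exist to make a bigger
 * table useful.
 */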
static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
				      struct mlx5dr_domain *dmn,
				      struct mlx5dr_domain_rx_tx *nic_dmn)
{
	struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
	int threshold;

	if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
		return false;

	if (!mlx5dr_ste_htbl_may_grow(htbl))
		return false;

	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
		return false;

	threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
	if (ctrl->num_of_collisions >= threshold &&
	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
		return true;

	return false;
}

static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
				      struct mlx5dr_rule_rx_tx *nic_rule,
				      struct list_head *send_ste_list,
				      struct mlx5dr_ste *last_ste,
				      u8 *hw_ste_arr,
				      u32 new_hw_ste_arr_sz)
{
	struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
	struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
	u8 num_of_builders = nic_matcher->num_of_builders;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	u8 *curr_hw_ste, *prev_hw_ste;
	struct mlx5dr_ste *action_ste;
	int i, k;

	/* Two cases:
	 * 1. num_of_builders is equal to new_hw_ste_arr_sz: the actions fit
	 *    in the existing STEs.
	 * 2. num_of_builders is less than new_hw_ste_arr_sz: new STEs were
	 *    added to support the actions.
	 */

	for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
		curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
		prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
		action_ste = dr_rule_create_collision_htbl(matcher,
							   nic_matcher,
							   curr_hw_ste);
		if (!action_ste)
			return -ENOMEM;

		mlx5dr_ste_get(action_ste);

		action_ste->htbl->pointing_ste = last_ste;
		last_ste->next_htbl = action_ste->htbl;
		last_ste = action_ste;

		/* When freeing an STE we traverse its miss list, so add this
		 * STE to the list
		 */
		list_add_tail(&action_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(action_ste));

		ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
					  GFP_KERNEL);
		if (!ste_info_arr[k])
			goto err_exit;

		/* Point the current STE to the new action */
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     prev_hw_ste,
						     action_ste->htbl);

		mlx5dr_rule_set_last_member(nic_rule, action_ste, true);

		mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
							  curr_hw_ste,
							  ste_info_arr[k],
							  send_ste_list, false);
	}

	last_ste->next_htbl = NULL;

	return 0;

err_exit:
	mlx5dr_ste_put(action_ste, matcher, nic_matcher);
	return -ENOMEM;
}

static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
				      struct mlx5dr_matcher_rx_tx *nic_matcher,
				      struct mlx5dr_ste_htbl *cur_htbl,
				      struct mlx5dr_ste *ste,
				      u8 ste_location,
				      u8 *hw_ste,
				      struct list_head *miss_list,
				      struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;

	/* Take a ref on the table, only on the first time this STE is used */
	mlx5dr_htbl_get(cur_htbl);

	/* new entry -> new branch */
	list_add_tail(&ste->miss_list_node, miss_list);

	mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
				 nic_matcher->e_anchor->chunk->icm_addr);

	ste->ste_chain_location = ste_location;

	ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
	if (!ste_info)
		goto clean_ste_setting;

	if (mlx5dr_ste_create_next_htbl(matcher,
					nic_matcher,
					ste,
					hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(dmn, "Failed allocating table\n");
		goto clean_ste_info;
	}

	cur_htbl->ctrl.num_of_valid_entries++;

	mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	return 0;

clean_ste_info:
	kfree(ste_info);
clean_ste_setting:
	list_del_init(&ste->miss_list_node);
	mlx5dr_htbl_put(cur_htbl);

	return -ENOMEM;
}

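/* Resolve one location in the rule's STE chain against cur_htbl. Three
 * outcomes are possible: the hashed slot is free and a new branch is
 * opened; an STE with the same tag already exists and is reused (or
 * flagged as a duplicate rule if it is the last location); or the slot
 * is taken by a different tag and a collision entry is appended, after
 * optionally rehashing the table once if it is due to grow.
 */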
static struct mlx5dr_ste *
dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
			  struct mlx5dr_rule_rx_tx *nic_rule,
			  struct list_head *send_ste_list,
			  struct mlx5dr_ste_htbl *cur_htbl,
			  u8 *hw_ste,
			  u8 ste_location,
			  struct mlx5dr_ste_htbl **put_htbl)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *matched_ste;
	struct list_head *miss_list;
	bool skip_rehash = false;
	struct mlx5dr_ste *ste;
	int index;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

again:
	index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
	miss_list = &cur_htbl->chunk->miss_list[index];
	ste = &cur_htbl->ste_arr[index];

	if (mlx5dr_ste_is_not_used(ste)) {
		if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
					       ste, ste_location,
					       hw_ste, miss_list,
					       send_ste_list))
			return NULL;
	} else {
		/* Hash table index in use, check if this STE is in the miss list */
		matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
		if (matched_ste) {
			/* If it is the last STE in the chain and has the same
			 * tag, then all the previous STEs are the same too,
			 * i.e. this rule is a duplicate.
			 */
			if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
				return matched_ste;

			mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
		}

		if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
			/* Hash table index in use, try to resize the hash table */
			skip_rehash = true;

			/* Hold the table till we update.
			 * Released in dr_rule_create_rule_nic()
			 */
			*put_htbl = cur_htbl;
			mlx5dr_htbl_get(cur_htbl);

			new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
						  ste_location, send_ste_list);
			if (!new_htbl) {
				mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
					   cur_htbl->chunk_size);
				mlx5dr_htbl_put(cur_htbl);
			} else {
				cur_htbl = new_htbl;
			}
			goto again;
		} else {
			/* Hash table index in use, add another collision (miss) entry */
			ste = dr_rule_handle_collision(matcher,
						       nic_matcher,
						       ste,
						       hw_ste,
						       miss_list,
						       send_ste_list);
			if (!ste) {
				mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
					   index);
				return NULL;
			}
		}
	}
	return ste;
}

static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
				      u32 s_idx, u32 e_idx)
{
	u32 i;

	for (i = s_idx; i < e_idx; i++) {
		if (value[i] & ~mask[i]) {
			pr_info("Rule parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_match_parameters *value,
			   struct mlx5dr_match_param *param)
{
	u8 match_criteria = matcher->match_criteria;
	size_t value_size = value->match_sz;
	u8 *mask_p = (u8 *)&matcher->mask;
	u8 *param_p = (u8 *)param;
	u32 s_idx, e_idx;

	if (!value_size ||
	    (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
		mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
		return false;
	}

	mlx5dr_ste_copy_param(matcher->match_criteria, param, value);

	if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
		s_idx = offsetof(struct mlx5dr_match_param, outer);
		e_idx = min(s_idx + sizeof(param->outer), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
		s_idx = offsetof(struct mlx5dr_match_param, misc);
		e_idx = min(s_idx + sizeof(param->misc), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
		s_idx = offsetof(struct mlx5dr_match_param, inner);
		e_idx = min(s_idx + sizeof(param->inner), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
		s_idx = offsetof(struct mlx5dr_match_param, misc2);
		e_idx = min(s_idx + sizeof(param->misc2), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
		s_idx = offsetof(struct mlx5dr_match_param, misc3);
		e_idx = min(s_idx + sizeof(param->misc3), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contain a value not specified by mask\n");
			return false;
		}
	}

	if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
		s_idx = offsetof(struct mlx5dr_match_param, misc4);
		e_idx = min(s_idx + sizeof(param->misc4), value_size);

		if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
			mlx5dr_err(matcher->tbl->dmn,
				   "Rule misc4 parameters contain a value not specified by mask\n");
			return false;
		}
	}
	return true;
}

static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
				    struct mlx5dr_rule_rx_tx *nic_rule)
{
	mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
	dr_rule_clean_rule_members(rule, nic_rule);
	mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);

	return 0;
}

static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
{
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	dr_rule_destroy_rule_nic(rule, &rule->tx);
	return 0;
}

static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		dr_rule_destroy_rule_nic(rule, &rule->rx);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		dr_rule_destroy_rule_nic(rule, &rule->tx);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		dr_rule_destroy_rule_fdb(rule);
		break;
	default:
		return -EINVAL;
	}

	dr_rule_remove_action_members(rule);
	kfree(rule);
	return 0;
}

static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
{
	if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
		return DR_RULE_IPV6;

	return DR_RULE_IPV4;
}

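/* In an FDB domain every rule is inserted on both the RX and TX sides.
 * Skip the side that can never see the traffic: roughly, a rule that
 * matches on a source port only belongs on the side that port ingresses
 * from, and the flow_source hint likewise pins a rule to one direction.
 */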
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
			 enum mlx5dr_domain_nic_type nic_type,
			 struct mlx5dr_match_param *mask,
			 struct mlx5dr_match_param *value,
			 u32 flow_source)
{
	bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;

	if (domain != MLX5DR_DOMAIN_TYPE_FDB)
		return false;

	if (mask->misc.source_port) {
		if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
			return true;

		if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
			return true;
	}

	if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
		return true;

	if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
		return true;

	return false;
}

static int
dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
			struct mlx5dr_rule_rx_tx *nic_rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	struct mlx5dr_ste_htbl *htbl = NULL;
	struct mlx5dr_ste_htbl *cur_htbl;
	struct mlx5dr_ste *ste = NULL;
	LIST_HEAD(send_ste_list);
	u8 *hw_ste_arr = NULL;
	u32 new_hw_ste_arr_sz;
	int ret, i;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
			 rule->flow_source))
		return 0;

	hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
	if (!hw_ste_arr)
		return -ENOMEM;

	mlx5dr_domain_nic_lock(nic_dmn);

	ret = mlx5dr_matcher_select_builders(matcher,
					     nic_matcher,
					     dr_rule_get_ipv(&param->outer),
					     dr_rule_get_ipv(&param->inner));
	if (ret)
		goto free_hw_ste;

	/* Set the tag values inside the STE array */
	ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
	if (ret)
		goto free_hw_ste;

	/* Set the action values/addresses inside the STE array */
	ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
					   num_actions, hw_ste_arr,
					   &new_hw_ste_arr_sz);
	if (ret)
		goto free_hw_ste;

	cur_htbl = nic_matcher->s_htbl;

	/* Go over the array of STEs and build a dr_ste for each. The loop
	 * covers only the builders, whose count may be smaller than the
	 * number of STEs when some actions live in additional STEs.
	 */
	for (i = 0; i < nic_matcher->num_of_builders; i++) {
		/* Calculate CRC and keep new ste entry */
		u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);

		ste = dr_rule_handle_ste_branch(rule,
						nic_rule,
						&send_ste_list,
						cur_htbl,
						cur_hw_ste_ent,
						i + 1,
						&htbl);
		if (!ste) {
			mlx5dr_err(dmn, "Failed creating next branch\n");
			ret = -ENOENT;
			goto free_rule;
		}

		cur_htbl = ste->next_htbl;

		mlx5dr_ste_get(ste);
		mlx5dr_rule_set_last_member(nic_rule, ste, true);
	}

	/* Connect actions */
	ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
					 ste, hw_ste_arr, new_hw_ste_arr_sz);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed applying actions\n");
		goto free_rule;
	}

	ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
	if (ret) {
		mlx5dr_err(dmn, "Failed sending STE!\n");
		goto free_rule;
	}

	if (htbl)
		mlx5dr_htbl_put(htbl);

	mlx5dr_domain_nic_unlock(nic_dmn);

	kfree(hw_ste_arr);

	return 0;

free_rule:
	dr_rule_clean_rule_members(rule, nic_rule);
	/* Clean all ste_info entries */
	list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
		list_del(&ste_info->send_list);
		kfree(ste_info);
	}
free_hw_ste:
	mlx5dr_domain_nic_unlock(nic_dmn);
	kfree(hw_ste_arr);
	return ret;
}

static int
dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
			struct mlx5dr_match_param *param,
			size_t num_actions,
			struct mlx5dr_action *actions[])
{
	struct mlx5dr_match_param copy_param = {};
	int ret;

	/* Copy match_param since it will be consumed during the first
	 * nic_rule insertion.
	 */
	memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));

	ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
				      num_actions, actions);
	if (ret)
		return ret;

	ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
				      num_actions, actions);
	if (ret)
		goto destroy_rule_nic_rx;

	return 0;

destroy_rule_nic_rx:
	dr_rule_destroy_rule_nic(rule, &rule->rx);
	return ret;
}

static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
		    struct mlx5dr_match_parameters *value,
		    size_t num_actions,
		    struct mlx5dr_action *actions[],
		    u32 flow_source)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_match_param param = {};
	struct mlx5dr_rule *rule;
	int ret;

	if (!dr_rule_verify(matcher, value, &param))
		return NULL;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->matcher = matcher;
	rule->flow_source = flow_source;
	INIT_LIST_HEAD(&rule->rule_actions_list);

	ret = dr_rule_add_action_members(rule, num_actions, actions);
	if (ret)
		goto free_rule;

	switch (dmn->type) {
	case MLX5DR_DOMAIN_TYPE_NIC_RX:
		rule->rx.nic_matcher = &matcher->rx;
		ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_NIC_TX:
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
					      num_actions, actions);
		break;
	case MLX5DR_DOMAIN_TYPE_FDB:
		rule->rx.nic_matcher = &matcher->rx;
		rule->tx.nic_matcher = &matcher->tx;
		ret = dr_rule_create_rule_fdb(rule, &param,
					      num_actions, actions);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		goto remove_action_members;

	return rule;

remove_action_members:
	dr_rule_remove_action_members(rule);
free_rule:
	kfree(rule);
	mlx5dr_err(dmn, "Failed creating rule\n");
	return NULL;
}

struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
				       struct mlx5dr_match_parameters *value,
				       size_t num_actions,
				       struct mlx5dr_action *actions[],
				       u32 flow_source)
{
	struct mlx5dr_rule *rule;

	refcount_inc(&matcher->refcount);

	rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
	if (!rule)
		refcount_dec(&matcher->refcount);

	return rule;
}

int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
{
	struct mlx5dr_matcher *matcher = rule->matcher;
	int ret;

	ret = dr_rule_destroy_rule(rule);
	if (!ret)
		refcount_dec(&matcher->refcount);

	return ret;
}

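/* Illustrative caller flow (a sketch, not taken from this file): a user
 * of this API builds the match value and actions elsewhere, then pairs
 * every successful mlx5dr_rule_create() with mlx5dr_rule_destroy().
 * The matcher, value, actions array and num_actions below are assumed
 * to have been prepared by the caller.
 *
 *	struct mlx5dr_rule *rule;
 *
 *	rule = mlx5dr_rule_create(matcher, &value, num_actions, actions,
 *				  MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT);
 *	if (!rule)
 *		return -EINVAL;
 *	...
 *	mlx5dr_rule_destroy(rule);
 */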