// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}

#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */

int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
	if (id < tcam->max_regions) {
		__set_bit(id, tcam->used_regions);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	u16 id;

	id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
	if (id < tcam->max_groups) {
		__set_bit(id, tcam->used_groups);
		*p_id = id;
		return 0;
	}
	return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}

struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct mutex lock; /* guards region list updates */
	struct list_head region_list;
	unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
	bool vregion_rehash_enabled;
	unsigned int *p_min_prio;
	unsigned int *p_max_prio;
};

struct mlxsw_sp_acl_tcam_rehash_ctx {
	void *hints_priv;
	bool this_is_rollback;
	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk currently
							  * being migrated.
							  */
	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to resume
							* migration from within
							* the vchunk currently
							* being migrated.
							*/
	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry at which to
						       * stop migration within
						       * the vchunk currently
						       * being migrated.
						       */
};

struct mlxsw_sp_acl_tcam_vregion {
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct {
		struct delayed_work dw;
		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
	} rehash;
	struct mlxsw_sp *mlxsw_sp;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};

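/* mlxsw_sp_acl_tcam_group_update() rewrites the PAGT register, which holds
 * the ordered list of regions the hardware walks for this group. When two
 * consecutive regions belong to the same vregion (which happens while rehash
 * migrates rules between them), the first one is marked "multi" to indicate
 * that the next region is its sibling.
 */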
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	mutex_init(&group->lock);

	return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled,
			     unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
	vgroup->p_min_prio = p_min_prio;
	vgroup->p_max_prio = p_max_prio;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* The priority of a vregion is the priority of its first vchunk. */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}

static void
mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (list_empty(&vgroup->vregion_list))
		return;
	vregion = list_first_entry(&vgroup->vregion_list,
				   typeof(*vregion), list);
	*vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	vregion = list_last_entry(&vgroup->vregion_list,
				  typeof(*vregion), list);
	*vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int priority,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    priority)
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	mutex_unlock(&group->lock);
	return 0;

err_group_update:
	list_del(&region->list);
err_region_count_check:
	mutex_unlock(&group->lock);
	return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mutex_lock(&group->lock);
	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region,
						    priority, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

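/* Find the vregion a new vchunk of the given priority should live in.
 * Returns a vregion with *p_need_split false when the requested key elements
 * fit, a vregion with *p_need_split true when the priority lands inside a
 * vregion whose key does not cover the elements, and NULL when a fresh
 * vregion has to be created.
 */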
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check whether the requested priority rather belongs
		 * under one of the subsequent vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than the currently inspected vregion,
		 * we cannot use this vregion, so return NULL to indicate
		 * a new vregion has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than the currently inspected vregion,
		 * we cannot use this vregion. There is still some hope that
		 * the next vregion would be the fit. So let it be processed
		 * and eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate if the vregion needs to be split in order to add
		 * the requested priority. Split is needed when the requested
		 * element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* New vregion has to be created. */
}

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* In case the template is set, we don't have to look up the pattern
	 * and just use the template.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}

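/* Region allocation below programs the PTAR register: the region is created
 * with a base size and the list of flexible key block encodings derived from
 * the vregion's key info. The unpack afterwards reads back tcam_region_info,
 * which later per-region register writes (e.g. PACL) pass back to the device.
 */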
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
					region->id);
	kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash.dw,
			       msecs_to_jiffies(interval));
}

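/* The rehash work gets a budget of REHASH_CREDITS entries per invocation.
 * Each migrated entry consumes one credit; when the budget goes negative,
 * the work reschedules itself immediately to continue, otherwise the next
 * run happens after the configured interval.
 */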
static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
	if (credits < 0)
		/* Rehash ran out of credits, so it was interrupted.
		 * Schedule the work as soon as possible to continue.
		 */
		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
	else
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	/* If a rule was added to or deleted from a vchunk which is currently
	 * under rehash migration, we have to reset the ventry pointers
	 * to make sure all rules are properly migrated.
	 */
	if (vregion->rehash.ctx.current_vchunk == vchunk) {
		vregion->rehash.ctx.start_ventry = NULL;
		vregion->rehash.ctx.stop_ventry = NULL;
	}
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	/* If a chunk was added to or deleted from a vregion, we have to reset
	 * the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
	vregion->rehash.ctx.current_vchunk = NULL;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	mutex_init(&vregion->lock);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
	vregion->vgroup = vgroup;
	vregion->ref_count = 1;

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash.dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
		mutex_lock(&tcam->lock);
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
		mutex_unlock(&tcam->lock);
	}

	return vregion;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
		cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	mutex_destroy(&vregion->lock);
	kfree(vregion);
}

u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u32 vregion_rehash_intrvl;

	if (WARN_ON(!ops->region_rehash_hints_get))
		return 0;
	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
	return vregion_rehash_intrvl;
}

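/* Setting the interval to 0 disables periodic rehash; any nonzero value
 * below MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN (3000 ms) is rejected.
 * On a change, every vregion's delayed work is kicked (or cancelled)
 * right away so the new setting takes effect immediately.
 */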
int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_acl_tcam *tcam,
						u32 val)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
		return -EINVAL;
	if (WARN_ON(!ops->region_rehash_hints_get))
		return -EOPNOTSUPP;
	tcam->vregion_rehash_intrvl = val;
	mutex_lock(&tcam->lock);
	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
		if (val)
			mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
		else
			cancel_delayed_work_sync(&vregion->rehash.dw);
	}
	mutex_unlock(&tcam->lock);
	return 0;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk_element_usage vregion_elusage;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool need_split;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion) {
		if (need_split) {
			/* According to priority, the new vchunk should belong
			 * to an existing vregion. However, this vchunk needs
			 * elements that vregion does not contain. We would
			 * need to split the existing vregion into two and
			 * create a new vregion for the new vchunk in between.
			 * This is not supported now.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		vregion->ref_count++;
		return vregion;
	}

	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);

	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	if (--vregion->ref_count)
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct list_head *pos;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	vchunk->ref_count = 1;

	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	mutex_lock(&vregion->lock);
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

	/* Position the vchunk inside the list according to priority */
	list_for_each(pos, &vregion->vchunk_list) {
		vchunk2 = list_entry(pos, typeof(*vchunk2), list);
		if (vchunk2->priority > priority)
			break;
	}
	list_add_tail(&vchunk->list, pos);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_del(&vchunk->list);
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	mutex_unlock(&vregion->lock);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
	kfree(vchunk);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		vchunk->ref_count++;
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (--vchunk->ref_count)
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}

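/* Rule insertion path: resolve (or create) the vchunk matching the rule's
 * priority and key element usage, then create the hardware entry under the
 * vregion lock. The ventry keeps a pointer to rulei so the entry can be
 * re-created in another chunk during rehash migration.
 */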
static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	mutex_unlock(&vregion->lock);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
						    ventry->entry, activity);
}

static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_entry *new_entry;

	/* First check if the entry is not already where we want it to be. */
	if (ventry->entry->chunk == chunk)
		return 0;

	if (--(*credits) < 0)
		return 0;

	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = new_entry;
	return 0;
}

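/* Per-vchunk migration is a small state machine kept in the rehash context:
 * migrate_start() installs a new chunk in the destination region and parks
 * the old one in chunk2, the ventry loop then moves entries one by one
 * (bounded by credits), and migrate_end() destroys the drained old chunk.
 * On an entry failure the chunk/chunk2 pointers are swapped and the same
 * loop re-runs as a rollback, stopping at the ventry that failed.
 */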
static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;

	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
	vchunk->chunk2 = vchunk->chunk;
	vchunk->chunk = new_chunk;
	ctx->current_vchunk = vchunk;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
	return 0;
}

static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
	ctx->current_vchunk = NULL;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	int err;

	if (vchunk->chunk->region != region) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
							     region, ctx);
		if (err)
			return err;
	} else if (!vchunk->chunk2) {
		/* The chunk is already as it should be, nothing to do. */
		return 0;
	}

	/* If the migration got interrupted, we have the ventry to start from
	 * stored in context.
	 */
	if (ctx->start_ventry)
		ventry = ctx->start_ventry;
	else
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);

	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
		 */
		if (ventry == ctx->stop_ventry)
			break;

		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk, credits);
		if (err) {
			if (ctx->this_is_rollback) {
				/* Save the ventry which we ended with and try
				 * to continue later on.
				 */
				ctx->start_ventry = ventry;
				return err;
			}
			/* Swap the chunk and chunk2 pointers so the follow-up
			 * rollback call will see the original chunk pointer
			 * in vchunk->chunk.
			 */
			swap(vchunk->chunk, vchunk->chunk2);
			/* The rollback has to be done from the beginning of
			 * the chunk, which is why we have to null the
			 * start_ventry. However, we know where to stop the
			 * rollback: at the current ventry.
			 */
			ctx->start_ventry = NULL;
			ctx->stop_ventry = ventry;
			return err;
		} else if (*credits < 0) {
			/* We are out of credits; the rest of the ventries
			 * will be migrated later. Save the ventry
			 * which we ended with.
			 */
			ctx->start_ventry = ventry;
			return 0;
		}
	}

	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	/* If the migration got interrupted, we have the vchunk
	 * we are working on stored in context.
	 */
	if (ctx->current_vchunk)
		vchunk = ctx->current_vchunk;
	else
		vchunk = list_first_entry(&vregion->vchunk_list,
					  typeof(*vchunk), list);

	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
							   vregion->region,
							   ctx, credits);
		if (err || *credits < 0)
			return err;
	}
	return 0;
}

static int
mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion,
				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				  int *credits)
{
	int err, err2;

	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
						   ctx, credits);
	if (err) {
		/* In case the migration was not successful, we need to swap
		 * so the original region pointer is assigned again
		 * to vregion->region.
		 */
		swap(vregion->region, vregion->region2);
		ctx->current_vchunk = NULL;
		ctx->this_is_rollback = true;
		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
							    ctx, credits);
		if (err2) {
			trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
									       vregion);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback after vregion migration failure\n");
			/* Let the rollback be continued later on. */
		}
	}
	mutex_unlock(&vregion->lock);
	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
	return err;
}

static bool
mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	return ctx->hints_priv;
}

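/* A rehash cycle: rehash_start() asks the low-level ops for placement hints,
 * creates the destination region and attaches it right behind the source in
 * the same group; migration then proceeds in credit-bounded steps; once it
 * completes, rehash_end() detaches and destroys the drained source region.
 * A non-NULL ctx->hints_priv is what marks a cycle as in progress.
 */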
static int
mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vregion *vregion,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	struct mlxsw_sp_acl_tcam_region *new_region;
	void *hints_priv;
	int err;

	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);

	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
	if (IS_ERR(hints_priv))
		return PTR_ERR(hints_priv);

	new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
						     vregion, hints_priv);
	if (IS_ERR(new_region)) {
		err = PTR_ERR(new_region);
		goto err_region_create;
	}

	/* vregion->region contains the pointer to the new region
	 * we are going to migrate to.
	 */
	vregion->region2 = vregion->region;
	vregion->region = new_region;
	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
						    vregion->region2->group,
						    new_region, priority,
						    vregion->region2);
	if (err)
		goto err_group_region_attach;

	ctx->hints_priv = hints_priv;
	ctx->this_is_rollback = false;

	return 0;

err_group_region_attach:
	vregion->region = vregion->region2;
	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
err_region_create:
	ops->region_rehash_hints_put(hints_priv);
	return err;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vregion *vregion,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	vregion->region2 = NULL;
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
	ops->region_rehash_hints_put(ctx->hints_priv);
	ctx->hints_priv = NULL;
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
	int err;

	/* Check if the previous rehash work was interrupted,
	 * which means we have to continue it now.
	 * If not, start a new rehash.
	 */
	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
							     vregion, ctx);
		if (err) {
			if (err != -EAGAIN)
				dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
			return;
		}
	}

	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
						ctx, credits);
	if (err)
		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");

	if (*credits >= 0)
		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}

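/* Initialization sizes the region ID space to min(ACL_MAX_TCAM_REGIONS,
 * ACL_MAX_REGIONS) since the driver maps ACL regions to TCAM regions 1:1,
 * and caps the per-group region count by what a single PAGT register write
 * can express (MLXSW_REG_PAGT_ACL_MAX_NUM).
 */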
int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	int err;

	mutex_init(&tcam->lock);
	tcam->vregion_rehash_intrvl =
			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
	INIT_LIST_HEAD(&tcam->vregion_list);

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
	if (!tcam->used_regions) {
		err = -ENOMEM;
		goto err_alloc_used_regions;
	}
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						  ACL_MAX_GROUP_SIZE);
	tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
				     MLXSW_REG_PAGT_ACL_MAX_NUM);

	err = ops->init(mlxsw_sp, tcam->priv, tcam);
	if (err)
		goto err_tcam_init;

	return 0;

err_tcam_init:
	bitmap_free(tcam->used_groups);
err_alloc_used_groups:
	bitmap_free(tcam->used_regions);
err_alloc_used_regions:
	mutex_destroy(&tcam->lock);
	return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->fini(mlxsw_sp, tcam->priv);
	bitmap_free(tcam->used_groups);
	bitmap_free(tcam->used_regions);
	mutex_destroy(&tcam->lock);
}

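/* Element patterns describe the sets of flex-key elements that regions
 * are created with. When a vregion is needed for a rule, a pattern that
 * covers the rule's element usage is selected, so e.g. IPv4 and IPv6
 * matches land in regions with suitably shaped keys.
 */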
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC_32_47,
	MLXSW_AFK_ELEMENT_DMAC_0_31,
	MLXSW_AFK_ELEMENT_SMAC_32_47,
	MLXSW_AFK_ELEMENT_SMAC_0_31,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};

static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP_96_127,
	MLXSW_AFK_ELEMENT_SRC_IP_64_95,
	MLXSW_AFK_ELEMENT_SRC_IP_32_63,
	MLXSW_AFK_ELEMENT_SRC_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_IP_96_127,
	MLXSW_AFK_ELEMENT_DST_IP_64_95,
	MLXSW_AFK_ELEMENT_DST_IP_32_63,
	MLXSW_AFK_ELEMENT_DST_IP_0_31,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};

static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)

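/* Flower profile: each ruleset is backed by a vgroup and each rule by a
 * single ventry. Note the boolean passed to mlxsw_sp_acl_tcam_vgroup_add()
 * is true here and false for the multicast router profile below.
 */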
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam *tcam,
				     void *ruleset_priv,
				     struct mlxsw_afk_element_usage *tmplt_elusage,
				     unsigned int *p_min_prio,
				     unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					    mlxsw_sp_acl_tcam_patterns,
					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					    tmplt_elusage, true,
					    p_min_prio, p_max_prio);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
				     void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				      void *ruleset_priv,
				      struct mlxsw_sp_port *mlxsw_sp_port,
				      bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
					    mlxsw_sp_port, ingress);
}

static void
mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					void *ruleset_priv,
					struct mlxsw_sp_port *mlxsw_sp_port,
					bool ingress)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
				       mlxsw_sp_port, ingress);
}

static u16
mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
				  void *ruleset_priv, void *rule_priv,
				  struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					     void *rule_priv,
					     struct mlxsw_sp_acl_rule_info *rulei)
{
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
					   void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_flower_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_flower_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};

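/* Multicast router (MR) profile: like flower, but the ruleset grabs its
 * vchunk up front (see the comment in the ruleset_add callback below)
 * and port binding is a no-op, since the multicast router itself owns
 * the binding.
 */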
struct mlxsw_sp_acl_tcam_mr_ruleset {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_vgroup vgroup;
};

struct mlxsw_sp_acl_tcam_mr_rule {
	struct mlxsw_sp_acl_tcam_ventry ventry;
};

static int
mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam *tcam,
				 void *ruleset_priv,
				 struct mlxsw_afk_element_usage *tmplt_elusage,
				 unsigned int *p_min_prio,
				 unsigned int *p_max_prio)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	int err;

	err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
					   mlxsw_sp_acl_tcam_patterns,
					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
					   tmplt_elusage, false,
					   p_min_prio, p_max_prio);
	if (err)
		return err;

	/* For most TCAM clients it would make sense to take a TCAM chunk
	 * only when the first rule is written. This is not the case for
	 * the multicast router, which must be bound to a specific ACL
	 * group ID that already exists in HW before the multicast router
	 * is initialized.
	 */
	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
						       &ruleset->vgroup, 1,
						       tmplt_elusage);
	if (IS_ERR(ruleset->vchunk)) {
		err = PTR_ERR(ruleset->vchunk);
		goto err_chunk_get;
	}

	return 0;

err_chunk_get:
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
	return err;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
}

static int
mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  bool ingress)
{
	/* Binding is done when initializing multicast router */
	return 0;
}

static void
mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
				    void *ruleset_priv,
				    struct mlxsw_sp_port *mlxsw_sp_port,
				    bool ingress)
{
}

static u16
mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;

	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
}

static int
mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
			      void *rule_priv,
			      struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
					    &rule->ventry, rulei);
}

static void
mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
}

static int
mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
					 void *rule_priv,
					 struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
						       rulei);
}

static int
mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
				       void *rule_priv, bool *activity)
{
	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;

	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
						     activity);
}

static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_mr_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_mr_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_mr_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_mr_ruleset_unbind,
	.ruleset_group_id	= mlxsw_sp_acl_tcam_mr_ruleset_group_id,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
	.rule_add		= mlxsw_sp_acl_tcam_mr_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_mr_rule_del,
	.rule_action_replace	= mlxsw_sp_acl_tcam_mr_rule_action_replace,
	.rule_activity_get	= mlxsw_sp_acl_tcam_mr_rule_activity_get,
};

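/* Profile ops lookup table, indexed by enum mlxsw_sp_acl_profile. A
 * client resolves its ops once and calls through them; roughly (a sketch
 * only, arguments and error handling elided):
 *
 *	ops = mlxsw_sp_acl_tcam_profile_ops(mlxsw_sp,
 *					    MLXSW_SP_ACL_PROFILE_FLOWER);
 *	err = ops->ruleset_add(mlxsw_sp, tcam, ruleset_priv, ...);
 */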
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
};

const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
			      enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;

	if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
		return NULL;
	ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
	if (WARN_ON(!ops))
		return NULL;
	return ops;
}