// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netns/generic.h>

extern unsigned int nf_tables_net_id;

struct nft_rbtree {
        struct rb_root          root;           /* tree of elements, highest key leftmost */
        rwlock_t                lock;           /* serializes updates and slow-path reads */
        seqcount_rwlock_t       count;          /* lets lockless readers detect updates */
        struct delayed_work     gc_work;        /* periodic garbage collection */
};

struct nft_rbtree_elem {
        struct rb_node          node;
        struct nft_set_ext      ext;
};
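
/* Interval sets are stored as pairs of elements: a start element and an end
 * element whose flags extension carries NFT_SET_ELEM_INTERVAL_END.  Keys are
 * kept in the tree in reverse order (highest value leftmost), so an rb_next()
 * walk visits the end element of an interval before its start element.
 */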
static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
        return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
               (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
        return !nft_rbtree_interval_end(rbe);
}

static int nft_rbtree_cmp(const struct nft_set *set,
                          const struct nft_rbtree_elem *e1,
                          const struct nft_rbtree_elem *e2)
{
        return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
                      set->klen);
}

static bool nft_rbtree_elem_expired(const struct nft_rbtree_elem *rbe)
{
        return nft_set_elem_expired(&rbe->ext) ||
               nft_set_elem_is_dead(&rbe->ext);
}
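
/* Lockless lookup, called under RCU with a seqcount snapshot taken by the
 * caller.  Returns false as soon as read_seqcount_retry() reports a
 * concurrent update; the caller decides whether to retry under priv->lock.
 */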
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                                const u32 *key, const struct nft_set_ext **ext,
                                unsigned int seq)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        const struct nft_rbtree_elem *rbe, *interval = NULL;
        u8 genmask = nft_genmask_cur(net);
        const struct rb_node *parent;
        int d;

        parent = rcu_dereference_raw(priv->root.rb_node);
        while (parent != NULL) {
                if (read_seqcount_retry(&priv->count, seq))
                        return false;

                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (interval &&
                            !nft_rbtree_cmp(set, rbe, interval) &&
                            nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(interval))
                                continue;
                        interval = rbe;
                } else if (d > 0)
                        parent = rcu_dereference_raw(parent->rb_right);
                else {
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }

                        if (nft_rbtree_elem_expired(rbe))
                                return false;

                        if (nft_rbtree_interval_end(rbe)) {
                                if (nft_set_is_anonymous(set))
                                        return false;
                                parent = rcu_dereference_raw(parent->rb_left);
                                interval = NULL;
                                continue;
                        }

                        *ext = &rbe->ext;
                        return true;
                }
        }

        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
            !nft_rbtree_elem_expired(interval) &&
            nft_rbtree_interval_start(interval)) {
                *ext = &interval->ext;
                return true;
        }

        return false;
}
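
/* Fast path first: try the lockless lookup and only fall back to the
 * read side of priv->lock if the seqcount shows that the tree was modified
 * underneath us.
 */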
static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
                              const u32 *key, const struct nft_set_ext **ext)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        unsigned int seq = read_seqcount_begin(&priv->count);
        bool ret;

        ret = __nft_rbtree_lookup(net, set, key, ext, seq);
        if (ret || !read_seqcount_retry(&priv->count, seq))
                return ret;

        read_lock_bh(&priv->lock);
        seq = read_seqcount_begin(&priv->count);
        ret = __nft_rbtree_lookup(net, set, key, ext, seq);
        read_unlock_bh(&priv->lock);

        return ret;
}
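
/* Lookup variant used by nft_rbtree_get(): @flags selects, via
 * NFT_SET_ELEM_INTERVAL_END, whether the start or the end element of an
 * interval is returned.
 */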
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
                             const u32 *key, struct nft_rbtree_elem **elem,
                             unsigned int seq, unsigned int flags, u8 genmask)
{
        struct nft_rbtree_elem *rbe, *interval = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent;
        const void *this;
        int d;

        parent = rcu_dereference_raw(priv->root.rb_node);
        while (parent != NULL) {
                if (read_seqcount_retry(&priv->count, seq))
                        return false;

                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                this = nft_set_ext_key(&rbe->ext);
                d = memcmp(this, key, set->klen);
                if (d < 0) {
                        parent = rcu_dereference_raw(parent->rb_left);
                        if (!(flags & NFT_SET_ELEM_INTERVAL_END))
                                interval = rbe;
                } else if (d > 0) {
                        parent = rcu_dereference_raw(parent->rb_right);
                        if (flags & NFT_SET_ELEM_INTERVAL_END)
                                interval = rbe;
                } else {
                        if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = rcu_dereference_raw(parent->rb_left);
                                continue;
                        }

                        if (nft_set_elem_expired(&rbe->ext))
                                return false;

                        if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
                            (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
                            (flags & NFT_SET_ELEM_INTERVAL_END)) {
                                *elem = rbe;
                                return true;
                        }

                        if (nft_rbtree_interval_end(rbe))
                                interval = NULL;

                        parent = rcu_dereference_raw(parent->rb_left);
                }
        }

        if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
            nft_set_elem_active(&interval->ext, genmask) &&
            !nft_set_elem_expired(&interval->ext) &&
            ((!nft_rbtree_interval_end(interval) &&
              !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
             (nft_rbtree_interval_end(interval) &&
              (flags & NFT_SET_ELEM_INTERVAL_END)))) {
                *elem = interval;
                return true;
        }

        return false;
}

static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
                            const struct nft_set_elem *elem, unsigned int flags)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        unsigned int seq = read_seqcount_begin(&priv->count);
        struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
        const u32 *key = (const u32 *)&elem->key.val;
        u8 genmask = nft_genmask_cur(net);
        bool ret;

        ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
        if (ret || !read_seqcount_retry(&priv->count, seq))
                return rbe;

        read_lock_bh(&priv->lock);
        seq = read_seqcount_begin(&priv->count);
        ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
        if (!ret)
                rbe = ERR_PTR(-ENOENT);
        read_unlock_bh(&priv->lock);

        return rbe;
}
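
/* Synchronous garbage collection, called from the insert path with
 * priv->lock held for writing: an expired element and the end element
 * coupled with it are unlinked from the tree and queued for destruction,
 * so that they cannot trigger bogus overlap reports.
 */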
static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
                                 struct nft_rbtree *priv,
                                 struct nft_rbtree_elem *rbe)
{
        struct nft_set_elem elem = {
                .priv   = rbe,
        };

        nft_setelem_data_deactivate(net, set, &elem);
        rb_erase(&rbe->node, &priv->root);
}

static const struct nft_rbtree_elem *
nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
                   struct nft_rbtree_elem *rbe)
{
        struct nft_set *set = (struct nft_set *)__set;
        struct rb_node *prev = rb_prev(&rbe->node);
        struct net *net = read_pnet(&set->net);
        struct nft_rbtree_elem *rbe_prev;
        struct nft_trans_gc *gc;

        gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
        if (!gc)
                return ERR_PTR(-ENOMEM);

        /* search for end interval coming before this element.
         * end intervals don't carry a timeout extension, they
         * are coupled with the interval start element.
         */
        while (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
                if (nft_rbtree_interval_end(rbe_prev) &&
                    nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
                        break;

                prev = rb_prev(prev);
        }

        rbe_prev = NULL;
        if (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
                nft_rbtree_gc_remove(net, set, priv, rbe_prev);

                /* There is always room in this trans gc for this element,
                 * memory allocation never actually happens, hence, the warning
                 * splat in such case. No need to set NFT_SET_ELEM_DEAD_BIT,
                 * this is synchronous gc which never fails.
                 */
                gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
                if (WARN_ON_ONCE(!gc))
                        return ERR_PTR(-ENOMEM);

                nft_trans_gc_elem_add(gc, rbe_prev);
        }

        nft_rbtree_gc_remove(net, set, priv, rbe);
        gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
        if (WARN_ON_ONCE(!gc))
                return ERR_PTR(-ENOMEM);

        nft_trans_gc_elem_add(gc, rbe);

        nft_trans_gc_queue_sync_done(gc);

        return rbe_prev;
}

static bool nft_rbtree_update_first(const struct nft_set *set,
                                    struct nft_rbtree_elem *rbe,
                                    struct rb_node *first)
{
        struct nft_rbtree_elem *first_elem;

        first_elem = rb_entry(first, struct nft_rbtree_elem, node);
        /* this element is closest to where the new element is to be inserted:
         * update the first element for the node list path.
         */
        if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
                return true;

        return false;
}
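
/* Insert a new element and detect overlaps against existing intervals.  The
 * first descent locates the element from which to start walking the ordered
 * elements; the walk annotates the closest greater-or-equal (rbe_ge) and
 * less-or-equal (rbe_le) elements, garbage-collecting expired entries on the
 * way, and the result is then classified as full overlap (-EEXIST), partial
 * overlap (-ENOTEMPTY) or an acceptable insertion.
 */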
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                               struct nft_rbtree_elem *new,
                               struct nft_set_ext **ext)
{
        struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
        struct rb_node *node, *next, *parent, **p, *first = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 cur_genmask = nft_genmask_cur(net);
        u8 genmask = nft_genmask_next(net);
        int d;

        /* Descend the tree to search for an existing element with a key
         * greater than the one being inserted. This is the first element
         * from which to walk the ordered elements to find possible overlaps.
         */
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
                d = nft_rbtree_cmp(set, rbe, new);

                if (d < 0) {
                        p = &parent->rb_left;
                } else if (d > 0) {
                        if (!first ||
                            nft_rbtree_update_first(set, rbe, first))
                                first = &rbe->node;

                        p = &parent->rb_right;
                } else {
                        if (nft_rbtree_interval_end(rbe))
                                p = &parent->rb_left;
                        else
                                p = &parent->rb_right;
                }
        }

        if (!first)
                first = rb_first(&priv->root);

        /* Detect overlap by going through the list of valid tree nodes.
         * Values stored in the tree are in reversed order, starting from
         * highest to lowest value.
         */
        for (node = first; node != NULL; node = next) {
                next = rb_next(node);

                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (!nft_set_elem_active(&rbe->ext, genmask))
                        continue;

                /* perform garbage collection to avoid bogus overlap reports
                 * but skip new elements in this transaction.
                 */
                if (nft_set_elem_expired(&rbe->ext) &&
                    nft_set_elem_active(&rbe->ext, cur_genmask)) {
                        const struct nft_rbtree_elem *removed_end;

                        removed_end = nft_rbtree_gc_elem(set, priv, rbe);
                        if (IS_ERR(removed_end))
                                return PTR_ERR(removed_end);

                        if (removed_end == rbe_le || removed_end == rbe_ge)
                                return -EAGAIN;

                        continue;
                }

                d = nft_rbtree_cmp(set, rbe, new);
                if (d == 0) {
                        /* Matching end element: no need to look for an
                         * overlapping greater or equal element.
                         */
                        if (nft_rbtree_interval_end(rbe)) {
                                rbe_le = rbe;
                                break;
                        }

                        /* first element that is greater or equal to key value. */
                        if (!rbe_ge) {
                                rbe_ge = rbe;
                                continue;
                        }

                        /* this is a closer greater-or-equal element, update it. */
                        if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
                                rbe_ge = rbe;
                                continue;
                        }

                        /* element is equal to the key value, make sure the flags
                         * are the same: an existing greater-or-equal start element
                         * must not be replaced by a greater-or-equal end element.
                         */
                        if ((nft_rbtree_interval_start(new) &&
                             nft_rbtree_interval_start(rbe_ge)) ||
                            (nft_rbtree_interval_end(new) &&
                             nft_rbtree_interval_end(rbe_ge))) {
                                rbe_ge = rbe;
                                continue;
                        }
                } else if (d > 0) {
                        /* annotate element greater than the new element. */
                        rbe_ge = rbe;
                        continue;
                } else if (d < 0) {
                        /* annotate element less than the new element. */
                        rbe_le = rbe;
                        break;
                }
        }

        /* - new start element matching existing start element: full overlap
         *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
         */
        if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
            nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
                *ext = &rbe_ge->ext;
                return -EEXIST;
        }

        /* - new end element matching existing end element: full overlap
         *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
         */
        if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
            nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
                *ext = &rbe_le->ext;
                return -EEXIST;
        }

        /* - new start element with existing closest, less or equal key value
         *   being a start element: partial overlap, reported as -ENOTEMPTY.
         *   Anonymous sets allow for two consecutive start elements since they
         *   are constant, skip them to avoid bogus overlap reports.
         */
        if (!nft_set_is_anonymous(set) && rbe_le &&
            nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
                return -ENOTEMPTY;

        /* - new end element with existing closest, less or equal key value
         *   being an end element: partial overlap, reported as -ENOTEMPTY.
         */
        if (rbe_le &&
            nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
                return -ENOTEMPTY;

        /* - new end element with existing closest, greater or equal key value
         *   being an end element: partial overlap, reported as -ENOTEMPTY.
         */
        if (rbe_ge &&
            nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
                return -ENOTEMPTY;

        /* Accepted element: pick insertion point depending on key value */
        parent = NULL;
        p = &priv->root.rb_node;
        while (*p != NULL) {
                parent = *p;
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);
                d = nft_rbtree_cmp(set, rbe, new);

                if (d < 0)
                        p = &parent->rb_left;
                else if (d > 0)
                        p = &parent->rb_right;
                else if (nft_rbtree_interval_end(rbe))
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        rb_link_node_rcu(&new->node, parent, p);
        rb_insert_color(&new->node, &priv->root);
        return 0;
}
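
/* Updates are serialized by priv->lock and published to lockless readers
 * through the seqcount.  If synchronous GC removed one of the elements being
 * tracked, __nft_rbtree_insert() returns -EAGAIN and the insertion is
 * retried from scratch.
 */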
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                             const struct nft_set_elem *elem,
                             struct nft_set_ext **ext)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;
        int err;

        do {
                if (fatal_signal_pending(current))
                        return -EINTR;

                cond_resched();

                write_lock_bh(&priv->lock);
                write_seqcount_begin(&priv->count);
                err = __nft_rbtree_insert(net, set, rbe, ext);
                write_seqcount_end(&priv->count);
                write_unlock_bh(&priv->lock);
        } while (err == -EAGAIN);

        return err;
}

static void nft_rbtree_remove(const struct net *net,
                              const struct nft_set *set,
                              const struct nft_set_elem *elem)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe = elem->priv;

        write_lock_bh(&priv->lock);
        write_seqcount_begin(&priv->count);
        rb_erase(&rbe->node, &priv->root);
        write_seqcount_end(&priv->count);
        write_unlock_bh(&priv->lock);
}

static void nft_rbtree_activate(const struct net *net,
                                const struct nft_set *set,
                                const struct nft_set_elem *elem)
{
        struct nft_rbtree_elem *rbe = elem->priv;

        nft_set_elem_change_active(net, set, &rbe->ext);
}

static bool nft_rbtree_flush(const struct net *net,
                             const struct nft_set *set, void *priv)
{
        struct nft_rbtree_elem *rbe = priv;

        nft_set_elem_change_active(net, set, &rbe->ext);

        return true;
}
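
/* Find the element matching @elem and clear its active bit in the next
 * generation, so the removal only takes effect once the transaction is
 * committed.  Start and end elements share the same key, hence the interval
 * flags are used to pick the right one during the descent.
 */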
static void *nft_rbtree_deactivate(const struct net *net,
                                   const struct nft_set *set,
                                   const struct nft_set_elem *elem)
{
        const struct nft_rbtree *priv = nft_set_priv(set);
        const struct rb_node *parent = priv->root.rb_node;
        struct nft_rbtree_elem *rbe, *this = elem->priv;
        u8 genmask = nft_genmask_next(net);
        int d;

        while (parent != NULL) {
                rbe = rb_entry(parent, struct nft_rbtree_elem, node);

                d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
                           set->klen);
                if (d < 0)
                        parent = parent->rb_left;
                else if (d > 0)
                        parent = parent->rb_right;
                else {
                        if (nft_rbtree_interval_end(rbe) &&
                            nft_rbtree_interval_start(this)) {
                                parent = parent->rb_left;
                                continue;
                        } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
                        } else if (nft_set_elem_expired(&rbe->ext)) {
                                break;
                        } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
                                continue;
                        }
                        nft_rbtree_flush(net, set, rbe);
                        return rbe;
                }
        }
        return NULL;
}

static void nft_rbtree_walk(const struct nft_ctx *ctx,
                            struct nft_set *set,
                            struct nft_set_iter *iter)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct nft_set_elem elem;
        struct rb_node *node;

        read_lock_bh(&priv->lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (iter->count < iter->skip)
                        goto cont;
                if (!nft_set_elem_active(&rbe->ext, iter->genmask))
                        goto cont;

                elem.priv = rbe;

                iter->err = iter->fn(ctx, set, iter, &elem);
                if (iter->err < 0) {
                        read_unlock_bh(&priv->lock);
                        return;
                }
cont:
                iter->count++;
        }
        read_unlock_bh(&priv->lock);
}
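
/* Asynchronous garbage collection worker: expired start elements and their
 * coupled end elements are flagged dead and queued on a nft_trans_gc batch.
 * The run is abandoned and retried later whenever the ruleset generation
 * (gc_seq) changes while the tree is being walked.
 */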
static void nft_rbtree_gc(struct work_struct *work)
{
        struct nft_rbtree_elem *rbe, *rbe_end = NULL;
        struct nftables_pernet *nft_net;
        struct nft_rbtree *priv;
        struct nft_trans_gc *gc;
        struct rb_node *node;
        struct nft_set *set;
        unsigned int gc_seq;
        struct net *net;

        priv = container_of(work, struct nft_rbtree, gc_work.work);
        set = nft_set_container_of(priv);
        net = read_pnet(&set->net);
        nft_net = net_generic(net, nf_tables_net_id);
        gc_seq = READ_ONCE(nft_net->gc_seq);

        if (nft_set_gc_is_pending(set))
                goto done;

        gc = nft_trans_gc_alloc(set, gc_seq, GFP_KERNEL);
        if (!gc)
                goto done;

        read_lock_bh(&priv->lock);
        for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {

                /* Ruleset has been updated, try later. */
                if (READ_ONCE(nft_net->gc_seq) != gc_seq) {
                        nft_trans_gc_destroy(gc);
                        gc = NULL;
                        goto try_later;
                }

                rbe = rb_entry(node, struct nft_rbtree_elem, node);

                if (nft_set_elem_is_dead(&rbe->ext))
                        goto dead_elem;

                /* elements are reversed in the rbtree for historical reasons,
                 * from highest to lowest value, that is why end element is
                 * always visited before the start element.
                 */
                if (nft_rbtree_interval_end(rbe)) {
                        rbe_end = rbe;
                        continue;
                }
                if (!nft_set_elem_expired(&rbe->ext))
                        continue;

                nft_set_elem_dead(&rbe->ext);

                if (!rbe_end)
                        continue;

                nft_set_elem_dead(&rbe_end->ext);

                gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
                if (!gc)
                        goto try_later;

                nft_trans_gc_elem_add(gc, rbe_end);
                rbe_end = NULL;
dead_elem:
                gc = nft_trans_gc_queue_async(gc, gc_seq, GFP_ATOMIC);
                if (!gc)
                        goto try_later;

                nft_trans_gc_elem_add(gc, rbe);
        }
try_later:
        read_unlock_bh(&priv->lock);

        if (gc)
                nft_trans_gc_queue_async_done(gc);
done:
        queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                           nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
                               const struct nft_set_desc *desc)
{
        return sizeof(struct nft_rbtree);
}
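
/* Set instantiation: the seqcount is associated with the rwlock (so the
 * writer side can be validated), and the periodic GC worker is only armed
 * for sets that use the timeout feature.
 */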
static int nft_rbtree_init(const struct nft_set *set,
                           const struct nft_set_desc *desc,
                           const struct nlattr * const nla[])
{
        struct nft_rbtree *priv = nft_set_priv(set);

        rwlock_init(&priv->lock);
        seqcount_rwlock_init(&priv->count, &priv->lock);
        priv->root = RB_ROOT;

        INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
        if (set->flags & NFT_SET_TIMEOUT)
                queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
                                   nft_set_gc_interval(set));

        return 0;
}

static void nft_rbtree_destroy(const struct nft_ctx *ctx,
                               const struct nft_set *set)
{
        struct nft_rbtree *priv = nft_set_priv(set);
        struct nft_rbtree_elem *rbe;
        struct rb_node *node;

        cancel_delayed_work_sync(&priv->gc_work);
        rcu_barrier();
        while ((node = priv->root.rb_node) != NULL) {
                rb_erase(node, &priv->root);
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
                nf_tables_set_elem_destroy(ctx, set, rbe);
        }
}

static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
                                struct nft_set_estimate *est)
{
        if (desc->field_count > 1)
                return false;

        if (desc->size)
                est->size = sizeof(struct nft_rbtree) +
                            desc->size * sizeof(struct nft_rbtree_elem);
        else
                est->size = ~0;

        est->lookup = NFT_SET_CLASS_O_LOG_N;
        est->space  = NFT_SET_CLASS_O_N;

        return true;
}
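
/* Userspace view, for illustration: this backend typically serves interval
 * sets with a single key field, e.g. a set created along the lines of
 *
 *      nft add set inet filter ranges '{ type ipv4_addr; flags interval; }'
 *      nft add element inet filter ranges '{ 10.0.0.0-10.0.0.255 }'
 *
 * The backend actually selected also depends on nft_rbtree_estimate() and
 * on the other registered set types.
 */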
const struct nft_set_type nft_set_rbtree_type = {
        .features       = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
        .ops            = {
                .privsize       = nft_rbtree_privsize,
                .elemsize       = offsetof(struct nft_rbtree_elem, ext),
                .estimate       = nft_rbtree_estimate,
                .init           = nft_rbtree_init,
                .destroy        = nft_rbtree_destroy,
                .insert         = nft_rbtree_insert,
                .remove         = nft_rbtree_remove,
                .deactivate     = nft_rbtree_deactivate,
                .flush          = nft_rbtree_flush,
                .activate       = nft_rbtree_activate,
                .lookup         = nft_rbtree_lookup,
                .walk           = nft_rbtree_walk,
                .get            = nft_rbtree_get,
        },
};