/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
 *                         Patrick Schaaf <bof@bof.de>
 *                         Martin Josefsson <gandalf@wlug.westbo.se>
 * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org>
 */
#ifndef _IP_SET_H
#define _IP_SET_H

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <uapi/linux/netfilter/ipset/ip_set.h>

#define _IP_SET_MODULE_DESC(a, b, c)		\
	MODULE_DESCRIPTION(a " type of IP sets, revisions " b "-" c)
#define IP_SET_MODULE_DESC(a, b, c)		\
	_IP_SET_MODULE_DESC(a, __stringify(b), __stringify(c))

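/* Illustrative use, following the usual pattern in set type modules
 * such as ip_set_hash_ip.c (names assumed, not mandated by this header):
 *
 *	IP_SET_MODULE_DESC("hash:ip", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
 *
 * which expands to a MODULE_DESCRIPTION() string with the two revision
 * numbers stringified.
 */
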
/* Set features */
enum ip_set_feature {
	IPSET_TYPE_IP_FLAG = 0,
	IPSET_TYPE_IP = (1 << IPSET_TYPE_IP_FLAG),
	IPSET_TYPE_PORT_FLAG = 1,
	IPSET_TYPE_PORT = (1 << IPSET_TYPE_PORT_FLAG),
	IPSET_TYPE_MAC_FLAG = 2,
	IPSET_TYPE_MAC = (1 << IPSET_TYPE_MAC_FLAG),
	IPSET_TYPE_IP2_FLAG = 3,
	IPSET_TYPE_IP2 = (1 << IPSET_TYPE_IP2_FLAG),
	IPSET_TYPE_NAME_FLAG = 4,
	IPSET_TYPE_NAME = (1 << IPSET_TYPE_NAME_FLAG),
	IPSET_TYPE_IFACE_FLAG = 5,
	IPSET_TYPE_IFACE = (1 << IPSET_TYPE_IFACE_FLAG),
	IPSET_TYPE_MARK_FLAG = 6,
	IPSET_TYPE_MARK = (1 << IPSET_TYPE_MARK_FLAG),
	IPSET_TYPE_NOMATCH_FLAG = 7,
	IPSET_TYPE_NOMATCH = (1 << IPSET_TYPE_NOMATCH_FLAG),
	/* Strictly speaking not a feature, but a flag for dumping:
	 * this settype must be dumped last */
	IPSET_DUMP_LAST_FLAG = 8,
	IPSET_DUMP_LAST = (1 << IPSET_DUMP_LAST_FLAG),
};

/* Set extensions */
enum ip_set_extension {
	IPSET_EXT_BIT_TIMEOUT = 0,
	IPSET_EXT_TIMEOUT = (1 << IPSET_EXT_BIT_TIMEOUT),
	IPSET_EXT_BIT_COUNTER = 1,
	IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
	IPSET_EXT_BIT_COMMENT = 2,
	IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
	IPSET_EXT_BIT_SKBINFO = 3,
	IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
	/* Mark set with an extension which needs to call destroy */
	IPSET_EXT_BIT_DESTROY = 7,
	IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
};

#define SET_WITH_TIMEOUT(s)	((s)->extensions & IPSET_EXT_TIMEOUT)
#define SET_WITH_COUNTER(s)	((s)->extensions & IPSET_EXT_COUNTER)
#define SET_WITH_COMMENT(s)	((s)->extensions & IPSET_EXT_COMMENT)
#define SET_WITH_SKBINFO(s)	((s)->extensions & IPSET_EXT_SKBINFO)
#define SET_WITH_FORCEADD(s)	((s)->flags & IPSET_CREATE_FLAG_FORCEADD)

/* Extension id, in size order */
enum ip_set_ext_id {
	IPSET_EXT_ID_COUNTER = 0,
	IPSET_EXT_ID_TIMEOUT,
	IPSET_EXT_ID_SKBINFO,
	IPSET_EXT_ID_COMMENT,
	IPSET_EXT_ID_MAX,
};

struct ip_set;

/* Extension type */
struct ip_set_ext_type {
	/* Destroy extension private data (can be NULL) */
	void (*destroy)(struct ip_set *set, void *ext);
	enum ip_set_extension type;
	enum ipset_cadt_flags flag;
	/* Size and minimal alignment */
	u8 len;
	u8 align;
};

extern const struct ip_set_ext_type ip_set_extensions[];

struct ip_set_counter {
	atomic64_t bytes;
	atomic64_t packets;
};

struct ip_set_comment_rcu {
	struct rcu_head rcu;
	char str[0];
};

struct ip_set_comment {
	struct ip_set_comment_rcu __rcu *c;
};

struct ip_set_skbinfo {
	u32 skbmark;
	u32 skbmarkmask;
	u32 skbprio;
	u16 skbqueue;
	u16 __pad;
};

struct ip_set_ext {
	struct ip_set_skbinfo skbinfo;
	u64 packets;
	u64 bytes;
	char *comment;
	u32 timeout;
	u8 packets_op;
	u8 bytes_op;
};

struct ip_set;

#define ext_timeout(e, s)	\
((unsigned long *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_TIMEOUT]))
#define ext_counter(e, s)	\
((struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]))
#define ext_comment(e, s)	\
((struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]))
#define ext_skbinfo(e, s)	\
((struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO]))

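/* Extensions live in per-element memory right after the type specific
 * element data: set->offset[] holds the byte offset of each enabled
 * extension and set->dsize the total element size.  An illustrative
 * access pattern (sketch; "e" points to an element of "set", and
 * "ext", "flags" and "timeout_in_secs" are assumed locals of the caller):
 *
 *	if (SET_WITH_TIMEOUT(set))
 *		ip_set_timeout_set(ext_timeout(e, set), timeout_in_secs);
 *	if (SET_WITH_COUNTER(set))
 *		ip_set_update_counter(ext_counter(e, set), ext, flags);
 */
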
typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
			   const struct ip_set_ext *ext,
			   struct ip_set_ext *mext, u32 cmdflags);

/* Kernel API function options */
struct ip_set_adt_opt {
	u8 family;		/* Actual protocol family */
	u8 dim;			/* Dimension of match/target */
	u8 flags;		/* Direction and negation flags */
	u32 cmdflags;		/* Command-like flags */
	struct ip_set_ext ext;	/* Extensions */
};

/* Set type, variant-specific part */
struct ip_set_type_variant {
	/* Kernelspace: test/add/del entries
	 *		returns negative error code,
	 *			zero for no match/success to add/delete
	 *			positive for matching element */
	int (*kadt)(struct ip_set *set, const struct sk_buff *skb,
		    const struct xt_action_param *par,
		    enum ipset_adt adt, struct ip_set_adt_opt *opt);

	/* Userspace: test/add/del entries
	 *		returns negative error code,
	 *			zero for no match/success to add/delete
	 *			positive for matching element */
	int (*uadt)(struct ip_set *set, struct nlattr *tb[],
		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried);

	/* Low level add/del/test functions */
	ipset_adtfn adt[IPSET_ADT_MAX];

	/* When adding entries and set is full, try to resize the set */
	int (*resize)(struct ip_set *set, bool retried);
	/* Destroy the set */
	void (*destroy)(struct ip_set *set);
	/* Flush the elements */
	void (*flush)(struct ip_set *set);
	/* Expire entries before listing */
	void (*expire)(struct ip_set *set);
	/* List set header data */
	int (*head)(struct ip_set *set, struct sk_buff *skb);
	/* List elements */
	int (*list)(const struct ip_set *set, struct sk_buff *skb,
		    struct netlink_callback *cb);
	/* Keep listing private when resizing runs parallel */
	void (*uref)(struct ip_set *set, struct netlink_callback *cb,
		     bool start);

	/* Return true if "b" set is the same as "a"
	 * according to the create set parameters */
	bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
};

/* The core set type structure */
struct ip_set_type {
	struct list_head list;

	/* Typename */
	char name[IPSET_MAXNAMELEN];
	/* Protocol version */
	u8 protocol;
	/* Set type dimension */
	u8 dimension;
	/*
	 * Supported family: may be NFPROTO_UNSPEC for both
	 * NFPROTO_IPV4/NFPROTO_IPV6.
	 */
	u8 family;
	/* Type revisions */
	u8 revision_min, revision_max;
	/* Set features to control swapping */
	u16 features;

	/* Create set */
	int (*create)(struct net *net, struct ip_set *set,
		      struct nlattr *tb[], u32 flags);

	/* Attribute policies */
	const struct nla_policy create_policy[IPSET_ATTR_CREATE_MAX + 1];
	const struct nla_policy adt_policy[IPSET_ATTR_ADT_MAX + 1];

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;
};

/* register and unregister set type */
extern int ip_set_type_register(struct ip_set_type *set_type);
extern void ip_set_type_unregister(struct ip_set_type *set_type);

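/* A set type module typically fills in one static ip_set_type and
 * registers it from module init (illustrative sketch, initializer
 * abridged; names follow the hash:ip module but are not mandated):
 *
 *	static struct ip_set_type hash_ip_type __read_mostly = {
 *		.name		= "hash:ip",
 *		.protocol	= IPSET_PROTOCOL,
 *		.features	= IPSET_TYPE_IP,
 *		.dimension	= IPSET_DIM_ONE,
 *		.family		= NFPROTO_UNSPEC,
 *		.revision_min	= IPSET_TYPE_REV_MIN,
 *		.revision_max	= IPSET_TYPE_REV_MAX,
 *		.create		= hash_ip_create,
 *		.me		= THIS_MODULE,
 *	};
 *
 *	static int __init hash_ip_init(void)
 *	{
 *		return ip_set_type_register(&hash_ip_type);
 *	}
 *
 *	static void __exit hash_ip_fini(void)
 *	{
 *		ip_set_type_unregister(&hash_ip_type);
 *	}
 */
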
/* A generic IP set */
struct ip_set {
	/* The name of the set */
	char name[IPSET_MAXNAMELEN];
	/* Lock protecting the set data */
	spinlock_t lock;
	/* References to the set */
	u32 ref;
	/* References to the set for netlink events like dump,
	 * ref can be swapped out by ip_set_swap
	 */
	u32 ref_netlink;
	/* The core set type */
	struct ip_set_type *type;
	/* The type variant doing the real job */
	const struct ip_set_type_variant *variant;
	/* The actual INET family of the set */
	u8 family;
	/* The type revision */
	u8 revision;
	/* Extensions */
	u8 extensions;
	/* Create flags */
	u8 flags;
	/* Default timeout value, if enabled */
	u32 timeout;
	/* Number of elements (vs timeout) */
	u32 elements;
	/* Size of the dynamic extensions (vs timeout) */
	size_t ext_size;
	/* Element data size */
	size_t dsize;
	/* Offsets to extensions in elements */
	size_t offset[IPSET_EXT_ID_MAX];
	/* The type specific data */
	void *data;
};

static inline void
ip_set_ext_destroy(struct ip_set *set, void *data)
{
	/* Check that the extension is enabled for the set and
	 * call its destroy function for its extension part in data.
	 */
	if (SET_WITH_COMMENT(set))
		ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
			set, ext_comment(data, set));
}

static inline int
ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
{
	u32 cadt_flags = 0;

	if (SET_WITH_TIMEOUT(set))
		if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
					   htonl(set->timeout))))
			return -EMSGSIZE;
	if (SET_WITH_COUNTER(set))
		cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
	if (SET_WITH_COMMENT(set))
		cadt_flags |= IPSET_FLAG_WITH_COMMENT;
	if (SET_WITH_SKBINFO(set))
		cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
	if (SET_WITH_FORCEADD(set))
		cadt_flags |= IPSET_FLAG_WITH_FORCEADD;

	if (!cadt_flags)
		return 0;
	return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
}

/* Netlink CB args */
enum {
	IPSET_CB_NET = 0,	/* net namespace */
	IPSET_CB_PROTO,		/* ipset protocol */
	IPSET_CB_DUMP,		/* dump single set/all sets */
	IPSET_CB_INDEX,		/* set index */
	IPSET_CB_PRIVATE,	/* set private data */
	IPSET_CB_ARG0,		/* type specific */
};

/* register and unregister set references */
extern ip_set_id_t ip_set_get_byname(struct net *net,
				     const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);

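/* Typical lifetime of a set reference taken by another kernel user
 * (illustrative sketch, error handling abridged; "myset" is just an
 * example name):
 *
 *	struct ip_set *set;
 *	ip_set_id_t index = ip_set_get_byname(net, "myset", &set);
 *
 *	if (index == IPSET_INVALID_ID)
 *		return -ENOENT;
 *	... use the index with ip_set_add/del/test() ...
 *	ip_set_put_byindex(net, index);
 */
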
/* API for iptables set match, and SET target */

extern int ip_set_add(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
		      struct ip_set_adt_opt *opt);
extern int ip_set_del(ip_set_id_t id, const struct sk_buff *skb,
		      const struct xt_action_param *par,
		      struct ip_set_adt_opt *opt);
extern int ip_set_test(ip_set_id_t id, const struct sk_buff *skb,
		       const struct xt_action_param *par,
		       struct ip_set_adt_opt *opt);

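/* From the packet path a caller (e.g. the xt_set match) fills in an
 * ip_set_adt_opt and tests the skb against the set.  Illustrative
 * sketch only; the field values below are an assumption, not a
 * requirement of this API:
 *
 *	struct ip_set_adt_opt opt = {
 *		.family		= NFPROTO_IPV4,
 *		.dim		= IPSET_DIM_ONE,
 *		.flags		= IPSET_DIM_ONE_SRC,
 *		.ext.timeout	= UINT_MAX,
 *	};
 *	int ret = ip_set_test(index, skb, par, &opt);
 *
 * UINT_MAX is IPSET_NO_TIMEOUT, i.e. fall back to the default timeout
 * of the set.  ret > 0 means the packet matched an element, 0 means no
 * match, and a negative value is an error from the set type.
 */
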
/* Utility functions */
extern void *ip_set_alloc(size_t size);
extern void ip_set_free(void *members);
extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
			      size_t len, size_t align);
extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
				 struct ip_set_ext *ext);
extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
				 const void *e, bool active);
extern bool ip_set_match_extensions(struct ip_set *set,
				    const struct ip_set_ext *ext,
				    struct ip_set_ext *mext,
				    u32 flags, void *data);

static inline int
ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr)
{
	__be32 ip;
	int ret = ip_set_get_ipaddr4(nla, &ip);

	if (ret)
		return ret;
	*ipaddr = ntohl(ip);
	return 0;
}

/* Ignore IPSET_ERR_EXIST errors if asked to do so? */
static inline bool
ip_set_eexist(int ret, u32 flags)
{
	return ret == -IPSET_ERR_EXIST && (flags & IPSET_FLAG_EXIST);
}

/* Match elements marked with nomatch */
static inline bool
ip_set_enomatch(int ret, u32 flags, enum ipset_adt adt, struct ip_set *set)
{
	return adt == IPSET_TEST &&
	       (set->type->features & IPSET_TYPE_NOMATCH) &&
	       ((flags >> 16) & IPSET_FLAG_NOMATCH) &&
	       (ret > 0 || ret == -ENOTEMPTY);
}

/* Check the NLA_F_NET_BYTEORDER flag */
static inline bool
ip_set_attr_netorder(struct nlattr *tb[], int type)
{
	return tb[type] && (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
}

static inline bool
ip_set_optattr_netorder(struct nlattr *tb[], int type)
{
	return !tb[type] || (tb[type]->nla_type & NLA_F_NET_BYTEORDER);
}

/* Useful converters */
static inline u32
ip_set_get_h32(const struct nlattr *attr)
{
	return ntohl(nla_get_be32(attr));
}

static inline u16
ip_set_get_h16(const struct nlattr *attr)
{
	return ntohs(nla_get_be16(attr));
}

static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
{
	struct nlattr *__nested = nla_nest_start(skb, type);
	int ret;

	if (!__nested)
		return -EMSGSIZE;
	ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
	if (!ret)
		nla_nest_end(skb, __nested);
	return ret;
}

static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
				  const struct in6_addr *ipaddrptr)
{
	struct nlattr *__nested = nla_nest_start(skb, type);
	int ret;

	if (!__nested)
		return -EMSGSIZE;
	ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
	if (!ret)
		nla_nest_end(skb, __nested);
	return ret;
}

/* Get address from skbuff */
static inline __be32
ip4addr(const struct sk_buff *skb, bool src)
{
	return src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
}

static inline void
ip4addrptr(const struct sk_buff *skb, bool src, __be32 *addr)
{
	*addr = src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr;
}

static inline void
ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
{
	memcpy(addr, src ? &ipv6_hdr(skb)->saddr : &ipv6_hdr(skb)->daddr,
	       sizeof(*addr));
}

/* How often should the gc be run by default */
#define IPSET_GC_TIME			(3 * 60)

/* Timeout period depending on the timeout value of the given set */
#define IPSET_GC_PERIOD(timeout) \
	((timeout/3) ? min_t(u32, (timeout)/3, IPSET_GC_TIME) : 1)

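/* Worked example: a set with a default timeout of 600 seconds is
 * garbage collected every 180 seconds (600/3 capped at IPSET_GC_TIME),
 * one with a 30 second timeout every 10 seconds, and a timeout below
 * 3 seconds falls back to a one second period.
 */
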
/* Entry is set with no timeout value */
#define IPSET_ELEM_PERMANENT	0

/* Set is defined with timeout support: timeout value may be 0 */
#define IPSET_NO_TIMEOUT	UINT_MAX

/* Max timeout value, see msecs_to_jiffies() in jiffies.h */
#define IPSET_MAX_TIMEOUT	(UINT_MAX >> 1)/MSEC_PER_SEC

#define ip_set_adt_opt_timeout(opt, set)	\
((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)

static inline unsigned int
ip_set_timeout_uget(struct nlattr *tb)
{
	unsigned int timeout = ip_set_get_h32(tb);

	/* Normalize to fit into jiffies */
	if (timeout > IPSET_MAX_TIMEOUT)
		timeout = IPSET_MAX_TIMEOUT;

	return timeout;
}

static inline bool
ip_set_timeout_expired(const unsigned long *t)
{
	return *t != IPSET_ELEM_PERMANENT && time_is_before_jiffies(*t);
}

static inline void
ip_set_timeout_set(unsigned long *timeout, u32 value)
{
	unsigned long t;

	if (!value) {
		*timeout = IPSET_ELEM_PERMANENT;
		return;
	}

	t = msecs_to_jiffies(value * MSEC_PER_SEC) + jiffies;
	if (t == IPSET_ELEM_PERMANENT)
		/* Bingo! :-) */
		t--;
	*timeout = t;
}

static inline u32
ip_set_timeout_get(const unsigned long *timeout)
{
	u32 t;

	if (*timeout == IPSET_ELEM_PERMANENT)
		return 0;

	t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
	/* Zero value in userspace means no timeout */
	return t == 0 ? 1 : t;
}

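/* Round trip example: ip_set_timeout_set(&t, 3600) stores an absolute
 * deadline 3600 seconds in the future, expressed in jiffies (a value
 * of 0 would mark the entry permanent instead).  A later
 * ip_set_timeout_get(&t) converts the remaining time back to whole
 * seconds, reporting at least 1 so that a still-valid entry is never
 * listed as permanent (permanent entries are reported as 0).
 */
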
static inline char*
ip_set_comment_uget(struct nlattr *tb)
{
	return nla_data(tb);
}

/* Called from uadd only, protected by the set spinlock.
 * The kadt functions don't use the comment extensions in any way.
 */
static inline void
ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
		    const struct ip_set_ext *ext)
{
	struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
	size_t len = ext->comment ? strlen(ext->comment) : 0;

	if (unlikely(c)) {
		set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
		kfree_rcu(c, rcu);
		rcu_assign_pointer(comment->c, NULL);
	}
	if (!len)
		return;
	if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
		len = IPSET_MAX_COMMENT_SIZE;
	c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
	if (unlikely(!c))
		return;
	strlcpy(c->str, ext->comment, len + 1);
	set->ext_size += sizeof(*c) + strlen(c->str) + 1;
	rcu_assign_pointer(comment->c, c);
}

/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
	struct ip_set_comment_rcu *c = rcu_dereference(comment->c);

	if (!c)
		return 0;
	return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
}

/* Called from uadd/udel, flush or the garbage collectors protected
 * by the set spinlock.
 * Called when the set is destroyed and when there can't be any user
 * of the set data anymore.
 */
static inline void
ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
{
	struct ip_set_comment_rcu *c;

	c = rcu_dereference_protected(comment->c, 1);
	if (unlikely(!c))
		return;
	set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
	kfree_rcu(c, rcu);
	rcu_assign_pointer(comment->c, NULL);
}

static inline void
ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
{
	atomic64_add((long long)bytes, &(counter)->bytes);
}

static inline void
ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
{
	atomic64_add((long long)packets, &(counter)->packets);
}

static inline u64
ip_set_get_bytes(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->bytes);
}

static inline u64
ip_set_get_packets(const struct ip_set_counter *counter)
{
	return (u64)atomic64_read(&(counter)->packets);
}

static inline bool
ip_set_match_counter(u64 counter, u64 match, u8 op)
{
	switch (op) {
	case IPSET_COUNTER_NONE:
		return true;
	case IPSET_COUNTER_EQ:
		return counter == match;
	case IPSET_COUNTER_NE:
		return counter != match;
	case IPSET_COUNTER_LT:
		return counter < match;
	case IPSET_COUNTER_GT:
		return counter > match;
	}
	return false;
}

static inline void
ip_set_update_counter(struct ip_set_counter *counter,
		      const struct ip_set_ext *ext, u32 flags)
{
	if (ext->packets != ULLONG_MAX &&
	    !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
		ip_set_add_bytes(ext->bytes, counter);
		ip_set_add_packets(ext->packets, counter);
	}
}

static inline bool
ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
{
	return nla_put_net64(skb, IPSET_ATTR_BYTES,
			     cpu_to_be64(ip_set_get_bytes(counter)),
			     IPSET_ATTR_PAD) ||
	       nla_put_net64(skb, IPSET_ATTR_PACKETS,
			     cpu_to_be64(ip_set_get_packets(counter)),
			     IPSET_ATTR_PAD);
}

static inline void
ip_set_init_counter(struct ip_set_counter *counter,
		    const struct ip_set_ext *ext)
{
	if (ext->bytes != ULLONG_MAX)
		atomic64_set(&(counter)->bytes, (long long)(ext->bytes));
	if (ext->packets != ULLONG_MAX)
		atomic64_set(&(counter)->packets, (long long)(ext->packets));
}

static inline void
ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
		   const struct ip_set_ext *ext,
		   struct ip_set_ext *mext, u32 flags)
{
	mext->skbinfo = *skbinfo;
}

static inline bool
ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
{
	/* Send nonzero parameters only */
	return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
		nla_put_net64(skb, IPSET_ATTR_SKBMARK,
			      cpu_to_be64((u64)skbinfo->skbmark << 32 |
					  skbinfo->skbmarkmask),
			      IPSET_ATTR_PAD)) ||
	       (skbinfo->skbprio &&
		nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
			      cpu_to_be32(skbinfo->skbprio))) ||
	       (skbinfo->skbqueue &&
		nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
			      cpu_to_be16(skbinfo->skbqueue)));
}

static inline void
ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
		    const struct ip_set_ext *ext)
{
	*skbinfo = ext->skbinfo;
}

#define IP_SET_INIT_KEXT(skb, opt, set)			\
	{ .bytes = (skb)->len, .packets = 1,		\
	  .timeout = ip_set_adt_opt_timeout(opt, set) }

#define IP_SET_INIT_UEXT(set)				\
	{ .bytes = ULLONG_MAX, .packets = ULLONG_MAX,	\
	  .timeout = (set)->timeout }

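/* These initializers build the ip_set_ext used by kadt (packet path)
 * and uadt (netlink) respectively.  Illustrative sketch of how a set
 * type typically uses them (function names follow hash:ip, bodies
 * abridged):
 *
 *	static int
 *	hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb, ...)
 *	{
 *		struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 *		...
 *	}
 *
 *	static int
 *	hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], ...)
 *	{
 *		struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 *		...
 *	}
 */
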
#define IPSET_CONCAT(a, b)		a##b
#define IPSET_TOKEN(a, b)		IPSET_CONCAT(a, b)

#endif /*_IP_SET_H */