/* SPDX-License-Identifier: LGPL-2.1-only */
/*
 * Copyright (c) 2003-2010 Thomas Graf <tgraf@suug.ch>
 */

/**
 * @ingroup rtnl
 * @defgroup rule Routing Rules
 * @brief
 * @{
 */

#include <netlink-private/netlink.h>
#include <netlink/netlink.h>
#include <netlink/utils.h>
#include <netlink/route/rtnl.h>
#include <netlink/route/rule.h>
#include <inttypes.h>
#include <linux/fib_rules.h>

/** @cond SKIP */
#define RULE_ATTR_FAMILY	0x000001
#define RULE_ATTR_TABLE		0x000002
#define RULE_ATTR_ACTION	0x000004
#define RULE_ATTR_FLAGS		0x000008
#define RULE_ATTR_IIFNAME	0x000010
#define RULE_ATTR_OIFNAME	0x000020
#define RULE_ATTR_PRIO		0x000040
#define RULE_ATTR_MARK		0x000080
#define RULE_ATTR_MASK		0x000100
#define RULE_ATTR_GOTO		0x000200
#define RULE_ATTR_SRC		0x000400
#define RULE_ATTR_DST		0x000800
#define RULE_ATTR_DSFIELD	0x001000
#define RULE_ATTR_FLOW		0x002000
#define RULE_ATTR_L3MDEV	0x004000
#define RULE_ATTR_PROTOCOL	0x008000
#define RULE_ATTR_IP_PROTO	0x010000
#define RULE_ATTR_SPORT		0x020000
#define RULE_ATTR_DPORT		0x040000

static struct nl_cache_ops rtnl_rule_ops;
static struct nl_object_ops rule_obj_ops;
/** @endcond */

static void rule_free_data(struct nl_object *c)
{
	struct rtnl_rule *rule = nl_object_priv(c);

	if (!rule)
		return;

	nl_addr_put(rule->r_src);
	nl_addr_put(rule->r_dst);
}

static int rule_clone(struct nl_object *_dst, struct nl_object *_src)
{
	struct rtnl_rule *dst = nl_object_priv(_dst);
	struct rtnl_rule *src = nl_object_priv(_src);

	dst->r_src = NULL;
	dst->r_dst = NULL;

	if (src->r_src)
		if (!(dst->r_src = nl_addr_clone(src->r_src)))
			return -NLE_NOMEM;

	if (src->r_dst)
		if (!(dst->r_dst = nl_addr_clone(src->r_dst)))
			return -NLE_NOMEM;

	return 0;
}

static struct nla_policy rule_policy[FRA_MAX+1] = {
	[FRA_TABLE]	= { .type = NLA_U32 },
	[FRA_IIFNAME]	= { .type = NLA_STRING, .maxlen = IFNAMSIZ },
	[FRA_OIFNAME]	= { .type = NLA_STRING, .maxlen = IFNAMSIZ },
	[FRA_PRIORITY]	= { .type = NLA_U32 },
	[FRA_FWMARK]	= { .type = NLA_U32 },
	[FRA_FWMASK]	= { .type = NLA_U32 },
	[FRA_GOTO]	= { .type = NLA_U32 },
	[FRA_FLOW]	= { .type = NLA_U32 },
	[FRA_L3MDEV]	= { .type = NLA_U8 },
	[FRA_PROTOCOL]	= { .type = NLA_U8 },
	[FRA_IP_PROTO]	= { .type = NLA_U8 },
	[FRA_SPORT_RANGE] = { .minlen = sizeof(struct fib_rule_port_range),
			      .maxlen = sizeof(struct fib_rule_port_range) },
	[FRA_DPORT_RANGE] = { .minlen = sizeof(struct fib_rule_port_range),
			      .maxlen = sizeof(struct fib_rule_port_range) },
};

static int rule_msg_parser(struct nl_cache_ops *ops, struct sockaddr_nl *who,
			   struct nlmsghdr *n, struct nl_parser_param *pp)
{
	struct rtnl_rule *rule;
	struct fib_rule_hdr *frh;
	struct nlattr *tb[FRA_MAX+1];
	int err = 1, family;

	rule = rtnl_rule_alloc();
	if (!rule) {
		err = -NLE_NOMEM;
		goto errout;
	}

	rule->ce_msgtype = n->nlmsg_type;
	frh = nlmsg_data(n);

	err = nlmsg_parse(n, sizeof(*frh), tb, FRA_MAX, rule_policy);
	if (err < 0)
		goto errout;

	rule->r_family = family = frh->family;
	rule->r_table = frh->table;
	rule->r_action = frh->action;
	rule->r_flags = frh->flags;

	rule->ce_mask = (RULE_ATTR_FAMILY | RULE_ATTR_ACTION | RULE_ATTR_FLAGS);
	if (rule->r_table)
		rule->ce_mask |= RULE_ATTR_TABLE;

	/* ipv4 only */
	if (frh->tos) {
		rule->r_dsfield = frh->tos;
		rule->ce_mask |= RULE_ATTR_DSFIELD;
	}

	if (tb[FRA_TABLE]) {
		rule->r_table = nla_get_u32(tb[FRA_TABLE]);
		if (rule->r_table)
			rule->ce_mask |= RULE_ATTR_TABLE;
	}

	if (tb[FRA_IIFNAME]) {
		nla_strlcpy(rule->r_iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		rule->ce_mask |= RULE_ATTR_IIFNAME;
	}

	if (tb[FRA_OIFNAME]) {
		nla_strlcpy(rule->r_oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		rule->ce_mask |= RULE_ATTR_OIFNAME;
	}

	if (tb[FRA_PRIORITY]) {
		rule->r_prio = nla_get_u32(tb[FRA_PRIORITY]);
		rule->ce_mask |= RULE_ATTR_PRIO;
	}

	if (tb[FRA_FWMARK]) {
		rule->r_mark = nla_get_u32(tb[FRA_FWMARK]);
		rule->ce_mask |= RULE_ATTR_MARK;
	}

	if (tb[FRA_FWMASK]) {
		rule->r_mask = nla_get_u32(tb[FRA_FWMASK]);
		rule->ce_mask |= RULE_ATTR_MASK;
	}

	if (tb[FRA_GOTO]) {
		rule->r_goto = nla_get_u32(tb[FRA_GOTO]);
		rule->ce_mask |= RULE_ATTR_GOTO;
	}

	if (tb[FRA_SRC]) {
		if (!(rule->r_src = nl_addr_alloc_attr(tb[FRA_SRC], family)))
			goto errout_enomem;

		nl_addr_set_prefixlen(rule->r_src, frh->src_len);
		rule->ce_mask |= RULE_ATTR_SRC;
	}

	if (tb[FRA_DST]) {
		if (!(rule->r_dst = nl_addr_alloc_attr(tb[FRA_DST], family)))
			goto errout_enomem;
		nl_addr_set_prefixlen(rule->r_dst, frh->dst_len);
		rule->ce_mask |= RULE_ATTR_DST;
	}

	/* ipv4 only */
	if (tb[FRA_FLOW]) {
		rule->r_flow = nla_get_u32(tb[FRA_FLOW]);
		rule->ce_mask |= RULE_ATTR_FLOW;
	}

	if (tb[FRA_L3MDEV]) {
		rule->r_l3mdev = nla_get_u8(tb[FRA_L3MDEV]);
		rule->ce_mask |= RULE_ATTR_L3MDEV;
	}

	if (tb[FRA_PROTOCOL]) {
		rule->r_protocol = nla_get_u8(tb[FRA_PROTOCOL]);
		rule->ce_mask |= RULE_ATTR_PROTOCOL;
	}

	if (tb[FRA_IP_PROTO]) {
		rule->r_ip_proto = nla_get_u8(tb[FRA_IP_PROTO]);
		rule->ce_mask |= RULE_ATTR_IP_PROTO;
	}

	if (tb[FRA_SPORT_RANGE]) {
		struct fib_rule_port_range *pr;

		pr = nla_data(tb[FRA_SPORT_RANGE]);
		rule->r_sport = *pr;
		rule->ce_mask |= RULE_ATTR_SPORT;
	}

	if (tb[FRA_DPORT_RANGE]) {
		struct fib_rule_port_range *pr;

		pr = nla_data(tb[FRA_DPORT_RANGE]);
		rule->r_dport = *pr;
		rule->ce_mask |= RULE_ATTR_DPORT;
	}

	err = pp->pp_cb((struct nl_object *) rule, pp);
errout:
	rtnl_rule_put(rule);
	return err;

errout_enomem:
	err = -NLE_NOMEM;
	goto errout;
}

static int rule_request_update(struct nl_cache *c, struct nl_sock *h)
{
	return nl_rtgen_request(h, RTM_GETRULE, AF_UNSPEC, NLM_F_DUMP);
}

static void rule_dump_line(struct nl_object *o, struct nl_dump_params *p)
{
	struct rtnl_rule *r = (struct rtnl_rule *) o;
	char buf[128];

	nl_dump_line(p, "%8d ", (r->ce_mask & RULE_ATTR_PRIO) ? r->r_prio : 0);
	nl_dump(p, "%s ", nl_af2str(r->r_family, buf, sizeof(buf)));

	if (r->ce_mask & RULE_ATTR_SRC)
		nl_dump(p, "from %s ",
			nl_addr2str(r->r_src, buf, sizeof(buf)));

	if (r->ce_mask & RULE_ATTR_DST)
		nl_dump(p, "to %s ",
			nl_addr2str(r->r_dst, buf, sizeof(buf)));

	if (r->ce_mask & RULE_ATTR_DSFIELD)
		nl_dump(p, "tos %u ", r->r_dsfield);

	if (r->ce_mask & (RULE_ATTR_MARK | RULE_ATTR_MASK))
		nl_dump(p, "mark %#x/%#x", r->r_mark, r->r_mask);

	if (r->ce_mask & RULE_ATTR_IIFNAME)
		nl_dump(p, "iif %s ", r->r_iifname);

	if (r->ce_mask & RULE_ATTR_OIFNAME)
		nl_dump(p, "oif %s ", r->r_oifname);

	if (r->ce_mask & RULE_ATTR_TABLE)
		nl_dump(p, "lookup %s ",
			rtnl_route_table2str(r->r_table, buf, sizeof(buf)));

	if (r->ce_mask & RULE_ATTR_L3MDEV)
		nl_dump(p, "lookup [l3mdev-table] ");

	if (r->ce_mask & RULE_ATTR_IP_PROTO)
		nl_dump(p, "ipproto %s ",
			nl_ip_proto2str(r->r_ip_proto, buf, sizeof(buf)));

	if (r->ce_mask & RULE_ATTR_SPORT) {
		if (r->r_sport.start == r->r_sport.end)
			nl_dump(p, "sport %u ", r->r_sport.start);
		else
			nl_dump(p, "sport %u-%u ",
				r->r_sport.start, r->r_sport.end);
	}

	if (r->ce_mask & RULE_ATTR_DPORT) {
		if (r->r_dport.start == r->r_dport.end)
			nl_dump(p, "dport %u ", r->r_dport.start);
		else
			nl_dump(p, "dport %u-%u ",
				r->r_dport.start, r->r_dport.end);
	}

	if (r->ce_mask & RULE_ATTR_PROTOCOL)
		nl_dump(p, "protocol %s ",
			rtnl_route_proto2str(r->r_protocol, buf, sizeof(buf)));

	if (r->ce_mask & RULE_ATTR_FLOW)
		nl_dump(p, "flow %s ",
			rtnl_realms2str(r->r_flow, buf, sizeof(buf)));

	if (r->ce_mask & RULE_ATTR_GOTO)
		nl_dump(p, "goto %u ", r->r_goto);

	if (r->ce_mask & RULE_ATTR_ACTION)
		nl_dump(p, "action %s",
			nl_rtntype2str(r->r_action, buf, sizeof(buf)));

	nl_dump(p, "\n");
}

static void rule_dump_details(struct nl_object *obj, struct nl_dump_params *p)
{
	rule_dump_line(obj, p);
}

static void rule_dump_stats(struct nl_object *obj, struct nl_dump_params *p)
{
	rule_dump_details(obj, p);
}

static uint64_t rule_compare(struct nl_object *_a, struct nl_object *_b,
			     uint64_t attrs, int flags)
{
	struct rtnl_rule *a = (struct rtnl_rule *) _a;
	struct rtnl_rule *b = (struct rtnl_rule *) _b;
	uint64_t diff = 0;

#define RULE_DIFF(ATTR, EXPR) ATTR_DIFF(attrs, RULE_ATTR_##ATTR, a, b, EXPR)

	diff |= RULE_DIFF(FAMILY,	a->r_family != b->r_family);
	diff |= RULE_DIFF(TABLE,	a->r_table != b->r_table);
	diff |= RULE_DIFF(ACTION,	a->r_action != b->r_action);
	diff |= RULE_DIFF(IIFNAME,	strcmp(a->r_iifname, b->r_iifname));
	diff |= RULE_DIFF(OIFNAME,	strcmp(a->r_oifname, b->r_oifname));
	diff |= RULE_DIFF(PRIO,		a->r_prio != b->r_prio);
	diff |= RULE_DIFF(MARK,		a->r_mark != b->r_mark);
	diff |= RULE_DIFF(MASK,		a->r_mask != b->r_mask);
	diff |= RULE_DIFF(GOTO,		a->r_goto != b->r_goto);
	diff |= RULE_DIFF(SRC,		nl_addr_cmp(a->r_src, b->r_src));
	diff |= RULE_DIFF(DST,		nl_addr_cmp(a->r_dst, b->r_dst));
	diff |= RULE_DIFF(DSFIELD,	a->r_dsfield != b->r_dsfield);
	diff |= RULE_DIFF(FLOW,		a->r_flow != b->r_flow);

#undef RULE_DIFF

	return diff;
}

static const struct trans_tbl rule_attrs[] = {
	__ADD(RULE_ATTR_FAMILY, family),
	__ADD(RULE_ATTR_TABLE, table),
	__ADD(RULE_ATTR_ACTION, action),
	__ADD(RULE_ATTR_IIFNAME, iifname),
	__ADD(RULE_ATTR_OIFNAME, oifname),
	__ADD(RULE_ATTR_PRIO, prio),
	__ADD(RULE_ATTR_MARK, mark),
	__ADD(RULE_ATTR_MASK, mask),
	__ADD(RULE_ATTR_GOTO, goto),
	__ADD(RULE_ATTR_SRC, src),
	__ADD(RULE_ATTR_DST, dst),
	__ADD(RULE_ATTR_DSFIELD, dsfield),
	__ADD(RULE_ATTR_FLOW, flow),
};

static char *rule_attrs2str(int attrs, char *buf, size_t len)
{
	return __flags2str(attrs, buf, len, rule_attrs,
			   ARRAY_SIZE(rule_attrs));
}

/**
 * @name Allocation/Freeing
 * @{
 */

struct rtnl_rule *rtnl_rule_alloc(void)
{
	return (struct rtnl_rule *) nl_object_alloc(&rule_obj_ops);
}

void rtnl_rule_put(struct rtnl_rule *rule)
{
	nl_object_put((struct nl_object *) rule);
}

/** @} */

/**
 * @name Cache Management
 * @{
 */

/**
 * Build a rule cache including all rules currently configured in the kernel.
 * @arg sock		Netlink socket.
 * @arg family		Address family or AF_UNSPEC.
 * @arg result		Pointer to store resulting cache.
 *
 * Allocates a new rule cache, initializes it properly and updates it
 * to include all rules currently configured in the kernel.
 *
 * @return 0 on success or a negative error code.
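 *
 * Minimal usage sketch (illustrative only; assumes \c sock is an already
 * connected NETLINK_ROUTE socket and omits most error handling):
 * @code
 * struct nl_dump_params params = {
 *	.dp_type = NL_DUMP_LINE,
 *	.dp_fd = stdout,
 * };
 * struct nl_cache *cache;
 *
 * if (rtnl_rule_alloc_cache(sock, AF_UNSPEC, &cache) < 0)
 *	return;
 *
 * nl_cache_dump(cache, &params);
 * nl_cache_free(cache);
 * @endcode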
 */
int rtnl_rule_alloc_cache(struct nl_sock *sock, int family,
			  struct nl_cache **result)
{
	struct nl_cache * cache;
	int err;

	if (!(cache = nl_cache_alloc(&rtnl_rule_ops)))
		return -NLE_NOMEM;

	cache->c_iarg1 = family;

	if (sock && (err = nl_cache_refill(sock, cache)) < 0) {
		free(cache);
		return err;
	}

	*result = cache;
	return 0;
}

/** @} */

/**
 * @name Rule Addition
 * @{
 */

static int build_rule_msg(struct rtnl_rule *tmpl, int cmd, int flags,
			  struct nl_msg **result)
{
	struct nl_msg *msg;
	struct fib_rule_hdr frh = {
		.family = tmpl->r_family,
		.table = tmpl->r_table,
		.action = tmpl->r_action,
		.flags = tmpl->r_flags,
		.tos = tmpl->r_dsfield,
	};

	if (!(tmpl->ce_mask & RULE_ATTR_FAMILY))
		return -NLE_MISSING_ATTR;

	msg = nlmsg_alloc_simple(cmd, flags);
	if (!msg)
		return -NLE_NOMEM;

	if (tmpl->ce_mask & RULE_ATTR_SRC)
		frh.src_len = nl_addr_get_prefixlen(tmpl->r_src);

	if (tmpl->ce_mask & RULE_ATTR_DST)
		frh.dst_len = nl_addr_get_prefixlen(tmpl->r_dst);

	if (nlmsg_append(msg, &frh, sizeof(frh), NLMSG_ALIGNTO) < 0)
		goto nla_put_failure;

	/* The table is also sent as a full 32-bit attribute; it replaces the
	 * 8-bit field in the header and allows for more than 256 tables. */
	NLA_PUT_U32(msg, FRA_TABLE, tmpl->r_table);

	if (tmpl->ce_mask & RULE_ATTR_SRC)
		NLA_PUT_ADDR(msg, FRA_SRC, tmpl->r_src);

	if (tmpl->ce_mask & RULE_ATTR_DST)
		NLA_PUT_ADDR(msg, FRA_DST, tmpl->r_dst);

	if (tmpl->ce_mask & RULE_ATTR_IIFNAME)
		NLA_PUT_STRING(msg, FRA_IIFNAME, tmpl->r_iifname);

	if (tmpl->ce_mask & RULE_ATTR_OIFNAME)
		NLA_PUT_STRING(msg, FRA_OIFNAME, tmpl->r_oifname);

	if (tmpl->ce_mask & RULE_ATTR_PRIO)
		NLA_PUT_U32(msg, FRA_PRIORITY, tmpl->r_prio);

	if (tmpl->ce_mask & RULE_ATTR_MARK)
		NLA_PUT_U32(msg, FRA_FWMARK, tmpl->r_mark);

	if (tmpl->ce_mask & RULE_ATTR_MASK)
		NLA_PUT_U32(msg, FRA_FWMASK, tmpl->r_mask);

	if (tmpl->ce_mask & RULE_ATTR_GOTO)
		NLA_PUT_U32(msg, FRA_GOTO, tmpl->r_goto);

	if (tmpl->ce_mask & RULE_ATTR_FLOW)
		NLA_PUT_U32(msg, FRA_FLOW, tmpl->r_flow);

	if (tmpl->ce_mask & RULE_ATTR_L3MDEV)
		NLA_PUT_U8(msg, FRA_L3MDEV, tmpl->r_l3mdev);

	if (tmpl->ce_mask & RULE_ATTR_IP_PROTO)
		NLA_PUT_U8(msg, FRA_IP_PROTO, tmpl->r_ip_proto);

	if (tmpl->ce_mask & RULE_ATTR_SPORT)
		NLA_PUT(msg, FRA_SPORT_RANGE, sizeof(tmpl->r_sport),
			&tmpl->r_sport);

	if (tmpl->ce_mask & RULE_ATTR_DPORT)
		NLA_PUT(msg, FRA_DPORT_RANGE, sizeof(tmpl->r_dport),
			&tmpl->r_dport);

	if (tmpl->ce_mask & RULE_ATTR_PROTOCOL)
		NLA_PUT_U8(msg, FRA_PROTOCOL, tmpl->r_protocol);

	*result = msg;
	return 0;

nla_put_failure:
	nlmsg_free(msg);
	return -NLE_MSGSIZE;
}

/**
 * Build netlink request message to add a new rule
 * @arg tmpl		template with data of new rule
 * @arg flags		additional netlink message flags
 * @arg result		Result pointer
 *
 * Builds a new netlink message requesting the addition of a new
 * rule. The netlink message header isn't fully equipped with
 * all relevant fields and must thus be sent out via nl_send_auto_complete()
 * or supplemented as needed. \a tmpl must contain the attributes of the new
 * rule set via the \c rtnl_rule_set_* functions.
 *
 * @return 0 on success or a negative error code.
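 *
 * Usage sketch (illustrative only; \c sk is assumed to be a connected
 * NETLINK_ROUTE socket and \c tmpl a fully populated rule template):
 * @code
 * struct nl_msg *msg;
 * int err;
 *
 * if ((err = rtnl_rule_build_add_request(tmpl, NLM_F_EXCL, &msg)) < 0)
 *	return err;
 *
 * err = nl_send_auto_complete(sk, msg);
 * nlmsg_free(msg);
 * if (err < 0)
 *	return err;
 *
 * err = nl_wait_for_ack(sk);
 * @endcode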
 */
int rtnl_rule_build_add_request(struct rtnl_rule *tmpl, int flags,
				struct nl_msg **result)
{
	return build_rule_msg(tmpl, RTM_NEWRULE, NLM_F_CREATE | flags,
			      result);
}

/**
 * Add a new rule
 * @arg sk		Netlink socket.
 * @arg tmpl		template with requested changes
 * @arg flags		additional netlink message flags
 *
 * Builds a netlink message by calling rtnl_rule_build_add_request(),
 * sends the request to the kernel and waits for the next ACK to be
 * received, i.e. it blocks until the request has been fulfilled.
 *
 * @return 0 on success or a negative error code if an error occurred.
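 *
 * Usage sketch (illustrative only; the priority, table number and socket
 * \c sk are made-up values, and error handling is abbreviated):
 * @code
 * struct rtnl_rule *rule = rtnl_rule_alloc();
 *
 * rtnl_rule_set_family(rule, AF_INET);
 * rtnl_rule_set_prio(rule, 100);
 * rtnl_rule_set_table(rule, 10);
 * rtnl_rule_set_action(rule, FR_ACT_TO_TBL);
 *
 * if (rtnl_rule_add(sk, rule, NLM_F_EXCL) < 0)
 *	fprintf(stderr, "unable to add rule\n");
 *
 * rtnl_rule_put(rule);
 * @endcode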
 */
int rtnl_rule_add(struct nl_sock *sk, struct rtnl_rule *tmpl, int flags)
{
	struct nl_msg *msg;
	int err;

	if ((err = rtnl_rule_build_add_request(tmpl, flags, &msg)) < 0)
		return err;

	err = nl_send_auto_complete(sk, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	return wait_for_ack(sk);
}

/** @} */

/**
 * @name Rule Deletion
 * @{
 */

/**
 * Build a netlink request message to delete a rule
 * @arg rule		rule to delete
 * @arg flags		additional netlink message flags
 * @arg result		Result pointer
 *
 * Builds a new netlink message requesting the deletion of a rule.
 * The netlink message header isn't fully equipped with all relevant
 * fields and must thus be sent out via nl_send_auto_complete()
 * or supplemented as needed. \a rule must point to an existing
 * rule.
 *
 * @return 0 on success or a negative error code.
 */
int rtnl_rule_build_delete_request(struct rtnl_rule *rule, int flags,
				   struct nl_msg **result)
{
	return build_rule_msg(rule, RTM_DELRULE, flags, result);
}

/**
 * Delete a rule
 * @arg sk		Netlink socket.
 * @arg rule		rule to delete
 * @arg flags		additional netlink message flags
 *
 * Builds a netlink message by calling rtnl_rule_build_delete_request(),
 * sends the request to the kernel and waits for the next ACK to be
 * received, i.e. it blocks until the request has been fulfilled.
 *
 * @return 0 on success or a negative error code if an error occurred.
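 *
 * Usage sketch (illustrative only; \c rule would typically be obtained from
 * a rule cache or be a template matching the rule to remove):
 * @code
 * if (rtnl_rule_delete(sk, rule, 0) < 0)
 *	fprintf(stderr, "unable to delete rule\n");
 * @endcode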
 */
int rtnl_rule_delete(struct nl_sock *sk, struct rtnl_rule *rule, int flags)
{
	struct nl_msg *msg;
	int err;

	if ((err = rtnl_rule_build_delete_request(rule, flags, &msg)) < 0)
		return err;

	err = nl_send_auto_complete(sk, msg);
	nlmsg_free(msg);
	if (err < 0)
		return err;

	return wait_for_ack(sk);
}

/** @} */

/**
 * @name Attribute Modification
 * @{
 */

void rtnl_rule_set_family(struct rtnl_rule *rule, int family)
{
	rule->r_family = family;
	rule->ce_mask |= RULE_ATTR_FAMILY;
}

int rtnl_rule_get_family(struct rtnl_rule *rule)
{
	if (rule->ce_mask & RULE_ATTR_FAMILY)
		return rule->r_family;
	else
		return AF_UNSPEC;
}

void rtnl_rule_set_prio(struct rtnl_rule *rule, uint32_t prio)
{
	rule->r_prio = prio;
	rule->ce_mask |= RULE_ATTR_PRIO;
}

uint32_t rtnl_rule_get_prio(struct rtnl_rule *rule)
{
	return rule->r_prio;
}

void rtnl_rule_set_mark(struct rtnl_rule *rule, uint32_t mark)
{
	rule->r_mark = mark;
	rule->ce_mask |= RULE_ATTR_MARK;
}

uint32_t rtnl_rule_get_mark(struct rtnl_rule *rule)
{
	return rule->r_mark;
}

void rtnl_rule_set_mask(struct rtnl_rule *rule, uint32_t mask)
{
	rule->r_mask = mask;
	rule->ce_mask |= RULE_ATTR_MASK;
}

uint32_t rtnl_rule_get_mask(struct rtnl_rule *rule)
{
	return rule->r_mask;
}

void rtnl_rule_set_table(struct rtnl_rule *rule, uint32_t table)
{
	rule->r_table = table;
	rule->ce_mask |= RULE_ATTR_TABLE;
}

uint32_t rtnl_rule_get_table(struct rtnl_rule *rule)
{
	return rule->r_table;
}

void rtnl_rule_set_dsfield(struct rtnl_rule *rule, uint8_t dsfield)
{
	rule->r_dsfield = dsfield;
	rule->ce_mask |= RULE_ATTR_DSFIELD;
}

uint8_t rtnl_rule_get_dsfield(struct rtnl_rule *rule)
{
	return rule->r_dsfield;
}

static inline int __assign_addr(struct rtnl_rule *rule, struct nl_addr **pos,
			        struct nl_addr *new, int flag)
{
	if (rule->ce_mask & RULE_ATTR_FAMILY) {
		if (new->a_family != rule->r_family)
			return -NLE_AF_MISMATCH;
	} else
		rule->r_family = new->a_family;

	if (*pos)
		nl_addr_put(*pos);

	nl_addr_get(new);
	*pos = new;

	rule->ce_mask |= (flag | RULE_ATTR_FAMILY);

	return 0;
}

int rtnl_rule_set_src(struct rtnl_rule *rule, struct nl_addr *src)
{
	return __assign_addr(rule, &rule->r_src, src, RULE_ATTR_SRC);
}

struct nl_addr *rtnl_rule_get_src(struct rtnl_rule *rule)
{
	return rule->r_src;
}

int rtnl_rule_set_dst(struct rtnl_rule *rule, struct nl_addr *dst)
{
	return __assign_addr(rule, &rule->r_dst, dst, RULE_ATTR_DST);
}

struct nl_addr *rtnl_rule_get_dst(struct rtnl_rule *rule)
{
	return rule->r_dst;
}

int rtnl_rule_set_iif(struct rtnl_rule *rule, const char *dev)
{
	if (strlen(dev) > IFNAMSIZ-1)
		return -NLE_RANGE;

	strcpy(rule->r_iifname, dev);
	rule->ce_mask |= RULE_ATTR_IIFNAME;
	return 0;
}

char *rtnl_rule_get_iif(struct rtnl_rule *rule)
{
	if (rule->ce_mask & RULE_ATTR_IIFNAME)
		return rule->r_iifname;
	else
		return NULL;
}

int rtnl_rule_set_oif(struct rtnl_rule *rule, const char *dev)
{
	if (strlen(dev) > IFNAMSIZ-1)
		return -NLE_RANGE;

	strcpy(rule->r_oifname, dev);
	rule->ce_mask |= RULE_ATTR_OIFNAME;
	return 0;
}

char *rtnl_rule_get_oif(struct rtnl_rule *rule)
{
	if (rule->ce_mask & RULE_ATTR_OIFNAME)
		return rule->r_oifname;
	else
		return NULL;
}

void rtnl_rule_set_action(struct rtnl_rule *rule, uint8_t action)
{
	rule->r_action = action;
	rule->ce_mask |= RULE_ATTR_ACTION;
}

uint8_t rtnl_rule_get_action(struct rtnl_rule *rule)
{
	return rule->r_action;
}

/**
 * Set l3mdev value of the rule (FRA_L3MDEV)
 * @arg rule		rule
 * @arg value		value to set
 *
 * Sets the l3mdev attribute to \a value. Currently supported values
 * are only 1 (set it) and -1 (unset it). All other values
 * are reserved.
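 *
 * Illustrative sketch (the attribute is a boolean as far as the kernel is
 * concerned):
 * @code
 * rtnl_rule_set_l3mdev(rule, 1);   // direct the lookup to the l3mdev table
 * rtnl_rule_set_l3mdev(rule, -1);  // clear the attribute again
 * @endcode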
 */
void rtnl_rule_set_l3mdev(struct rtnl_rule *rule, int value)
{
	if (value >= 0) {
		rule->r_l3mdev = (uint8_t) value;
		rule->ce_mask |= RULE_ATTR_L3MDEV;
	} else {
		rule->r_l3mdev = 0;
		rule->ce_mask &= ~((uint32_t) RULE_ATTR_L3MDEV);
	}
}

/**
 * Get l3mdev value of the rule (FRA_L3MDEV)
 * @arg rule		rule
 *
 * @return a negative error code, including -NLE_MISSING_ATTR
 *   if the property is unset. Otherwise returns a non-negative
 *   value. As FRA_L3MDEV is a boolean, the only expected
 *   value at the moment is 1.
 */
int rtnl_rule_get_l3mdev(struct rtnl_rule *rule)
{
	if (!rule)
		return -NLE_INVAL;
	if (!(rule->ce_mask & RULE_ATTR_L3MDEV))
		return -NLE_MISSING_ATTR;
	return rule->r_l3mdev;
}

int rtnl_rule_set_protocol(struct rtnl_rule *rule, uint8_t protocol)
{
	if (protocol) {
		rule->r_protocol = protocol;
		rule->ce_mask |= RULE_ATTR_PROTOCOL;
	} else {
		rule->r_protocol = 0;
		rule->ce_mask &= ~((uint32_t) RULE_ATTR_PROTOCOL);
	}
	return 0;
}

int rtnl_rule_get_protocol(struct rtnl_rule *rule, uint8_t *protocol)
{
	if (!(rule->ce_mask & RULE_ATTR_PROTOCOL))
		return -NLE_INVAL;

	*protocol = rule->r_protocol;
	return 0;
}

int rtnl_rule_set_ipproto(struct rtnl_rule *rule, uint8_t ip_proto)
{
	if (ip_proto) {
		rule->r_ip_proto = ip_proto;
		rule->ce_mask |= RULE_ATTR_IP_PROTO;
	} else {
		rule->r_ip_proto = 0;
		rule->ce_mask &= ~((uint32_t) RULE_ATTR_IP_PROTO);
	}
	return 0;
}

int rtnl_rule_get_ipproto(struct rtnl_rule *rule, uint8_t *ip_proto)
{
	if (!(rule->ce_mask & RULE_ATTR_IP_PROTO))
		return -NLE_INVAL;

	*ip_proto = rule->r_ip_proto;
	return 0;
}

static int __rtnl_rule_set_port(struct fib_rule_port_range *prange,
				uint16_t start, uint16_t end,
				uint64_t attr, uint64_t *mask)
{
	if ((start && end < start) || (end && !start))
		return -NLE_INVAL;

	if (start) {
		prange->start = start;
		prange->end = end;
		*mask |= attr;
	} else {
		prange->start = 0;
		prange->end = 0;
		*mask &= ~attr;
	}
	return 0;
}

int rtnl_rule_set_sport(struct rtnl_rule *rule, uint16_t sport)
{
	return __rtnl_rule_set_port(&rule->r_sport, sport, sport,
				    RULE_ATTR_SPORT, &rule->ce_mask);
}

int rtnl_rule_set_sport_range(struct rtnl_rule *rule, uint16_t start,
			      uint16_t end)
{
	return __rtnl_rule_set_port(&rule->r_sport, start, end,
				    RULE_ATTR_SPORT, &rule->ce_mask);
}

int rtnl_rule_get_sport(struct rtnl_rule *rule, uint16_t *start, uint16_t *end)
{
	if (!(rule->ce_mask & RULE_ATTR_SPORT))
		return -NLE_INVAL;

	*start = rule->r_sport.start;
	*end = rule->r_sport.end;
	return 0;
}

int rtnl_rule_set_dport(struct rtnl_rule *rule, uint16_t dport)
{
	return __rtnl_rule_set_port(&rule->r_dport, dport, dport,
				    RULE_ATTR_DPORT, &rule->ce_mask);
}

int rtnl_rule_set_dport_range(struct rtnl_rule *rule, uint16_t start,
			      uint16_t end)
{
	return __rtnl_rule_set_port(&rule->r_dport, start, end,
				    RULE_ATTR_DPORT, &rule->ce_mask);
}

int rtnl_rule_get_dport(struct rtnl_rule *rule, uint16_t *start, uint16_t *end)
{
	if (!(rule->ce_mask & RULE_ATTR_DPORT))
		return -NLE_INVAL;

	*start = rule->r_dport.start;
	*end = rule->r_dport.end;
	return 0;
}

void rtnl_rule_set_realms(struct rtnl_rule *rule, uint32_t realms)
{
	rule->r_flow = realms;
	rule->ce_mask |= RULE_ATTR_FLOW;
}

uint32_t rtnl_rule_get_realms(struct rtnl_rule *rule)
{
	return rule->r_flow;
}

void rtnl_rule_set_goto(struct rtnl_rule *rule, uint32_t ref)
{
	rule->r_goto = ref;
	rule->ce_mask |= RULE_ATTR_GOTO;
}

uint32_t rtnl_rule_get_goto(struct rtnl_rule *rule)
{
	return rule->r_goto;
}

/** @} */

static struct nl_object_ops rule_obj_ops = {
	.oo_name		= "route/rule",
	.oo_size		= sizeof(struct rtnl_rule),
	.oo_free_data		= rule_free_data,
	.oo_clone		= rule_clone,
	.oo_dump = {
	    [NL_DUMP_LINE]	= rule_dump_line,
	    [NL_DUMP_DETAILS]	= rule_dump_details,
	    [NL_DUMP_STATS]	= rule_dump_stats,
	},
	.oo_compare		= rule_compare,
	.oo_attrs2str		= rule_attrs2str,
	.oo_id_attrs		= ~0,
};

static struct nl_af_group rule_groups[] = {
	{ AF_INET,	RTNLGRP_IPV4_RULE },
	{ AF_INET6,	RTNLGRP_IPV6_RULE },
	{ END_OF_GROUP_LIST },
};

static struct nl_cache_ops rtnl_rule_ops = {
	.co_name		= "route/rule",
	.co_hdrsize		= sizeof(struct fib_rule_hdr),
	.co_msgtypes		= {
					{ RTM_NEWRULE, NL_ACT_NEW, "new" },
					{ RTM_DELRULE, NL_ACT_DEL, "del" },
					{ RTM_GETRULE, NL_ACT_GET, "get" },
					END_OF_MSGTYPES_LIST,
				  },
	.co_protocol		= NETLINK_ROUTE,
	.co_request_update	= rule_request_update,
	.co_msg_parser		= rule_msg_parser,
	.co_obj_ops		= &rule_obj_ops,
	.co_groups		= rule_groups,
};

static void __init rule_init(void)
{
	nl_cache_mngt_register(&rtnl_rule_ops);
}

static void __exit rule_exit(void)
{
	nl_cache_mngt_unregister(&rtnl_rule_ops);
}

/** @} */