/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

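/* An unset UID range matches every flow: [0, ~0]. */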
static const struct fib_kuid_range fib_kuid_range_unset = {
	KUIDT_INIT(0),
	KUIDT_INIT(~0),
};

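/*
 * Allocate a default FR_ACT_TO_TBL rule pointing at @table and append
 * it to the ops' rule list.
 */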
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);
	r->uid_range = fib_kuid_range_unset;

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

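/*
 * Pick a default preference just below that of the second rule in the
 * list (the first entry is normally the pref-0 default rule), or 0 if
 * there is no such rule.
 */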
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

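/*
 * Look up the ops registered for @family under RCU and take a reference
 * on the owning module.  Returns NULL if no ops are registered or the
 * module is being unloaded.
 */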
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

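/*
 * Link @ops into the per-namespace list.  Fails with -EINVAL if the
 * mandatory callbacks or rule_size are missing, and with -EEXIST if the
 * family is already registered.
 */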
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

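/*
 * Duplicate the template @tmpl and register it for @net.  Returns the
 * new ops on success or an ERR_PTR on failure.
 */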
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

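/* Unlink and release every rule still on the ops' list. */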
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

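/*
 * Unregister @ops: unlink it, drop all of its rules, and free it after
 * an RCU grace period so concurrent lookups can finish.
 */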
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

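/*
 * Helpers for the FRA_UID_RANGE attribute: check that a range was
 * actually set, and convert between userspace uids and kuids in the
 * current user namespace.
 */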
static int uid_range_set(struct fib_kuid_range *range)
{
	return uid_valid(range->start) && uid_valid(range->end);
}

static struct fib_kuid_range nla_get_kuid_range(struct nlattr **tb)
{
	struct fib_rule_uid_range *in;
	struct fib_kuid_range out;

	in = (struct fib_rule_uid_range *)nla_data(tb[FRA_UID_RANGE]);

	out.start = make_kuid(current_user_ns(), in->start);
	out.end = make_kuid(current_user_ns(), in->end);

	return out;
}

static int nla_put_uid_range(struct sk_buff *skb, struct fib_kuid_range *range)
{
	struct fib_rule_uid_range out = {
		from_kuid_munged(current_user_ns(), range->start),
		from_kuid_munged(current_user_ns(), range->end)
	};

	return nla_put(skb, FRA_UID_RANGE, sizeof(out), &out);
}

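/*
 * Return nonzero if @fl matches @rule: the generic selectors (iif, oif,
 * fwmark, uid range) plus the family-specific ->match.  The result is
 * inverted for FIB_RULE_INVERT rules.
 */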
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	if (uid_lt(fl->flowi_uid, rule->uid_range.start) ||
	    uid_gt(fl->flowi_uid, rule->uid_range.end))
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

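/*
 * Walk the rules in preference order under RCU, following resolved goto
 * targets and skipping NOP rules.  The first matching rule whose action
 * does not return -EAGAIN (and is not vetoed by ->suppress) is stored
 * in @arg->rule, with a reference taken unless FIB_LOOKUP_NOREF is set.
 * Returns -ESRCH if no rule matched.
 */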
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

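/*
 * When the header carries a src_len or dst_len, the matching FRA_SRC or
 * FRA_DST attribute must be present and sized for the family.
 */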
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

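/*
 * RTM_NEWRULE handler: parse and validate the attributes, build the new
 * rule, resolve goto targets in both directions, insert the rule in
 * preference order, and notify listeners.
 */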
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	if (tb[FRA_UID_RANGE]) {
		if (current_user_ns() != net->user_ns) {
			err = -EPERM;
			goto errout_free;
		}

		rule->uid_range = nla_get_kuid_range(tb);

		if (!uid_range_set(&rule->uid_range) ||
		    !uid_lte(rule->uid_range.start, rule->uid_range.end))
			goto errout_free;
	} else {
		rule->uid_range = fib_kuid_range_unset;
	}

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

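/*
 * RTM_DELRULE handler: find the first rule matching every attribute
 * supplied, unlink it, re-mark any goto rules that targeted it as
 * unresolved, and notify listeners.
 */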
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	struct fib_kuid_range range;
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	if (tb[FRA_UID_RANGE]) {
		range = nla_get_kuid_range(tb);
		if (!uid_range_set(&range))
			goto errout;
	} else {
		range = fib_kuid_range_unset;
	}

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (uid_range_set(&range) &&
		    (!uid_eq(rule->uid_range.start, range.start) ||
		     !uid_eq(rule->uid_range.end, range.end)))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is the target of any goto rules;
		 * if so, mark them unresolved.  As this operation can be
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

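/*
 * Upper bound on the notification size for one rule, including any
 * family-specific payload reported by ->nlmsg_payload.
 */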
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4) /* FRA_FWMASK */
			 + nla_total_size(sizeof(struct fib_kuid_range));

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

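/*
 * Fill a single rule into @skb as a message of @type; cancels the
 * message and returns -EMSGSIZE if it does not fit.
 */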
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)) ||
	    (uid_range_set(&rule->uid_range) &&
	     nla_put_uid_range(skb, &rule->uid_range)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

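/*
 * Dump one family's rules into @skb, resuming at the index saved in
 * cb->args[1] and updating it for the next call.
 */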
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;
	int err = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				       cb->nlh->nlmsg_seq, RTM_NEWRULE,
				       NLM_F_MULTI, ops);
		if (err < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return err;
}

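/*
 * RTM_GETRULE dump handler: dump a single family if one was requested,
 * otherwise iterate over all registered families.
 */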
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		dump_rules(skb, cb, ops);

		return skb->len;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

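/*
 * Broadcast an RTM_NEWRULE/RTM_DELRULE notification for @rule to the
 * ops' netlink group; on failure the error is recorded on the
 * rtnetlink socket instead.
 */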
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

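/* Bind rules that reference @dev by name to its ifindex. */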
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

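/* Drop the ifindex bindings of rules that referenced @dev. */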
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

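/*
 * netdevice notifier: keep iif/oif bindings in sync as devices are
 * registered, unregistered, or renamed.
 */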
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);