1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * ROUTE - implementation of the IP router.
7 *
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 *
14 * Fixes:
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
24 * clamper.
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
39 *
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
58 *
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
63 */
64
65 #define pr_fmt(fmt) "IPv4: " fmt
66
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
72 #include <linux/mm.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
77 #include <linux/in.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/inetdevice.h>
84 #include <linux/igmp.h>
85 #include <linux/pkt_sched.h>
86 #include <linux/mroute.h>
87 #include <linux/netfilter_ipv4.h>
88 #include <linux/random.h>
89 #include <linux/rcupdate.h>
90 #include <linux/times.h>
91 #include <linux/slab.h>
92 #include <linux/jhash.h>
93 #include <net/dst.h>
94 #include <net/net_namespace.h>
95 #include <net/protocol.h>
96 #include <net/ip.h>
97 #include <net/route.h>
98 #include <net/inetpeer.h>
99 #include <net/sock.h>
100 #include <net/ip_fib.h>
101 #include <net/arp.h>
102 #include <net/tcp.h>
103 #include <net/icmp.h>
104 #include <net/xfrm.h>
105 #include <net/netevent.h>
106 #include <net/rtnetlink.h>
107 #ifdef CONFIG_SYSCTL
108 #include <linux/sysctl.h>
109 #include <linux/kmemleak.h>
110 #endif
111 #include <net/secure_seq.h>
112
113 #define RT_FL_TOS(oldflp4) \
114 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
115
116 #define RT_GC_TIMEOUT (300*HZ)
117
118 static int ip_rt_max_size;
119 static int ip_rt_redirect_number __read_mostly = 9;
120 static int ip_rt_redirect_load __read_mostly = HZ / 50;
121 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
122 static int ip_rt_error_cost __read_mostly = HZ;
123 static int ip_rt_error_burst __read_mostly = 5 * HZ;
124 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
125 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
126 static int ip_rt_min_advmss __read_mostly = 256;
127
128 /*
129 * Interface to generic destination cache.
130 */
131
132 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
133 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
134 static unsigned int ipv4_mtu(const struct dst_entry *dst);
135 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
136 static void ipv4_link_failure(struct sk_buff *skb);
137 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
138 struct sk_buff *skb, u32 mtu);
139 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
140 struct sk_buff *skb);
141 static void ipv4_dst_destroy(struct dst_entry *dst);
142
143 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
144 {
145 WARN_ON(1);
146 return NULL;
147 }
148
149 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
150 struct sk_buff *skb,
151 const void *daddr);
152
153 static struct dst_ops ipv4_dst_ops = {
154 .family = AF_INET,
155 .protocol = cpu_to_be16(ETH_P_IP),
156 .check = ipv4_dst_check,
157 .default_advmss = ipv4_default_advmss,
158 .mtu = ipv4_mtu,
159 .cow_metrics = ipv4_cow_metrics,
160 .destroy = ipv4_dst_destroy,
161 .negative_advice = ipv4_negative_advice,
162 .link_failure = ipv4_link_failure,
163 .update_pmtu = ip_rt_update_pmtu,
164 .redirect = ip_do_redirect,
165 .local_out = __ip_local_out,
166 .neigh_lookup = ipv4_neigh_lookup,
167 };
168
169 #define ECN_OR_COST(class) TC_PRIO_##class
170
171 const __u8 ip_tos2prio[16] = {
172 TC_PRIO_BESTEFFORT,
173 ECN_OR_COST(BESTEFFORT),
174 TC_PRIO_BESTEFFORT,
175 ECN_OR_COST(BESTEFFORT),
176 TC_PRIO_BULK,
177 ECN_OR_COST(BULK),
178 TC_PRIO_BULK,
179 ECN_OR_COST(BULK),
180 TC_PRIO_INTERACTIVE,
181 ECN_OR_COST(INTERACTIVE),
182 TC_PRIO_INTERACTIVE,
183 ECN_OR_COST(INTERACTIVE),
184 TC_PRIO_INTERACTIVE_BULK,
185 ECN_OR_COST(INTERACTIVE_BULK),
186 TC_PRIO_INTERACTIVE_BULK,
187 ECN_OR_COST(INTERACTIVE_BULK)
188 };
189 EXPORT_SYMBOL(ip_tos2prio);
190
191 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
192 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
193
194 #ifdef CONFIG_PROC_FS
195 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
196 {
197 if (*pos)
198 return NULL;
199 return SEQ_START_TOKEN;
200 }
201
202 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
203 {
204 ++*pos;
205 return NULL;
206 }
207
208 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
209 {
210 }
211
212 static int rt_cache_seq_show(struct seq_file *seq, void *v)
213 {
214 if (v == SEQ_START_TOKEN)
215 seq_printf(seq, "%-127s\n",
216 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
217 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
218 "HHUptod\tSpecDst");
219 return 0;
220 }
221
222 static const struct seq_operations rt_cache_seq_ops = {
223 .start = rt_cache_seq_start,
224 .next = rt_cache_seq_next,
225 .stop = rt_cache_seq_stop,
226 .show = rt_cache_seq_show,
227 };
228
229 static int rt_cache_seq_open(struct inode *inode, struct file *file)
230 {
231 return seq_open(file, &rt_cache_seq_ops);
232 }
233
234 static const struct file_operations rt_cache_seq_fops = {
235 .owner = THIS_MODULE,
236 .open = rt_cache_seq_open,
237 .read = seq_read,
238 .llseek = seq_lseek,
239 .release = seq_release,
240 };
241
242
243 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
244 {
245 int cpu;
246
247 if (*pos == 0)
248 return SEQ_START_TOKEN;
249
250 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
251 if (!cpu_possible(cpu))
252 continue;
253 *pos = cpu+1;
254 return &per_cpu(rt_cache_stat, cpu);
255 }
256 return NULL;
257 }
258
259 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
260 {
261 int cpu;
262
263 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
264 if (!cpu_possible(cpu))
265 continue;
266 *pos = cpu+1;
267 return &per_cpu(rt_cache_stat, cpu);
268 }
269 return NULL;
270
271 }
272
273 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
274 {
275
276 }
277
278 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
279 {
280 struct rt_cache_stat *st = v;
281
282 if (v == SEQ_START_TOKEN) {
283 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
284 return 0;
285 }
286
287 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
288 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
289 dst_entries_get_slow(&ipv4_dst_ops),
290 0, /* st->in_hit */
291 st->in_slow_tot,
292 st->in_slow_mc,
293 st->in_no_route,
294 st->in_brd,
295 st->in_martian_dst,
296 st->in_martian_src,
297
298 0, /* st->out_hit */
299 st->out_slow_tot,
300 st->out_slow_mc,
301
302 0, /* st->gc_total */
303 0, /* st->gc_ignored */
304 0, /* st->gc_goal_miss */
305 0, /* st->gc_dst_overflow */
306 0, /* st->in_hlist_search */
307 0 /* st->out_hlist_search */
308 );
309 return 0;
310 }
311
312 static const struct seq_operations rt_cpu_seq_ops = {
313 .start = rt_cpu_seq_start,
314 .next = rt_cpu_seq_next,
315 .stop = rt_cpu_seq_stop,
316 .show = rt_cpu_seq_show,
317 };
318
319
320 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
321 {
322 return seq_open(file, &rt_cpu_seq_ops);
323 }
324
325 static const struct file_operations rt_cpu_seq_fops = {
326 .owner = THIS_MODULE,
327 .open = rt_cpu_seq_open,
328 .read = seq_read,
329 .llseek = seq_lseek,
330 .release = seq_release,
331 };
332
333 #ifdef CONFIG_IP_ROUTE_CLASSID
334 static int rt_acct_proc_show(struct seq_file *m, void *v)
335 {
336 struct ip_rt_acct *dst, *src;
337 unsigned int i, j;
338
339 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
340 if (!dst)
341 return -ENOMEM;
342
343 for_each_possible_cpu(i) {
344 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
345 for (j = 0; j < 256; j++) {
346 dst[j].o_bytes += src[j].o_bytes;
347 dst[j].o_packets += src[j].o_packets;
348 dst[j].i_bytes += src[j].i_bytes;
349 dst[j].i_packets += src[j].i_packets;
350 }
351 }
352
353 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
354 kfree(dst);
355 return 0;
356 }
357
358 static int rt_acct_proc_open(struct inode *inode, struct file *file)
359 {
360 return single_open(file, rt_acct_proc_show, NULL);
361 }
362
363 static const struct file_operations rt_acct_proc_fops = {
364 .owner = THIS_MODULE,
365 .open = rt_acct_proc_open,
366 .read = seq_read,
367 .llseek = seq_lseek,
368 .release = single_release,
369 };
370 #endif
371
372 static int __net_init ip_rt_do_proc_init(struct net *net)
373 {
374 struct proc_dir_entry *pde;
375
376 pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
377 &rt_cache_seq_fops);
378 if (!pde)
379 goto err1;
380
381 pde = proc_create("rt_cache", S_IRUGO,
382 net->proc_net_stat, &rt_cpu_seq_fops);
383 if (!pde)
384 goto err2;
385
386 #ifdef CONFIG_IP_ROUTE_CLASSID
387 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
388 if (!pde)
389 goto err3;
390 #endif
391 return 0;
392
393 #ifdef CONFIG_IP_ROUTE_CLASSID
394 err3:
395 remove_proc_entry("rt_cache", net->proc_net_stat);
396 #endif
397 err2:
398 remove_proc_entry("rt_cache", net->proc_net);
399 err1:
400 return -ENOMEM;
401 }
402
403 static void __net_exit ip_rt_do_proc_exit(struct net *net)
404 {
405 remove_proc_entry("rt_cache", net->proc_net_stat);
406 remove_proc_entry("rt_cache", net->proc_net);
407 #ifdef CONFIG_IP_ROUTE_CLASSID
408 remove_proc_entry("rt_acct", net->proc_net);
409 #endif
410 }
411
412 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
413 .init = ip_rt_do_proc_init,
414 .exit = ip_rt_do_proc_exit,
415 };
416
417 static int __init ip_rt_proc_init(void)
418 {
419 return register_pernet_subsys(&ip_rt_proc_ops);
420 }
421
422 #else
423 static inline int ip_rt_proc_init(void)
424 {
425 return 0;
426 }
427 #endif /* CONFIG_PROC_FS */
428
429 static inline bool rt_is_expired(const struct rtable *rth)
430 {
431 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
432 }
433
434 void rt_cache_flush(struct net *net)
435 {
436 rt_genid_bump_ipv4(net);
437 }
438
439 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
440 struct sk_buff *skb,
441 const void *daddr)
442 {
443 struct net_device *dev = dst->dev;
444 const __be32 *pkey = daddr;
445 const struct rtable *rt;
446 struct neighbour *n;
447
448 rt = (const struct rtable *) dst;
449 if (rt->rt_gateway)
450 pkey = (const __be32 *) &rt->rt_gateway;
451 else if (skb)
452 pkey = &ip_hdr(skb)->daddr;
453
454 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
455 if (n)
456 return n;
457 return neigh_create(&arp_tbl, pkey, dev);
458 }
459
460 #define IP_IDENTS_SZ 2048u
461 struct ip_ident_bucket {
462 atomic_t id;
463 u32 stamp32;
464 };
465
466 static struct ip_ident_bucket *ip_idents __read_mostly;
467
468 /* In order to protect privacy, we add a perturbation to identifiers
469 * if one generator is seldom used. This makes it hard for an attacker
470 * to infer how many packets were sent between two points in time.
471 */
472 u32 ip_idents_reserve(u32 hash, int segs)
473 {
474 struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
475 u32 old = ACCESS_ONCE(bucket->stamp32);
476 u32 now = (u32)jiffies;
477 u32 delta = 0;
478
479 if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
480 delta = prandom_u32_max(now - old);
481
482 return atomic_add_return(segs + delta, &bucket->id) - segs;
483 }
484 EXPORT_SYMBOL(ip_idents_reserve);
485
486 void __ip_select_ident(struct iphdr *iph, int segs)
487 {
488 static u32 ip_idents_hashrnd __read_mostly;
489 u32 hash, id;
490
491 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
492
493 hash = jhash_3words((__force u32)iph->daddr,
494 (__force u32)iph->saddr,
495 iph->protocol,
496 ip_idents_hashrnd);
497 id = ip_idents_reserve(hash, segs);
498 iph->id = htons(id);
499 }
500 EXPORT_SYMBOL(__ip_select_ident);
501
502 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
503 const struct sock *sk,
504 const struct iphdr *iph,
505 int oif, u8 tos,
506 u8 prot, u32 mark, int flow_flags)
507 {
508 if (sk) {
509 const struct inet_sock *inet = inet_sk(sk);
510
511 oif = sk->sk_bound_dev_if;
512 mark = sk->sk_mark;
513 tos = RT_CONN_FLAGS(sk);
514 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
515 }
516 flowi4_init_output(fl4, oif, mark, tos,
517 RT_SCOPE_UNIVERSE, prot,
518 flow_flags,
519 iph->daddr, iph->saddr, 0, 0,
520 sock_net_uid(net, sk));
521 }
522
523 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
524 const struct sock *sk)
525 {
526 const struct net *net = dev_net(skb->dev);
527 const struct iphdr *iph = ip_hdr(skb);
528 int oif = skb->dev->ifindex;
529 u8 tos = RT_TOS(iph->tos);
530 u8 prot = iph->protocol;
531 u32 mark = skb->mark;
532
533 __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
534 }
535
536 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
537 {
538 const struct inet_sock *inet = inet_sk(sk);
539 const struct ip_options_rcu *inet_opt;
540 __be32 daddr = inet->inet_daddr;
541
542 rcu_read_lock();
543 inet_opt = rcu_dereference(inet->inet_opt);
544 if (inet_opt && inet_opt->opt.srr)
545 daddr = inet_opt->opt.faddr;
546 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
547 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
548 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
549 inet_sk_flowi_flags(sk),
550 daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
551 rcu_read_unlock();
552 }
553
554 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
555 const struct sk_buff *skb)
556 {
557 if (skb)
558 build_skb_flow_key(fl4, skb, sk);
559 else
560 build_sk_flow_key(fl4, sk);
561 }
562
563 static inline void rt_free(struct rtable *rt)
564 {
565 call_rcu(&rt->dst.rcu_head, dst_rcu_free);
566 }
567
568 static DEFINE_SPINLOCK(fnhe_lock);
569
570 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
571 {
572 struct rtable *rt;
573
574 rt = rcu_dereference(fnhe->fnhe_rth_input);
575 if (rt) {
576 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
577 rt_free(rt);
578 }
579 rt = rcu_dereference(fnhe->fnhe_rth_output);
580 if (rt) {
581 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
582 rt_free(rt);
583 }
584 }
585
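/* Pick the least recently updated exception in this hash bucket so it can
 * be recycled by update_or_create_fnhe(); flush any routes cached on it first.
 */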
586 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
587 {
588 struct fib_nh_exception *fnhe, *oldest;
589
590 oldest = rcu_dereference(hash->chain);
591 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
592 fnhe = rcu_dereference(fnhe->fnhe_next)) {
593 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
594 oldest = fnhe;
595 }
596 fnhe_flush_routes(oldest);
597 return oldest;
598 }
599
600 static inline u32 fnhe_hashfun(__be32 daddr)
601 {
602 static u32 fnhe_hashrnd __read_mostly;
603 u32 hval;
604
605 net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
606 hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
607 return hash_32(hval, FNHE_HASH_SHIFT);
608 }
609
610 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
611 {
612 rt->rt_pmtu = fnhe->fnhe_pmtu;
613 rt->dst.expires = fnhe->fnhe_expires;
614
615 if (fnhe->fnhe_gw) {
616 rt->rt_flags |= RTCF_REDIRECTED;
617 rt->rt_gateway = fnhe->fnhe_gw;
618 rt->rt_uses_gateway = 1;
619 }
620 }
621
622 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
623 u32 pmtu, unsigned long expires)
624 {
625 struct fnhe_hash_bucket *hash;
626 struct fib_nh_exception *fnhe;
627 struct rtable *rt;
628 u32 genid, hval;
629 unsigned int i;
630 int depth;
631
632 genid = fnhe_genid(dev_net(nh->nh_dev));
633 hval = fnhe_hashfun(daddr);
634
635 spin_lock_bh(&fnhe_lock);
636
637 hash = rcu_dereference(nh->nh_exceptions);
638 if (!hash) {
639 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
640 if (!hash)
641 goto out_unlock;
642 rcu_assign_pointer(nh->nh_exceptions, hash);
643 }
644
645 hash += hval;
646
647 depth = 0;
648 for (fnhe = rcu_dereference(hash->chain); fnhe;
649 fnhe = rcu_dereference(fnhe->fnhe_next)) {
650 if (fnhe->fnhe_daddr == daddr)
651 break;
652 depth++;
653 }
654
655 if (fnhe) {
656 if (fnhe->fnhe_genid != genid)
657 fnhe->fnhe_genid = genid;
658 if (gw)
659 fnhe->fnhe_gw = gw;
660 if (pmtu)
661 fnhe->fnhe_pmtu = pmtu;
662 fnhe->fnhe_expires = max(1UL, expires);
663 /* Update all cached dsts too */
664 rt = rcu_dereference(fnhe->fnhe_rth_input);
665 if (rt)
666 fill_route_from_fnhe(rt, fnhe);
667 rt = rcu_dereference(fnhe->fnhe_rth_output);
668 if (rt)
669 fill_route_from_fnhe(rt, fnhe);
670 } else {
671 if (depth > FNHE_RECLAIM_DEPTH)
672 fnhe = fnhe_oldest(hash);
673 else {
674 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
675 if (!fnhe)
676 goto out_unlock;
677
678 fnhe->fnhe_next = hash->chain;
679 rcu_assign_pointer(hash->chain, fnhe);
680 }
681 fnhe->fnhe_genid = genid;
682 fnhe->fnhe_daddr = daddr;
683 fnhe->fnhe_gw = gw;
684 fnhe->fnhe_pmtu = pmtu;
685 fnhe->fnhe_expires = expires;
686
687 /* Exception created; mark the cached routes for the nexthop
688 * stale, so anyone caching it rechecks if this exception
689 * applies to them.
690 */
691 rt = rcu_dereference(nh->nh_rth_input);
692 if (rt)
693 rt->dst.obsolete = DST_OBSOLETE_KILL;
694
695 for_each_possible_cpu(i) {
696 struct rtable __rcu **prt;
697 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
698 rt = rcu_dereference(*prt);
699 if (rt)
700 rt->dst.obsolete = DST_OBSOLETE_KILL;
701 }
702 }
703
704 fnhe->fnhe_stamp = jiffies;
705
706 out_unlock:
707 spin_unlock_bh(&fnhe_lock);
708 }
709
710 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
711 bool kill_route)
712 {
713 __be32 new_gw = icmp_hdr(skb)->un.gateway;
714 __be32 old_gw = ip_hdr(skb)->saddr;
715 struct net_device *dev = skb->dev;
716 struct in_device *in_dev;
717 struct fib_result res;
718 struct neighbour *n;
719 struct net *net;
720
721 switch (icmp_hdr(skb)->code & 7) {
722 case ICMP_REDIR_NET:
723 case ICMP_REDIR_NETTOS:
724 case ICMP_REDIR_HOST:
725 case ICMP_REDIR_HOSTTOS:
726 break;
727
728 default:
729 return;
730 }
731
732 if (rt->rt_gateway != old_gw)
733 return;
734
735 in_dev = __in_dev_get_rcu(dev);
736 if (!in_dev)
737 return;
738
739 net = dev_net(dev);
740 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
741 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
742 ipv4_is_zeronet(new_gw))
743 goto reject_redirect;
744
745 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
746 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
747 goto reject_redirect;
748 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
749 goto reject_redirect;
750 } else {
751 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
752 goto reject_redirect;
753 }
754
755 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
756 if (!IS_ERR(n)) {
757 if (!(n->nud_state & NUD_VALID)) {
758 neigh_event_send(n, NULL);
759 } else {
760 if (fib_lookup(net, fl4, &res) == 0) {
761 struct fib_nh *nh = &FIB_RES_NH(res);
762
763 update_or_create_fnhe(nh, fl4->daddr, new_gw,
764 0, 0);
765 }
766 if (kill_route)
767 rt->dst.obsolete = DST_OBSOLETE_KILL;
768 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
769 }
770 neigh_release(n);
771 }
772 return;
773
774 reject_redirect:
775 #ifdef CONFIG_IP_ROUTE_VERBOSE
776 if (IN_DEV_LOG_MARTIANS(in_dev)) {
777 const struct iphdr *iph = (const struct iphdr *) skb->data;
778 __be32 daddr = iph->daddr;
779 __be32 saddr = iph->saddr;
780
781 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
782 " Advised path = %pI4 -> %pI4\n",
783 &old_gw, dev->name, &new_gw,
784 &saddr, &daddr);
785 }
786 #endif
787 ;
788 }
789
790 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
791 {
792 struct rtable *rt;
793 struct flowi4 fl4;
794 const struct iphdr *iph = (const struct iphdr *) skb->data;
795 struct net *net = dev_net(skb->dev);
796 int oif = skb->dev->ifindex;
797 u8 tos = RT_TOS(iph->tos);
798 u8 prot = iph->protocol;
799 u32 mark = skb->mark;
800
801 rt = (struct rtable *) dst;
802
803 __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
804 __ip_do_redirect(rt, skb, &fl4, true);
805 }
806
807 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
808 {
809 struct rtable *rt = (struct rtable *)dst;
810 struct dst_entry *ret = dst;
811
812 if (rt) {
813 if (dst->obsolete > 0) {
814 ip_rt_put(rt);
815 ret = NULL;
816 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
817 rt->dst.expires) {
818 ip_rt_put(rt);
819 ret = NULL;
820 }
821 }
822 return ret;
823 }
824
825 /*
826 * Algorithm:
827 * 1. The first ip_rt_redirect_number redirects are sent
828 * with exponential backoff, then we stop sending them altogether,
829 * assuming that the host ignores our redirects.
830 * 2. If we did not see packets requiring redirects
831 * during ip_rt_redirect_silence, we assume that the host
832 * forgot the redirected route and start sending redirects again.
833 *
834 * This algorithm is much cheaper and more intelligent than dumb load limiting
835 * in icmp.c.
836 *
837 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
838 * and "frag. need" (breaks PMTU discovery) in icmp.c.
839 */
840
841 void ip_rt_send_redirect(struct sk_buff *skb)
842 {
843 struct rtable *rt = skb_rtable(skb);
844 struct in_device *in_dev;
845 struct inet_peer *peer;
846 struct net *net;
847 int log_martians;
848
849 rcu_read_lock();
850 in_dev = __in_dev_get_rcu(rt->dst.dev);
851 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
852 rcu_read_unlock();
853 return;
854 }
855 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
856 rcu_read_unlock();
857
858 net = dev_net(rt->dst.dev);
859 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
860 if (!peer) {
861 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
862 rt_nexthop(rt, ip_hdr(skb)->daddr));
863 return;
864 }
865
866 /* No redirected packets during ip_rt_redirect_silence;
867 * reset the algorithm.
868 */
869 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
870 peer->rate_tokens = 0;
871
872 /* Too many ignored redirects; do not send anything.
873 * Set dst.rate_last to the last seen redirected packet.
874 */
875 if (peer->rate_tokens >= ip_rt_redirect_number) {
876 peer->rate_last = jiffies;
877 goto out_put_peer;
878 }
879
880 /* Check for load limit; set rate_last to the latest sent
881 * redirect.
882 */
883 if (peer->rate_tokens == 0 ||
884 time_after(jiffies,
885 (peer->rate_last +
886 (ip_rt_redirect_load << peer->rate_tokens)))) {
887 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
888
889 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
890 peer->rate_last = jiffies;
891 ++peer->rate_tokens;
892 #ifdef CONFIG_IP_ROUTE_VERBOSE
893 if (log_martians &&
894 peer->rate_tokens == ip_rt_redirect_number)
895 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
896 &ip_hdr(skb)->saddr, inet_iif(skb),
897 &ip_hdr(skb)->daddr, &gw);
898 #endif
899 }
900 out_put_peer:
901 inet_putpeer(peer);
902 }
903
904 static int ip_error(struct sk_buff *skb)
905 {
906 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
907 struct rtable *rt = skb_rtable(skb);
908 struct inet_peer *peer;
909 unsigned long now;
910 struct net *net;
911 bool send;
912 int code;
913
914 /* IP on this device is disabled. */
915 if (!in_dev)
916 goto out;
917
918 net = dev_net(rt->dst.dev);
919 if (!IN_DEV_FORWARD(in_dev)) {
920 switch (rt->dst.error) {
921 case EHOSTUNREACH:
922 IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
923 break;
924
925 case ENETUNREACH:
926 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
927 break;
928 }
929 goto out;
930 }
931
932 switch (rt->dst.error) {
933 case EINVAL:
934 default:
935 goto out;
936 case EHOSTUNREACH:
937 code = ICMP_HOST_UNREACH;
938 break;
939 case ENETUNREACH:
940 code = ICMP_NET_UNREACH;
941 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
942 break;
943 case EACCES:
944 code = ICMP_PKT_FILTERED;
945 break;
946 }
947
948 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
949
950 send = true;
951 if (peer) {
952 now = jiffies;
953 peer->rate_tokens += now - peer->rate_last;
954 if (peer->rate_tokens > ip_rt_error_burst)
955 peer->rate_tokens = ip_rt_error_burst;
956 peer->rate_last = now;
957 if (peer->rate_tokens >= ip_rt_error_cost)
958 peer->rate_tokens -= ip_rt_error_cost;
959 else
960 send = false;
961 inet_putpeer(peer);
962 }
963 if (send)
964 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
965
966 out: kfree_skb(skb);
967 return 0;
968 }
969
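/* Record a learned path MTU (clamped to ip_rt_min_pmtu) as a nexthop
 * exception with an expiry, so later routes to this destination see it.
 */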
970 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
971 {
972 struct dst_entry *dst = &rt->dst;
973 struct fib_result res;
974
975 if (dst_metric_locked(dst, RTAX_MTU))
976 return;
977
978 if (dst->dev->mtu < mtu)
979 return;
980
981 if (mtu < ip_rt_min_pmtu)
982 mtu = ip_rt_min_pmtu;
983
984 if (rt->rt_pmtu == mtu &&
985 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
986 return;
987
988 rcu_read_lock();
989 if (fib_lookup(dev_net(dst->dev), fl4, &res) == 0) {
990 struct fib_nh *nh = &FIB_RES_NH(res);
991
992 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
993 jiffies + ip_rt_mtu_expires);
994 }
995 rcu_read_unlock();
996 }
997
998 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
999 struct sk_buff *skb, u32 mtu)
1000 {
1001 struct rtable *rt = (struct rtable *) dst;
1002 struct flowi4 fl4;
1003
1004 ip_rt_build_flow_key(&fl4, sk, skb);
1005 __ip_rt_update_pmtu(rt, &fl4, mtu);
1006 }
1007
1008 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1009 int oif, u32 mark, u8 protocol, int flow_flags)
1010 {
1011 const struct iphdr *iph = (const struct iphdr *) skb->data;
1012 struct flowi4 fl4;
1013 struct rtable *rt;
1014
1015 if (!mark)
1016 mark = IP4_REPLY_MARK(net, skb->mark);
1017
1018 __build_flow_key(net, &fl4, NULL, iph, oif,
1019 RT_TOS(iph->tos), protocol, mark, flow_flags);
1020 rt = __ip_route_output_key(net, &fl4);
1021 if (!IS_ERR(rt)) {
1022 __ip_rt_update_pmtu(rt, &fl4, mtu);
1023 ip_rt_put(rt);
1024 }
1025 }
1026 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1027
1028 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1029 {
1030 const struct iphdr *iph = (const struct iphdr *) skb->data;
1031 struct flowi4 fl4;
1032 struct rtable *rt;
1033
1034 __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1035
1036 if (!fl4.flowi4_mark)
1037 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1038
1039 rt = __ip_route_output_key(sock_net(sk), &fl4);
1040 if (!IS_ERR(rt)) {
1041 __ip_rt_update_pmtu(rt, &fl4, mtu);
1042 ip_rt_put(rt);
1043 }
1044 }
1045
1046 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1047 {
1048 const struct iphdr *iph = (const struct iphdr *) skb->data;
1049 struct flowi4 fl4;
1050 struct rtable *rt;
1051 struct dst_entry *odst = NULL;
1052 bool new = false;
1053 struct net *net = sock_net(sk);
1054
1055 bh_lock_sock(sk);
1056
1057 if (!ip_sk_accept_pmtu(sk))
1058 goto out;
1059
1060 odst = sk_dst_get(sk);
1061
1062 if (sock_owned_by_user(sk) || !odst) {
1063 __ipv4_sk_update_pmtu(skb, sk, mtu);
1064 goto out;
1065 }
1066
1067 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1068
1069 rt = (struct rtable *)odst;
1070 if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
1071 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1072 if (IS_ERR(rt))
1073 goto out;
1074
1075 new = true;
1076 }
1077
1078 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1079
1080 if (!dst_check(&rt->dst, 0)) {
1081 if (new)
1082 dst_release(&rt->dst);
1083
1084 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1085 if (IS_ERR(rt))
1086 goto out;
1087
1088 new = true;
1089 }
1090
1091 if (new)
1092 sk_dst_set(sk, &rt->dst);
1093
1094 out:
1095 bh_unlock_sock(sk);
1096 dst_release(odst);
1097 }
1098 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1099
1100 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1101 int oif, u32 mark, u8 protocol, int flow_flags)
1102 {
1103 const struct iphdr *iph = (const struct iphdr *) skb->data;
1104 struct flowi4 fl4;
1105 struct rtable *rt;
1106
1107 __build_flow_key(net, &fl4, NULL, iph, oif,
1108 RT_TOS(iph->tos), protocol, mark, flow_flags);
1109 rt = __ip_route_output_key(net, &fl4);
1110 if (!IS_ERR(rt)) {
1111 __ip_do_redirect(rt, skb, &fl4, false);
1112 ip_rt_put(rt);
1113 }
1114 }
1115 EXPORT_SYMBOL_GPL(ipv4_redirect);
1116
1117 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1118 {
1119 const struct iphdr *iph = (const struct iphdr *) skb->data;
1120 struct flowi4 fl4;
1121 struct rtable *rt;
1122 struct net *net = sock_net(sk);
1123
1124 __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1125 rt = __ip_route_output_key(net, &fl4);
1126 if (!IS_ERR(rt)) {
1127 __ip_do_redirect(rt, skb, &fl4, false);
1128 ip_rt_put(rt);
1129 }
1130 }
1131 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1132
1133 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1134 {
1135 struct rtable *rt = (struct rtable *) dst;
1136
1137 /* All IPV4 dsts are created with ->obsolete set to the value
1138 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1139 * into this function always.
1140 *
1141 * When a PMTU/redirect information update invalidates a route,
1142 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1143 * DST_OBSOLETE_DEAD by dst_free().
1144 */
1145 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1146 return NULL;
1147 return dst;
1148 }
1149
1150 static void ipv4_link_failure(struct sk_buff *skb)
1151 {
1152 struct rtable *rt;
1153
1154 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1155
1156 rt = skb_rtable(skb);
1157 if (rt)
1158 dst_set_expires(&rt->dst, 0);
1159 }
1160
1161 static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
1162 {
1163 pr_debug("%s: %pI4 -> %pI4, %s\n",
1164 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1165 skb->dev ? skb->dev->name : "?");
1166 kfree_skb(skb);
1167 WARN_ON(1);
1168 return 0;
1169 }
1170
1171 /*
1172 We do not cache source address of outgoing interface,
1173 because it is used only by IP RR, TS and SRR options,
1174 so that it is out of the fast path.
1175
1176 BTW remember: "addr" is allowed to be not aligned
1177 in IP options!
1178 */
1179
1180 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1181 {
1182 __be32 src;
1183
1184 if (rt_is_output_route(rt))
1185 src = ip_hdr(skb)->saddr;
1186 else {
1187 struct fib_result res;
1188 struct flowi4 fl4;
1189 struct iphdr *iph;
1190
1191 iph = ip_hdr(skb);
1192
1193 memset(&fl4, 0, sizeof(fl4));
1194 fl4.daddr = iph->daddr;
1195 fl4.saddr = iph->saddr;
1196 fl4.flowi4_tos = RT_TOS(iph->tos);
1197 fl4.flowi4_oif = rt->dst.dev->ifindex;
1198 fl4.flowi4_iif = skb->dev->ifindex;
1199 fl4.flowi4_mark = skb->mark;
1200
1201 rcu_read_lock();
1202 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
1203 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1204 else
1205 src = inet_select_addr(rt->dst.dev,
1206 rt_nexthop(rt, iph->daddr),
1207 RT_SCOPE_UNIVERSE);
1208 rcu_read_unlock();
1209 }
1210 memcpy(addr, &src, 4);
1211 }
1212
1213 #ifdef CONFIG_IP_ROUTE_CLASSID
1214 static void set_class_tag(struct rtable *rt, u32 tag)
1215 {
1216 if (!(rt->dst.tclassid & 0xFFFF))
1217 rt->dst.tclassid |= tag & 0xFFFF;
1218 if (!(rt->dst.tclassid & 0xFFFF0000))
1219 rt->dst.tclassid |= tag & 0xFFFF0000;
1220 }
1221 #endif
1222
1223 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1224 {
1225 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1226
1227 if (advmss == 0) {
1228 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1229 ip_rt_min_advmss);
1230 if (advmss > 65535 - 40)
1231 advmss = 65535 - 40;
1232 }
1233 return advmss;
1234 }
1235
1236 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1237 {
1238 const struct rtable *rt = (const struct rtable *) dst;
1239 unsigned int mtu = rt->rt_pmtu;
1240
1241 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1242 mtu = dst_metric_raw(dst, RTAX_MTU);
1243
1244 if (mtu)
1245 return mtu;
1246
1247 mtu = dst->dev->mtu;
1248
1249 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1250 if (rt->rt_uses_gateway && mtu > 576)
1251 mtu = 576;
1252 }
1253
1254 return min_t(unsigned int, mtu, IP_MAX_MTU);
1255 }
1256
1257 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1258 {
1259 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
1260 struct fib_nh_exception *fnhe;
1261 u32 hval;
1262
1263 if (!hash)
1264 return NULL;
1265
1266 hval = fnhe_hashfun(daddr);
1267
1268 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1269 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1270 if (fnhe->fnhe_daddr == daddr)
1271 return fnhe;
1272 }
1273 return NULL;
1274 }
1275
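/* Copy an exception's redirect/PMTU data into @rt and, if the route is
 * cacheable, store it on the exception so later lookups can reuse it.
 */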
1276 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1277 __be32 daddr)
1278 {
1279 bool ret = false;
1280
1281 spin_lock_bh(&fnhe_lock);
1282
1283 if (daddr == fnhe->fnhe_daddr) {
1284 struct rtable __rcu **porig;
1285 struct rtable *orig;
1286 int genid = fnhe_genid(dev_net(rt->dst.dev));
1287
1288 if (rt_is_input_route(rt))
1289 porig = &fnhe->fnhe_rth_input;
1290 else
1291 porig = &fnhe->fnhe_rth_output;
1292 orig = rcu_dereference(*porig);
1293
1294 if (fnhe->fnhe_genid != genid) {
1295 fnhe->fnhe_genid = genid;
1296 fnhe->fnhe_gw = 0;
1297 fnhe->fnhe_pmtu = 0;
1298 fnhe->fnhe_expires = 0;
1299 fnhe_flush_routes(fnhe);
1300 orig = NULL;
1301 }
1302 fill_route_from_fnhe(rt, fnhe);
1303 if (!rt->rt_gateway)
1304 rt->rt_gateway = daddr;
1305
1306 if (!(rt->dst.flags & DST_NOCACHE)) {
1307 rcu_assign_pointer(*porig, rt);
1308 if (orig)
1309 rt_free(orig);
1310 ret = true;
1311 }
1312
1313 fnhe->fnhe_stamp = jiffies;
1314 }
1315 spin_unlock_bh(&fnhe_lock);
1316
1317 return ret;
1318 }
1319
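/* Install @rt as the nexthop's cached route (a per-CPU slot for output
 * routes); returns false if another updater won the cmpxchg race.
 */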
1320 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1321 {
1322 struct rtable *orig, *prev, **p;
1323 bool ret = true;
1324
1325 if (rt_is_input_route(rt)) {
1326 p = (struct rtable **)&nh->nh_rth_input;
1327 } else {
1328 p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
1329 }
1330 orig = *p;
1331
1332 prev = cmpxchg(p, orig, rt);
1333 if (prev == orig) {
1334 if (orig)
1335 rt_free(orig);
1336 } else
1337 ret = false;
1338
1339 return ret;
1340 }
1341
1342 static DEFINE_SPINLOCK(rt_uncached_lock);
1343 static LIST_HEAD(rt_uncached_list);
1344
1345 static void rt_add_uncached_list(struct rtable *rt)
1346 {
1347 spin_lock_bh(&rt_uncached_lock);
1348 list_add_tail(&rt->rt_uncached, &rt_uncached_list);
1349 spin_unlock_bh(&rt_uncached_lock);
1350 }
1351
1352 static void ipv4_dst_destroy(struct dst_entry *dst)
1353 {
1354 struct rtable *rt = (struct rtable *) dst;
1355
1356 if (!list_empty(&rt->rt_uncached)) {
1357 spin_lock_bh(&rt_uncached_lock);
1358 list_del(&rt->rt_uncached);
1359 spin_unlock_bh(&rt_uncached_lock);
1360 }
1361 }
1362
1363 void rt_flush_dev(struct net_device *dev)
1364 {
1365 if (!list_empty(&rt_uncached_list)) {
1366 struct net *net = dev_net(dev);
1367 struct rtable *rt;
1368
1369 spin_lock_bh(&rt_uncached_lock);
1370 list_for_each_entry(rt, &rt_uncached_list, rt_uncached) {
1371 if (rt->dst.dev != dev)
1372 continue;
1373 rt->dst.dev = net->loopback_dev;
1374 dev_hold(rt->dst.dev);
1375 dev_put(dev);
1376 }
1377 spin_unlock_bh(&rt_uncached_lock);
1378 }
1379 }
1380
1381 static bool rt_cache_valid(const struct rtable *rt)
1382 {
1383 return rt &&
1384 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1385 !rt_is_expired(rt);
1386 }
1387
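/* Finish building @rt from the FIB result: copy the gateway and metrics,
 * then try to cache it on the nexthop exception or the nexthop itself,
 * falling back to the uncached list if caching is not possible.
 */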
1388 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1389 const struct fib_result *res,
1390 struct fib_nh_exception *fnhe,
1391 struct fib_info *fi, u16 type, u32 itag)
1392 {
1393 bool cached = false;
1394
1395 if (fi) {
1396 struct fib_nh *nh = &FIB_RES_NH(*res);
1397
1398 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
1399 rt->rt_gateway = nh->nh_gw;
1400 rt->rt_uses_gateway = 1;
1401 }
1402 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1403 #ifdef CONFIG_IP_ROUTE_CLASSID
1404 rt->dst.tclassid = nh->nh_tclassid;
1405 #endif
1406 if (unlikely(fnhe))
1407 cached = rt_bind_exception(rt, fnhe, daddr);
1408 else if (!(rt->dst.flags & DST_NOCACHE))
1409 cached = rt_cache_route(nh, rt);
1410 if (unlikely(!cached)) {
1411 /* Routes we intend to cache in nexthop exception or
1412 * FIB nexthop have the DST_NOCACHE bit clear.
1413 * However, if we are unsuccessful at storing this
1414 * route into the cache we really need to set it.
1415 */
1416 rt->dst.flags |= DST_NOCACHE;
1417 if (!rt->rt_gateway)
1418 rt->rt_gateway = daddr;
1419 rt_add_uncached_list(rt);
1420 }
1421 } else
1422 rt_add_uncached_list(rt);
1423
1424 #ifdef CONFIG_IP_ROUTE_CLASSID
1425 #ifdef CONFIG_IP_MULTIPLE_TABLES
1426 set_class_tag(rt, res->tclassid);
1427 #endif
1428 set_class_tag(rt, itag);
1429 #endif
1430 }
1431
1432 static struct rtable *rt_dst_alloc(struct net_device *dev,
1433 bool nopolicy, bool noxfrm, bool will_cache)
1434 {
1435 return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1436 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
1437 (nopolicy ? DST_NOPOLICY : 0) |
1438 (noxfrm ? DST_NOXFRM : 0));
1439 }
1440
1441 /* called in rcu_read_lock() section */
1442 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1443 u8 tos, struct net_device *dev, int our)
1444 {
1445 struct rtable *rth;
1446 struct in_device *in_dev = __in_dev_get_rcu(dev);
1447 u32 itag = 0;
1448 int err;
1449
1450 /* Primary sanity checks. */
1451
1452 if (in_dev == NULL)
1453 return -EINVAL;
1454
1455 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1456 skb->protocol != htons(ETH_P_IP))
1457 goto e_inval;
1458
1459 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1460 if (ipv4_is_loopback(saddr))
1461 goto e_inval;
1462
1463 if (ipv4_is_zeronet(saddr)) {
1464 if (!ipv4_is_local_multicast(daddr))
1465 goto e_inval;
1466 } else {
1467 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1468 in_dev, &itag);
1469 if (err < 0)
1470 goto e_err;
1471 }
1472 rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
1473 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1474 if (!rth)
1475 goto e_nobufs;
1476
1477 #ifdef CONFIG_IP_ROUTE_CLASSID
1478 rth->dst.tclassid = itag;
1479 #endif
1480 rth->dst.output = ip_rt_bug;
1481
1482 rth->rt_genid = rt_genid_ipv4(dev_net(dev));
1483 rth->rt_flags = RTCF_MULTICAST;
1484 rth->rt_type = RTN_MULTICAST;
1485 rth->rt_is_input= 1;
1486 rth->rt_iif = 0;
1487 rth->rt_pmtu = 0;
1488 rth->rt_gateway = 0;
1489 rth->rt_uses_gateway = 0;
1490 INIT_LIST_HEAD(&rth->rt_uncached);
1491 if (our) {
1492 rth->dst.input= ip_local_deliver;
1493 rth->rt_flags |= RTCF_LOCAL;
1494 }
1495
1496 #ifdef CONFIG_IP_MROUTE
1497 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1498 rth->dst.input = ip_mr_input;
1499 #endif
1500 RT_CACHE_STAT_INC(in_slow_mc);
1501
1502 skb_dst_set(skb, &rth->dst);
1503 return 0;
1504
1505 e_nobufs:
1506 return -ENOBUFS;
1507 e_inval:
1508 return -EINVAL;
1509 e_err:
1510 return err;
1511 }
1512
1513
1514 static void ip_handle_martian_source(struct net_device *dev,
1515 struct in_device *in_dev,
1516 struct sk_buff *skb,
1517 __be32 daddr,
1518 __be32 saddr)
1519 {
1520 RT_CACHE_STAT_INC(in_martian_src);
1521 #ifdef CONFIG_IP_ROUTE_VERBOSE
1522 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1523 /*
1524 * RFC1812 recommendation, if source is martian,
1525 * the only hint is MAC header.
1526 */
1527 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1528 &daddr, &saddr, dev->name);
1529 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1530 print_hex_dump(KERN_WARNING, "ll header: ",
1531 DUMP_PREFIX_OFFSET, 16, 1,
1532 skb_mac_header(skb),
1533 dev->hard_header_len, true);
1534 }
1535 }
1536 #endif
1537 }
1538
1539 /* called in rcu_read_lock() section */
1540 static int __mkroute_input(struct sk_buff *skb,
1541 const struct fib_result *res,
1542 struct in_device *in_dev,
1543 __be32 daddr, __be32 saddr, u32 tos)
1544 {
1545 struct fib_nh_exception *fnhe;
1546 struct rtable *rth;
1547 int err;
1548 struct in_device *out_dev;
1549 unsigned int flags = 0;
1550 bool do_cache;
1551 u32 itag = 0;
1552
1553 /* get a working reference to the output device */
1554 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1555 if (out_dev == NULL) {
1556 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1557 return -EINVAL;
1558 }
1559
1560 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1561 in_dev->dev, in_dev, &itag);
1562 if (err < 0) {
1563 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1564 saddr);
1565
1566 goto cleanup;
1567 }
1568
1569 do_cache = res->fi && !itag;
1570 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1571 skb->protocol == htons(ETH_P_IP) &&
1572 (IN_DEV_SHARED_MEDIA(out_dev) ||
1573 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1574 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1575
1576 if (skb->protocol != htons(ETH_P_IP)) {
1577 /* Not IP (i.e. ARP). Do not create a route if it is
1578 * invalid for proxy arp. DNAT routes are always valid.
1579 *
1580 * The proxy arp feature has been extended to allow ARP
1581 * replies back to the same interface, to support
1582 * Private VLAN switch technologies. See arp.c.
1583 */
1584 if (out_dev == in_dev &&
1585 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1586 err = -EINVAL;
1587 goto cleanup;
1588 }
1589 }
1590
1591 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1592 if (do_cache) {
1593 if (fnhe != NULL)
1594 rth = rcu_dereference(fnhe->fnhe_rth_input);
1595 else
1596 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1597
1598 if (rt_cache_valid(rth)) {
1599 skb_dst_set_noref(skb, &rth->dst);
1600 goto out;
1601 }
1602 }
1603
1604 rth = rt_dst_alloc(out_dev->dev,
1605 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1606 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1607 if (!rth) {
1608 err = -ENOBUFS;
1609 goto cleanup;
1610 }
1611
1612 rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
1613 rth->rt_flags = flags;
1614 rth->rt_type = res->type;
1615 rth->rt_is_input = 1;
1616 rth->rt_iif = 0;
1617 rth->rt_pmtu = 0;
1618 rth->rt_gateway = 0;
1619 rth->rt_uses_gateway = 0;
1620 INIT_LIST_HEAD(&rth->rt_uncached);
1621 RT_CACHE_STAT_INC(in_slow_tot);
1622
1623 rth->dst.input = ip_forward;
1624 rth->dst.output = ip_output;
1625
1626 rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
1627 skb_dst_set(skb, &rth->dst);
1628 out:
1629 err = 0;
1630 cleanup:
1631 return err;
1632 }
1633
1634 static int ip_mkroute_input(struct sk_buff *skb,
1635 struct fib_result *res,
1636 const struct flowi4 *fl4,
1637 struct in_device *in_dev,
1638 __be32 daddr, __be32 saddr, u32 tos)
1639 {
1640 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1641 if (res->fi && res->fi->fib_nhs > 1)
1642 fib_select_multipath(res);
1643 #endif
1644
1645 /* create a routing cache entry */
1646 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1647 }
1648
1649 /*
1650 * NOTE. We drop all the packets that have local source
1651 * addresses, because every properly looped back packet
1652 * must have correct destination already attached by output routine.
1653 *
1654 * Such approach solves two big problems:
1655 * 1. Non-simplex devices are handled properly.
1656 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1657 * called with rcu_read_lock()
1658 */
1659
1660 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1661 u8 tos, struct net_device *dev)
1662 {
1663 struct fib_result res;
1664 struct in_device *in_dev = __in_dev_get_rcu(dev);
1665 struct flowi4 fl4;
1666 unsigned int flags = 0;
1667 u32 itag = 0;
1668 struct rtable *rth;
1669 int err = -EINVAL;
1670 struct net *net = dev_net(dev);
1671 bool do_cache;
1672
1673 /* IP on this device is disabled. */
1674
1675 if (!in_dev)
1676 goto out;
1677
1678 /* Check for the most weird martians, which may not be detected
1679 by fib_lookup.
1680 */
1681
1682 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1683 goto martian_source;
1684
1685 res.fi = NULL;
1686 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1687 goto brd_input;
1688
1689 /* Accept zero addresses only to limited broadcast;
1690 * I do not even know whether to fix it or not. Waiting for complaints :-)
1691 */
1692 if (ipv4_is_zeronet(saddr))
1693 goto martian_source;
1694
1695 if (ipv4_is_zeronet(daddr))
1696 goto martian_destination;
1697
1698 /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
1699 * and calls it only once if daddr and/or saddr are loopback addresses
1700 */
1701 if (ipv4_is_loopback(daddr)) {
1702 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1703 goto martian_destination;
1704 } else if (ipv4_is_loopback(saddr)) {
1705 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1706 goto martian_source;
1707 }
1708
1709 /*
1710 * Now we are ready to route packet.
1711 */
1712 fl4.flowi4_oif = 0;
1713 fl4.flowi4_iif = dev->ifindex;
1714 fl4.flowi4_mark = skb->mark;
1715 fl4.flowi4_tos = tos;
1716 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1717 fl4.daddr = daddr;
1718 fl4.saddr = saddr;
1719 err = fib_lookup(net, &fl4, &res);
1720 if (err != 0) {
1721 if (!IN_DEV_FORWARD(in_dev))
1722 err = -EHOSTUNREACH;
1723 goto no_route;
1724 }
1725
1726 if (res.type == RTN_BROADCAST)
1727 goto brd_input;
1728
1729 if (res.type == RTN_LOCAL) {
1730 err = fib_validate_source(skb, saddr, daddr, tos,
1731 0, dev, in_dev, &itag);
1732 if (err < 0)
1733 goto martian_source_keep_err;
1734 goto local_input;
1735 }
1736
1737 if (!IN_DEV_FORWARD(in_dev)) {
1738 err = -EHOSTUNREACH;
1739 goto no_route;
1740 }
1741 if (res.type != RTN_UNICAST)
1742 goto martian_destination;
1743
1744 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1745 out: return err;
1746
1747 brd_input:
1748 if (skb->protocol != htons(ETH_P_IP))
1749 goto e_inval;
1750
1751 if (!ipv4_is_zeronet(saddr)) {
1752 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1753 in_dev, &itag);
1754 if (err < 0)
1755 goto martian_source_keep_err;
1756 }
1757 flags |= RTCF_BROADCAST;
1758 res.type = RTN_BROADCAST;
1759 RT_CACHE_STAT_INC(in_brd);
1760
1761 local_input:
1762 do_cache = false;
1763 if (res.fi) {
1764 if (!itag) {
1765 rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
1766 if (rt_cache_valid(rth)) {
1767 skb_dst_set_noref(skb, &rth->dst);
1768 err = 0;
1769 goto out;
1770 }
1771 do_cache = true;
1772 }
1773 }
1774
1775 rth = rt_dst_alloc(net->loopback_dev,
1776 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
1777 if (!rth)
1778 goto e_nobufs;
1779
1780 rth->dst.input= ip_local_deliver;
1781 rth->dst.output= ip_rt_bug;
1782 #ifdef CONFIG_IP_ROUTE_CLASSID
1783 rth->dst.tclassid = itag;
1784 #endif
1785
1786 rth->rt_genid = rt_genid_ipv4(net);
1787 rth->rt_flags = flags|RTCF_LOCAL;
1788 rth->rt_type = res.type;
1789 rth->rt_is_input = 1;
1790 rth->rt_iif = 0;
1791 rth->rt_pmtu = 0;
1792 rth->rt_gateway = 0;
1793 rth->rt_uses_gateway = 0;
1794 INIT_LIST_HEAD(&rth->rt_uncached);
1795 RT_CACHE_STAT_INC(in_slow_tot);
1796 if (res.type == RTN_UNREACHABLE) {
1797 rth->dst.input= ip_error;
1798 rth->dst.error= -err;
1799 rth->rt_flags &= ~RTCF_LOCAL;
1800 }
1801 if (do_cache) {
1802 if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
1803 rth->dst.flags |= DST_NOCACHE;
1804 rt_add_uncached_list(rth);
1805 }
1806 }
1807 skb_dst_set(skb, &rth->dst);
1808 err = 0;
1809 goto out;
1810
1811 no_route:
1812 RT_CACHE_STAT_INC(in_no_route);
1813 res.type = RTN_UNREACHABLE;
1814 res.fi = NULL;
1815 goto local_input;
1816
1817 /*
1818 * Do not cache martian addresses: they should be logged (RFC1812)
1819 */
1820 martian_destination:
1821 RT_CACHE_STAT_INC(in_martian_dst);
1822 #ifdef CONFIG_IP_ROUTE_VERBOSE
1823 if (IN_DEV_LOG_MARTIANS(in_dev))
1824 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1825 &daddr, &saddr, dev->name);
1826 #endif
1827
1828 e_inval:
1829 err = -EINVAL;
1830 goto out;
1831
1832 e_nobufs:
1833 err = -ENOBUFS;
1834 goto out;
1835
1836 martian_source:
1837 err = -EINVAL;
1838 martian_source_keep_err:
1839 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
1840 goto out;
1841 }
1842
1843 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1844 u8 tos, struct net_device *dev)
1845 {
1846 int res;
1847
1848 tos &= IPTOS_RT_MASK;
1849 rcu_read_lock();
1850
1851 /* Multicast recognition logic is moved from route cache to here.
1852 The problem was that too many Ethernet cards have broken/missing
1853 hardware multicast filters :-( As a result, a host on a multicast
1854 network acquires a lot of useless route cache entries, sort of
1855 SDR messages from all over the world. Now we try to get rid of them.
1856 Really, provided the software IP multicast filter is organized
1857 reasonably (at least, hashed), it does not result in a slowdown
1858 compared with route cache reject entries.
1859 Note that multicast routers are not affected, because
1860 a route cache entry is created eventually.
1861 */
1862 if (ipv4_is_multicast(daddr)) {
1863 struct in_device *in_dev = __in_dev_get_rcu(dev);
1864
1865 if (in_dev) {
1866 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1867 ip_hdr(skb)->protocol);
1868 if (our
1869 #ifdef CONFIG_IP_MROUTE
1870 ||
1871 (!ipv4_is_local_multicast(daddr) &&
1872 IN_DEV_MFORWARD(in_dev))
1873 #endif
1874 ) {
1875 int res = ip_route_input_mc(skb, daddr, saddr,
1876 tos, dev, our);
1877 rcu_read_unlock();
1878 return res;
1879 }
1880 }
1881 rcu_read_unlock();
1882 return -EINVAL;
1883 }
1884 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
1885 rcu_read_unlock();
1886 return res;
1887 }
1888 EXPORT_SYMBOL(ip_route_input_noref);
1889
1890 /* called with rcu_read_lock() */
1891 static struct rtable *__mkroute_output(const struct fib_result *res,
1892 const struct flowi4 *fl4, int orig_oif,
1893 struct net_device *dev_out,
1894 unsigned int flags)
1895 {
1896 struct fib_info *fi = res->fi;
1897 struct fib_nh_exception *fnhe;
1898 struct in_device *in_dev;
1899 u16 type = res->type;
1900 struct rtable *rth;
1901 bool do_cache;
1902
1903 in_dev = __in_dev_get_rcu(dev_out);
1904 if (!in_dev)
1905 return ERR_PTR(-EINVAL);
1906
1907 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1908 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1909 return ERR_PTR(-EINVAL);
1910
1911 if (ipv4_is_lbcast(fl4->daddr))
1912 type = RTN_BROADCAST;
1913 else if (ipv4_is_multicast(fl4->daddr))
1914 type = RTN_MULTICAST;
1915 else if (ipv4_is_zeronet(fl4->daddr))
1916 return ERR_PTR(-EINVAL);
1917
1918 if (dev_out->flags & IFF_LOOPBACK)
1919 flags |= RTCF_LOCAL;
1920
1921 do_cache = true;
1922 if (type == RTN_BROADCAST) {
1923 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1924 fi = NULL;
1925 } else if (type == RTN_MULTICAST) {
1926 flags |= RTCF_MULTICAST | RTCF_LOCAL;
1927 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1928 fl4->flowi4_proto))
1929 flags &= ~RTCF_LOCAL;
1930 else
1931 do_cache = false;
1932 		/* If a multicast route does not exist, use
1933 		 * the default one, but do not use a gateway in this case.
1934 		 * Yes, it is a hack.
1935 		 */
1936 if (fi && res->prefixlen < 4)
1937 fi = NULL;
1938 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
1939 (orig_oif != dev_out->ifindex)) {
1940 /* For local routes that require a particular output interface
1941 * we do not want to cache the result. Caching the result
1942 * causes incorrect behaviour when there are multiple source
1943 * addresses on the interface, the end result being that if the
1944 * intended recipient is waiting on that interface for the
1945 * packet he won't receive it because it will be delivered on
1946 * the loopback interface and the IP_PKTINFO ipi_ifindex will
1947 * be set to the loopback interface as well.
1948 */
1949 fi = NULL;
1950 }
1951
1952 fnhe = NULL;
1953 do_cache &= fi != NULL;
1954 if (do_cache) {
1955 struct rtable __rcu **prth;
1956 struct fib_nh *nh = &FIB_RES_NH(*res);
1957
1958 fnhe = find_exception(nh, fl4->daddr);
1959 if (fnhe)
1960 prth = &fnhe->fnhe_rth_output;
1961 else {
1962 if (unlikely(fl4->flowi4_flags &
1963 FLOWI_FLAG_KNOWN_NH &&
1964 !(nh->nh_gw &&
1965 nh->nh_scope == RT_SCOPE_LINK))) {
1966 do_cache = false;
1967 goto add;
1968 }
1969 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
1970 }
1971 rth = rcu_dereference(*prth);
1972 if (rt_cache_valid(rth)) {
1973 dst_hold(&rth->dst);
1974 return rth;
1975 }
1976 }
1977
1978 add:
1979 rth = rt_dst_alloc(dev_out,
1980 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1981 IN_DEV_CONF_GET(in_dev, NOXFRM),
1982 do_cache);
1983 if (!rth)
1984 return ERR_PTR(-ENOBUFS);
1985
1986 rth->dst.output = ip_output;
1987
1988 rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
1989 rth->rt_flags = flags;
1990 rth->rt_type = type;
1991 rth->rt_is_input = 0;
1992 rth->rt_iif = orig_oif ? : 0;
1993 rth->rt_pmtu = 0;
1994 rth->rt_gateway = 0;
1995 rth->rt_uses_gateway = 0;
1996 INIT_LIST_HEAD(&rth->rt_uncached);
1997
1998 RT_CACHE_STAT_INC(out_slow_tot);
1999
2000 if (flags & RTCF_LOCAL)
2001 rth->dst.input = ip_local_deliver;
2002 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2003 if (flags & RTCF_LOCAL &&
2004 !(dev_out->flags & IFF_LOOPBACK)) {
2005 rth->dst.output = ip_mc_output;
2006 RT_CACHE_STAT_INC(out_slow_mc);
2007 }
2008 #ifdef CONFIG_IP_MROUTE
2009 if (type == RTN_MULTICAST) {
2010 if (IN_DEV_MFORWARD(in_dev) &&
2011 !ipv4_is_local_multicast(fl4->daddr)) {
2012 rth->dst.input = ip_mr_input;
2013 rth->dst.output = ip_mc_output;
2014 }
2015 }
2016 #endif
2017 }
2018
2019 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
2020
2021 return rth;
2022 }
2023
2024 /*
2025 * Major route resolver routine.
2026 */
2027
2028 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2029 {
2030 struct net_device *dev_out = NULL;
2031 __u8 tos = RT_FL_TOS(fl4);
2032 unsigned int flags = 0;
2033 struct fib_result res;
2034 struct rtable *rth;
2035 int orig_oif;
2036
2037 res.tclassid = 0;
2038 res.fi = NULL;
2039 res.table = NULL;
2040
2041 orig_oif = fl4->flowi4_oif;
2042
2043 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2044 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2045 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2046 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2047
2048 rcu_read_lock();
2049 if (fl4->saddr) {
2050 rth = ERR_PTR(-EINVAL);
2051 if (ipv4_is_multicast(fl4->saddr) ||
2052 ipv4_is_lbcast(fl4->saddr) ||
2053 ipv4_is_zeronet(fl4->saddr))
2054 goto out;
2055
2056 		/* I removed the check for oif == dev_out->oif here.
2057 		   It was wrong for two reasons:
2058 		   1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2059 		      is assigned to multiple interfaces.
2060 		   2. Moreover, we are allowed to send packets with the saddr
2061 		      of another iface. --ANK
2062 		 */
2063
2064 if (fl4->flowi4_oif == 0 &&
2065 (ipv4_is_multicast(fl4->daddr) ||
2066 ipv4_is_lbcast(fl4->daddr))) {
2067 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2068 dev_out = __ip_dev_find(net, fl4->saddr, false);
2069 if (dev_out == NULL)
2070 goto out;
2071
2072 			/* Special hack: the user can direct multicasts
2073 			   and limited broadcasts via the necessary interface
2074 			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2075 			   This hack is not just for fun, it allows
2076 			   vic, vat and friends to work.
2077 			   They bind a socket to loopback, set the ttl to zero
2078 			   and expect that it will work.
2079 			   From the viewpoint of the routing cache they are broken,
2080 			   because we are not allowed to build a multicast path
2081 			   with a loopback source addr (the routing cache
2082 			   cannot know that the ttl is zero, so the packet
2083 			   will not leave this host and the route is valid).
2084 			   Luckily, this hack is a good workaround.
2085 			 */
2086
2087 fl4->flowi4_oif = dev_out->ifindex;
2088 goto make_route;
2089 }
2090
2091 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2092 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2093 if (!__ip_dev_find(net, fl4->saddr, false))
2094 goto out;
2095 }
2096 }
2097
2098
2099 if (fl4->flowi4_oif) {
2100 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2101 rth = ERR_PTR(-ENODEV);
2102 if (dev_out == NULL)
2103 goto out;
2104
2105 /* RACE: Check return value of inet_select_addr instead. */
2106 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2107 rth = ERR_PTR(-ENETUNREACH);
2108 goto out;
2109 }
2110 if (ipv4_is_local_multicast(fl4->daddr) ||
2111 ipv4_is_lbcast(fl4->daddr)) {
2112 if (!fl4->saddr)
2113 fl4->saddr = inet_select_addr(dev_out, 0,
2114 RT_SCOPE_LINK);
2115 goto make_route;
2116 }
2117 if (!fl4->saddr) {
2118 if (ipv4_is_multicast(fl4->daddr))
2119 fl4->saddr = inet_select_addr(dev_out, 0,
2120 fl4->flowi4_scope);
2121 else if (!fl4->daddr)
2122 fl4->saddr = inet_select_addr(dev_out, 0,
2123 RT_SCOPE_HOST);
2124 }
2125 }
2126
2127 if (!fl4->daddr) {
2128 fl4->daddr = fl4->saddr;
2129 if (!fl4->daddr)
2130 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2131 dev_out = net->loopback_dev;
2132 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2133 res.type = RTN_LOCAL;
2134 flags |= RTCF_LOCAL;
2135 goto make_route;
2136 }
2137
2138 if (fib_lookup(net, fl4, &res)) {
2139 res.fi = NULL;
2140 res.table = NULL;
2141 if (fl4->flowi4_oif) {
2142 			/* Apparently, the routing tables are wrong.  Assume
2143 			   that the destination is on-link.
2144 
2145 			   WHY? DW.
2146 			   Because we are allowed to send to an iface
2147 			   even if it has NO routes and NO assigned
2148 			   addresses.  When oif is specified, the routing
2149 			   tables are looked up with only one purpose:
2150 			   to catch whether the destination is gatewayed
2151 			   rather than direct.  Moreover, if MSG_DONTROUTE is set,
2152 			   we send the packet, ignoring both the routing tables
2153 			   and the ifaddr state. --ANK
2154 
2155 
2156 			   We could do this even when oif is unknown
2157 			   (IPv6 likely does), but we do not.
2158 			 */
2159
2160 if (fl4->saddr == 0)
2161 fl4->saddr = inet_select_addr(dev_out, 0,
2162 RT_SCOPE_LINK);
2163 res.type = RTN_UNICAST;
2164 goto make_route;
2165 }
2166 rth = ERR_PTR(-ENETUNREACH);
2167 goto out;
2168 }
2169
2170 if (res.type == RTN_LOCAL) {
2171 if (!fl4->saddr) {
2172 if (res.fi->fib_prefsrc)
2173 fl4->saddr = res.fi->fib_prefsrc;
2174 else
2175 fl4->saddr = fl4->daddr;
2176 }
2177 dev_out = net->loopback_dev;
2178 fl4->flowi4_oif = dev_out->ifindex;
2179 flags |= RTCF_LOCAL;
2180 goto make_route;
2181 }
2182
2183 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2184 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2185 fib_select_multipath(&res);
2186 else
2187 #endif
2188 if (!res.prefixlen &&
2189 res.table->tb_num_default > 1 &&
2190 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2191 fib_select_default(&res);
2192
2193 if (!fl4->saddr)
2194 fl4->saddr = FIB_RES_PREFSRC(net, res);
2195
2196 dev_out = FIB_RES_DEV(res);
2197 fl4->flowi4_oif = dev_out->ifindex;
2198
2199
2200 make_route:
2201 rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
2202
2203 out:
2204 rcu_read_unlock();
2205 return rth;
2206 }
2207 EXPORT_SYMBOL_GPL(__ip_route_output_key);
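/* Illustrative only: callers normally reach this resolver through
 * ip_route_output_key() or ip_route_output_flow().  A minimal lookup,
 * assuming the caller already knows the destination, looks roughly like:
 *
 *	struct flowi4 fl4 = {
 *		.daddr		= dst_ip,
 *		.flowi4_tos	= RT_TOS(tos),
 *		.flowi4_oif	= 0,
 *	};
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	... use rt->dst, then drop the reference with ip_rt_put(rt);
 *
 * dst_ip and tos stand in for the caller's own values.
 */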
2208
2209 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2210 {
2211 return NULL;
2212 }
2213
2214 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2215 {
2216 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2217
2218 return mtu ? : dst->dev->mtu;
2219 }
2220
2221 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2222 struct sk_buff *skb, u32 mtu)
2223 {
2224 }
2225
2226 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2227 struct sk_buff *skb)
2228 {
2229 }
2230
2231 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2232 unsigned long old)
2233 {
2234 return NULL;
2235 }
2236
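/* The "blackhole" dst_ops below back the route copies created by
 * ipv4_blackhole_route(): every callback is a stub, so the copy keeps the
 * original route's metrics and device but never re-checks, caches, updates
 * PMTU or follows redirects.  The xfrm code typically requests such a copy
 * when it needs a harmless stand-in dst while packets sent over it are
 * simply discarded.
 */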
2237 static struct dst_ops ipv4_dst_blackhole_ops = {
2238 .family = AF_INET,
2239 .protocol = cpu_to_be16(ETH_P_IP),
2240 .check = ipv4_blackhole_dst_check,
2241 .mtu = ipv4_blackhole_mtu,
2242 .default_advmss = ipv4_default_advmss,
2243 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2244 .redirect = ipv4_rt_blackhole_redirect,
2245 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2246 .neigh_lookup = ipv4_neigh_lookup,
2247 };
2248
2249 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2250 {
2251 struct rtable *ort = (struct rtable *) dst_orig;
2252 struct rtable *rt;
2253
2254 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2255 if (rt) {
2256 struct dst_entry *new = &rt->dst;
2257
2258 new->__use = 1;
2259 new->input = dst_discard;
2260 new->output = dst_discard_sk;
2261
2262 new->dev = ort->dst.dev;
2263 if (new->dev)
2264 dev_hold(new->dev);
2265
2266 rt->rt_is_input = ort->rt_is_input;
2267 rt->rt_iif = ort->rt_iif;
2268 rt->rt_pmtu = ort->rt_pmtu;
2269
2270 rt->rt_genid = rt_genid_ipv4(net);
2271 rt->rt_flags = ort->rt_flags;
2272 rt->rt_type = ort->rt_type;
2273 rt->rt_gateway = ort->rt_gateway;
2274 rt->rt_uses_gateway = ort->rt_uses_gateway;
2275
2276 INIT_LIST_HEAD(&rt->rt_uncached);
2277
2278 dst_free(new);
2279 }
2280
2281 dst_release(dst_orig);
2282
2283 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2284 }
2285
2286 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2287 struct sock *sk)
2288 {
2289 struct rtable *rt = __ip_route_output_key(net, flp4);
2290
2291 if (IS_ERR(rt))
2292 return rt;
2293
2294 if (flp4->flowi4_proto)
2295 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2296 flowi4_to_flowi(flp4),
2297 sk, 0);
2298
2299 return rt;
2300 }
2301 EXPORT_SYMBOL_GPL(ip_route_output_flow);
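/* ip_route_output_flow() is the socket-level variant of the resolver above:
 * after the plain routing lookup it runs the result through
 * xfrm_lookup_route() whenever a transport protocol is set, so an IPsec
 * policy can transform or veto the route before the socket uses it.
 */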
2302
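/* rt_fill_info() encodes one resolved route as an RTM_NEWROUTE netlink
 * message: the rtmsg header plus RTA_DST/RTA_SRC/RTA_OIF/RTA_GATEWAY/
 * metrics attributes describing the dst attached to the skb.  It is the
 * reply builder used by inet_rtm_getroute() below.
 */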
2303 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2304 struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2305 u32 seq, int event, int nowait, unsigned int flags)
2306 {
2307 struct rtable *rt = skb_rtable(skb);
2308 struct rtmsg *r;
2309 struct nlmsghdr *nlh;
2310 unsigned long expires = 0;
2311 u32 error;
2312 u32 metrics[RTAX_MAX];
2313
2314 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
2315 if (nlh == NULL)
2316 return -EMSGSIZE;
2317
2318 r = nlmsg_data(nlh);
2319 r->rtm_family = AF_INET;
2320 r->rtm_dst_len = 32;
2321 r->rtm_src_len = 0;
2322 r->rtm_tos = fl4->flowi4_tos;
2323 r->rtm_table = RT_TABLE_MAIN;
2324 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2325 goto nla_put_failure;
2326 r->rtm_type = rt->rt_type;
2327 r->rtm_scope = RT_SCOPE_UNIVERSE;
2328 r->rtm_protocol = RTPROT_UNSPEC;
2329 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2330 if (rt->rt_flags & RTCF_NOTIFY)
2331 r->rtm_flags |= RTM_F_NOTIFY;
2332 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2333 r->rtm_flags |= RTCF_DOREDIRECT;
2334
2335 if (nla_put_be32(skb, RTA_DST, dst))
2336 goto nla_put_failure;
2337 if (src) {
2338 r->rtm_src_len = 32;
2339 if (nla_put_be32(skb, RTA_SRC, src))
2340 goto nla_put_failure;
2341 }
2342 if (rt->dst.dev &&
2343 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2344 goto nla_put_failure;
2345 #ifdef CONFIG_IP_ROUTE_CLASSID
2346 if (rt->dst.tclassid &&
2347 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2348 goto nla_put_failure;
2349 #endif
2350 if (!rt_is_input_route(rt) &&
2351 fl4->saddr != src) {
2352 if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
2353 goto nla_put_failure;
2354 }
2355 if (rt->rt_uses_gateway &&
2356 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
2357 goto nla_put_failure;
2358
2359 expires = rt->dst.expires;
2360 if (expires) {
2361 unsigned long now = jiffies;
2362
2363 if (time_before(now, expires))
2364 expires -= now;
2365 else
2366 expires = 0;
2367 }
2368
2369 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2370 if (rt->rt_pmtu && expires)
2371 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2372 if (rtnetlink_put_metrics(skb, metrics) < 0)
2373 goto nla_put_failure;
2374
2375 if (fl4->flowi4_mark &&
2376 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2377 goto nla_put_failure;
2378
2379 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2380 nla_put_u32(skb, RTA_UID,
2381 from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
2382 goto nla_put_failure;
2383
2384 error = rt->dst.error;
2385
2386 if (rt_is_input_route(rt)) {
2387 #ifdef CONFIG_IP_MROUTE
2388 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2389 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2390 int err = ipmr_get_route(net, skb,
2391 fl4->saddr, fl4->daddr,
2392 r, nowait, portid);
2393
2394 if (err <= 0) {
2395 if (!nowait) {
2396 if (err == 0)
2397 return 0;
2398 goto nla_put_failure;
2399 } else {
2400 if (err == -EMSGSIZE)
2401 goto nla_put_failure;
2402 error = err;
2403 }
2404 }
2405 } else
2406 #endif
2407 if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2408 goto nla_put_failure;
2409 }
2410
2411 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2412 goto nla_put_failure;
2413
2414 return nlmsg_end(skb, nlh);
2415
2416 nla_put_failure:
2417 nlmsg_cancel(skb, nlh);
2418 return -EMSGSIZE;
2419 }
2420
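/* inet_rtm_getroute() services RTM_GETROUTE requests (what "ip route get"
 * sends from user space).  With RTA_IIF it fakes a received packet and runs
 * the input path via ip_route_input(); otherwise it performs an output
 * lookup with ip_route_output_key().  The resolved route is reported back
 * with rt_fill_info().
 *
 * Illustrative user-space trigger (assuming iproute2 is installed):
 *
 *	$ ip route get 192.0.2.1
 */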
2421 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2422 {
2423 struct net *net = sock_net(in_skb->sk);
2424 struct rtmsg *rtm;
2425 struct nlattr *tb[RTA_MAX+1];
2426 struct rtable *rt = NULL;
2427 struct flowi4 fl4;
2428 __be32 dst = 0;
2429 __be32 src = 0;
2430 u32 iif;
2431 int err;
2432 int mark;
2433 struct sk_buff *skb;
2434 kuid_t uid;
2435
2436 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2437 if (err < 0)
2438 goto errout;
2439
2440 rtm = nlmsg_data(nlh);
2441
2442 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2443 if (skb == NULL) {
2444 err = -ENOBUFS;
2445 goto errout;
2446 }
2447
2448 	/* Reserve room for dummy headers; this skb can pass
2449 	   through a good chunk of the routing engine.
2450 	 */
2451 skb_reset_mac_header(skb);
2452 skb_reset_network_header(skb);
2453
2454 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2455 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2456 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2457
2458 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2459 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2460 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2461 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2462 if (tb[RTA_UID])
2463 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
2464 else
2465 uid = (iif ? INVALID_UID : current_uid());
2466
2467 memset(&fl4, 0, sizeof(fl4));
2468 fl4.daddr = dst;
2469 fl4.saddr = src;
2470 fl4.flowi4_tos = rtm->rtm_tos;
2471 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2472 fl4.flowi4_mark = mark;
2473 fl4.flowi4_uid = uid;
2474
2475 if (iif) {
2476 struct net_device *dev;
2477
2478 dev = __dev_get_by_index(net, iif);
2479 if (dev == NULL) {
2480 err = -ENODEV;
2481 goto errout_free;
2482 }
2483
2484 skb->protocol = htons(ETH_P_IP);
2485 skb->dev = dev;
2486 skb->mark = mark;
2487 local_bh_disable();
2488 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2489 local_bh_enable();
2490
2491 rt = skb_rtable(skb);
2492 if (err == 0 && rt->dst.error)
2493 err = -rt->dst.error;
2494 } else {
2495 rt = ip_route_output_key(net, &fl4);
2496
2497 err = 0;
2498 if (IS_ERR(rt))
2499 err = PTR_ERR(rt);
2500 }
2501
2502 if (err)
2503 goto errout_free;
2504
2505 skb_dst_set(skb, &rt->dst);
2506 if (rtm->rtm_flags & RTM_F_NOTIFY)
2507 rt->rt_flags |= RTCF_NOTIFY;
2508
2509 err = rt_fill_info(net, dst, src, &fl4, skb,
2510 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2511 RTM_NEWROUTE, 0, 0);
2512 if (err <= 0)
2513 goto errout_free;
2514
2515 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2516 errout:
2517 return err;
2518
2519 errout_free:
2520 kfree_skb(skb);
2521 goto errout;
2522 }
2523
2524 void ip_rt_multicast_event(struct in_device *in_dev)
2525 {
2526 rt_cache_flush(dev_net(in_dev->dev));
2527 }
2528
2529 #ifdef CONFIG_SYSCTL
2530 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
2531 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2532 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2533 static int ip_rt_gc_elasticity __read_mostly = 8;
2534
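/* Write-only sysctl: writing anything to /proc/sys/net/ipv4/route/flush
 * invalidates every cached route and nexthop exception by bumping the
 * per-netns generation counters; reading the file just returns -EINVAL.
 *
 * Illustrative administrative use (any value works, it is write-triggered):
 *
 *	# echo 1 > /proc/sys/net/ipv4/route/flush
 */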
2535 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2536 void __user *buffer,
2537 size_t *lenp, loff_t *ppos)
2538 {
2539 struct net *net = (struct net *)__ctl->extra1;
2540
2541 if (write) {
2542 rt_cache_flush(net);
2543 fnhe_genid_bump(net);
2544 return 0;
2545 }
2546
2547 return -EINVAL;
2548 }
2549
2550 static struct ctl_table ipv4_route_table[] = {
2551 {
2552 .procname = "gc_thresh",
2553 .data = &ipv4_dst_ops.gc_thresh,
2554 .maxlen = sizeof(int),
2555 .mode = 0644,
2556 .proc_handler = proc_dointvec,
2557 },
2558 {
2559 .procname = "max_size",
2560 .data = &ip_rt_max_size,
2561 .maxlen = sizeof(int),
2562 .mode = 0644,
2563 .proc_handler = proc_dointvec,
2564 },
2565 {
2566 /* Deprecated. Use gc_min_interval_ms */
2567
2568 .procname = "gc_min_interval",
2569 .data = &ip_rt_gc_min_interval,
2570 .maxlen = sizeof(int),
2571 .mode = 0644,
2572 .proc_handler = proc_dointvec_jiffies,
2573 },
2574 {
2575 .procname = "gc_min_interval_ms",
2576 .data = &ip_rt_gc_min_interval,
2577 .maxlen = sizeof(int),
2578 .mode = 0644,
2579 .proc_handler = proc_dointvec_ms_jiffies,
2580 },
2581 {
2582 .procname = "gc_timeout",
2583 .data = &ip_rt_gc_timeout,
2584 .maxlen = sizeof(int),
2585 .mode = 0644,
2586 .proc_handler = proc_dointvec_jiffies,
2587 },
2588 {
2589 .procname = "gc_interval",
2590 .data = &ip_rt_gc_interval,
2591 .maxlen = sizeof(int),
2592 .mode = 0644,
2593 .proc_handler = proc_dointvec_jiffies,
2594 },
2595 {
2596 .procname = "redirect_load",
2597 .data = &ip_rt_redirect_load,
2598 .maxlen = sizeof(int),
2599 .mode = 0644,
2600 .proc_handler = proc_dointvec,
2601 },
2602 {
2603 .procname = "redirect_number",
2604 .data = &ip_rt_redirect_number,
2605 .maxlen = sizeof(int),
2606 .mode = 0644,
2607 .proc_handler = proc_dointvec,
2608 },
2609 {
2610 .procname = "redirect_silence",
2611 .data = &ip_rt_redirect_silence,
2612 .maxlen = sizeof(int),
2613 .mode = 0644,
2614 .proc_handler = proc_dointvec,
2615 },
2616 {
2617 .procname = "error_cost",
2618 .data = &ip_rt_error_cost,
2619 .maxlen = sizeof(int),
2620 .mode = 0644,
2621 .proc_handler = proc_dointvec,
2622 },
2623 {
2624 .procname = "error_burst",
2625 .data = &ip_rt_error_burst,
2626 .maxlen = sizeof(int),
2627 .mode = 0644,
2628 .proc_handler = proc_dointvec,
2629 },
2630 {
2631 .procname = "gc_elasticity",
2632 .data = &ip_rt_gc_elasticity,
2633 .maxlen = sizeof(int),
2634 .mode = 0644,
2635 .proc_handler = proc_dointvec,
2636 },
2637 {
2638 .procname = "mtu_expires",
2639 .data = &ip_rt_mtu_expires,
2640 .maxlen = sizeof(int),
2641 .mode = 0644,
2642 .proc_handler = proc_dointvec_jiffies,
2643 },
2644 {
2645 .procname = "min_pmtu",
2646 .data = &ip_rt_min_pmtu,
2647 .maxlen = sizeof(int),
2648 .mode = 0644,
2649 .proc_handler = proc_dointvec,
2650 },
2651 {
2652 .procname = "min_adv_mss",
2653 .data = &ip_rt_min_advmss,
2654 .maxlen = sizeof(int),
2655 .mode = 0644,
2656 .proc_handler = proc_dointvec,
2657 },
2658 { }
2659 };
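/* The table above is exposed under /proc/sys/net/ipv4/route/ (i.e. the
 * net.ipv4.route.* sysctl namespace).  Illustrative tuning, values are
 * examples only:
 *
 *	# sysctl -w net.ipv4.route.min_pmtu=552
 *	# sysctl net.ipv4.route.mtu_expires
 */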
2660
2661 static struct ctl_table ipv4_route_flush_table[] = {
2662 {
2663 .procname = "flush",
2664 .maxlen = sizeof(int),
2665 .mode = 0200,
2666 .proc_handler = ipv4_sysctl_rtcache_flush,
2667 },
2668 { },
2669 };
2670
2671 static __net_init int sysctl_route_net_init(struct net *net)
2672 {
2673 struct ctl_table *tbl;
2674
2675 tbl = ipv4_route_flush_table;
2676 if (!net_eq(net, &init_net)) {
2677 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2678 if (tbl == NULL)
2679 goto err_dup;
2680
2681 /* Don't export sysctls to unprivileged users */
2682 if (net->user_ns != &init_user_ns)
2683 tbl[0].procname = NULL;
2684 }
2685 tbl[0].extra1 = net;
2686
2687 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2688 if (net->ipv4.route_hdr == NULL)
2689 goto err_reg;
2690 return 0;
2691
2692 err_reg:
2693 if (tbl != ipv4_route_flush_table)
2694 kfree(tbl);
2695 err_dup:
2696 return -ENOMEM;
2697 }
2698
2699 static __net_exit void sysctl_route_net_exit(struct net *net)
2700 {
2701 struct ctl_table *tbl;
2702
2703 tbl = net->ipv4.route_hdr->ctl_table_arg;
2704 unregister_net_sysctl_table(net->ipv4.route_hdr);
2705 BUG_ON(tbl == ipv4_route_flush_table);
2706 kfree(tbl);
2707 }
2708
2709 static __net_initdata struct pernet_operations sysctl_route_ops = {
2710 .init = sysctl_route_net_init,
2711 .exit = sysctl_route_net_exit,
2712 };
2713 #endif
2714
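/* Per-netns generation counters: rt_genid is stamped into every rtable at
 * creation (via rt_genid_ipv4()) and compared by rt_is_expired() on each
 * cache hit, so a single atomic increment from rt_cache_flush() retires all
 * cached routes at once; fnhe_genid plays the same role for nexthop
 * exceptions.
 */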
2715 static __net_init int rt_genid_init(struct net *net)
2716 {
2717 atomic_set(&net->ipv4.rt_genid, 0);
2718 atomic_set(&net->fnhe_genid, 0);
2719 get_random_bytes(&net->ipv4.dev_addr_genid,
2720 sizeof(net->ipv4.dev_addr_genid));
2721 return 0;
2722 }
2723
2724 static __net_initdata struct pernet_operations rt_genid_ops = {
2725 .init = rt_genid_init,
2726 };
2727
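/* Each netns also gets an inetpeer base: long-lived per-remote-host state
 * (ICMP rate limiting and similar bookkeeping) that outlives individual
 * routes.  These pernet hooks just allocate and tear down that tree.
 */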
2728 static int __net_init ipv4_inetpeer_init(struct net *net)
2729 {
2730 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2731
2732 if (!bp)
2733 return -ENOMEM;
2734 inet_peer_base_init(bp);
2735 net->ipv4.peers = bp;
2736 return 0;
2737 }
2738
2739 static void __net_exit ipv4_inetpeer_exit(struct net *net)
2740 {
2741 struct inet_peer_base *bp = net->ipv4.peers;
2742
2743 net->ipv4.peers = NULL;
2744 inetpeer_invalidate_tree(bp);
2745 kfree(bp);
2746 }
2747
2748 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
2749 .init = ipv4_inetpeer_init,
2750 .exit = ipv4_inetpeer_exit,
2751 };
2752
2753 #ifdef CONFIG_IP_ROUTE_CLASSID
2754 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
2755 #endif /* CONFIG_IP_ROUTE_CLASSID */
2756
2757 int __init ip_rt_init(void)
2758 {
2759 int rc = 0;
2760
2761 ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
2762 if (!ip_idents)
2763 panic("IP: failed to allocate ip_idents\n");
2764
2765 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
2766
2767 #ifdef CONFIG_IP_ROUTE_CLASSID
2768 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
2769 if (!ip_rt_acct)
2770 panic("IP: failed to allocate ip_rt_acct\n");
2771 #endif
2772
2773 ipv4_dst_ops.kmem_cachep =
2774 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
2775 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2776
2777 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
2778
2779 if (dst_entries_init(&ipv4_dst_ops) < 0)
2780 panic("IP: failed to allocate ipv4_dst_ops counter\n");
2781
2782 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
2783 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
2784
2785 ipv4_dst_ops.gc_thresh = ~0;
2786 ip_rt_max_size = INT_MAX;
2787
2788 devinet_init();
2789 ip_fib_init();
2790
2791 if (ip_rt_proc_init())
2792 pr_err("Unable to create route proc files\n");
2793 #ifdef CONFIG_XFRM
2794 xfrm_init();
2795 xfrm4_init();
2796 #endif
2797 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2798
2799 #ifdef CONFIG_SYSCTL
2800 register_pernet_subsys(&sysctl_route_ops);
2801 #endif
2802 register_pernet_subsys(&rt_genid_ops);
2803 register_pernet_subsys(&ipv4_inetpeer_ops);
2804 return rc;
2805 }
2806
2807 #ifdef CONFIG_SYSCTL
2808 /*
2809 * We really need to sanitize the damn ipv4 init order, then all
2810 * this nonsense will go away.
2811 */
2812 void __init ip_static_sysctl_init(void)
2813 {
2814 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
2815 }
2816 #endif
2817