Lines Matching refs:rt

452 const struct rtable *rt; in ipv4_neigh_lookup() local
455 rt = (const struct rtable *) dst; in ipv4_neigh_lookup()
456 if (rt->rt_gateway) in ipv4_neigh_lookup()
457 pkey = (const __be32 *) &rt->rt_gateway; in ipv4_neigh_lookup()
580 static inline void rt_free(struct rtable *rt) in rt_free() argument
582 call_rcu(&rt->dst.rcu_head, dst_rcu_free); in rt_free()
589 struct rtable *rt; in fnhe_flush_routes() local
591 rt = rcu_dereference(fnhe->fnhe_rth_input); in fnhe_flush_routes()
592 if (rt) { in fnhe_flush_routes()
594 rt_free(rt); in fnhe_flush_routes()
596 rt = rcu_dereference(fnhe->fnhe_rth_output); in fnhe_flush_routes()
597 if (rt) { in fnhe_flush_routes()
599 rt_free(rt); in fnhe_flush_routes()
634 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) in fill_route_from_fnhe() argument
636 rt->rt_pmtu = fnhe->fnhe_pmtu; in fill_route_from_fnhe()
637 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked; in fill_route_from_fnhe()
638 rt->dst.expires = fnhe->fnhe_expires; in fill_route_from_fnhe()
641 rt->rt_flags |= RTCF_REDIRECTED; in fill_route_from_fnhe()
642 rt->rt_gateway = fnhe->fnhe_gw; in fill_route_from_fnhe()
643 rt->rt_uses_gateway = 1; in fill_route_from_fnhe()
652 struct rtable *rt; in update_or_create_fnhe() local
691 rt = rcu_dereference(fnhe->fnhe_rth_input); in update_or_create_fnhe()
692 if (rt) in update_or_create_fnhe()
693 fill_route_from_fnhe(rt, fnhe); in update_or_create_fnhe()
694 rt = rcu_dereference(fnhe->fnhe_rth_output); in update_or_create_fnhe()
695 if (rt) in update_or_create_fnhe()
696 fill_route_from_fnhe(rt, fnhe); in update_or_create_fnhe()
726 rt = rcu_dereference(nh->nh_rth_input); in update_or_create_fnhe()
727 if (rt) in update_or_create_fnhe()
728 rt->dst.obsolete = DST_OBSOLETE_KILL; in update_or_create_fnhe()
733 rt = rcu_dereference(*prt); in update_or_create_fnhe()
734 if (rt) in update_or_create_fnhe()
735 rt->dst.obsolete = DST_OBSOLETE_KILL; in update_or_create_fnhe()
745 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4, in __ip_do_redirect() argument
767 if (rt->rt_gateway != old_gw) in __ip_do_redirect()
790 n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); in __ip_do_redirect()
792 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); in __ip_do_redirect()
805 rt->dst.obsolete = DST_OBSOLETE_KILL; in __ip_do_redirect()
830 struct rtable *rt; in ip_do_redirect() local
839 rt = (struct rtable *) dst; in ip_do_redirect()
842 __ip_do_redirect(rt, skb, &fl4, true); in ip_do_redirect()
847 struct rtable *rt = (struct rtable *)dst; in ipv4_negative_advice() local
850 if (rt) { in ipv4_negative_advice()
852 ip_rt_put(rt); in ipv4_negative_advice()
854 } else if ((rt->rt_flags & RTCF_REDIRECTED) || in ipv4_negative_advice()
855 rt->dst.expires) { in ipv4_negative_advice()
856 ip_rt_put(rt); in ipv4_negative_advice()
881 struct rtable *rt = skb_rtable(skb); in ip_rt_send_redirect() local
889 in_dev = __in_dev_get_rcu(rt->dst.dev); in ip_rt_send_redirect()
895 vif = l3mdev_master_ifindex_rcu(rt->dst.dev); in ip_rt_send_redirect()
898 net = dev_net(rt->dst.dev); in ip_rt_send_redirect()
902 rt_nexthop(rt, ip_hdr(skb)->daddr)); in ip_rt_send_redirect()
929 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); in ip_rt_send_redirect()
949 struct rtable *rt = skb_rtable(skb); in ip_error() local
960 net = dev_net(rt->dst.dev); in ip_error()
962 switch (rt->dst.error) { in ip_error()
974 switch (rt->dst.error) { in ip_error()
1013 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) in __ip_rt_update_pmtu() argument
1015 struct dst_entry *dst = &rt->dst; in __ip_rt_update_pmtu()
1031 if (rt->rt_pmtu == mtu && !lock && in __ip_rt_update_pmtu()
1048 struct rtable *rt = (struct rtable *) dst; in ip_rt_update_pmtu() local
1052 __ip_rt_update_pmtu(rt, &fl4, mtu); in ip_rt_update_pmtu()
1060 struct rtable *rt; in ipv4_update_pmtu() local
1067 rt = __ip_route_output_key(net, &fl4); in ipv4_update_pmtu()
1068 if (!IS_ERR(rt)) { in ipv4_update_pmtu()
1069 __ip_rt_update_pmtu(rt, &fl4, mtu); in ipv4_update_pmtu()
1070 ip_rt_put(rt); in ipv4_update_pmtu()
1079 struct rtable *rt; in __ipv4_sk_update_pmtu() local
1086 rt = __ip_route_output_key(sock_net(sk), &fl4); in __ipv4_sk_update_pmtu()
1087 if (!IS_ERR(rt)) { in __ipv4_sk_update_pmtu()
1088 __ip_rt_update_pmtu(rt, &fl4, mtu); in __ipv4_sk_update_pmtu()
1089 ip_rt_put(rt); in __ipv4_sk_update_pmtu()
1097 struct rtable *rt; in ipv4_sk_update_pmtu() local
1116 rt = (struct rtable *)odst; in ipv4_sk_update_pmtu()
1118 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); in ipv4_sk_update_pmtu()
1119 if (IS_ERR(rt)) in ipv4_sk_update_pmtu()
1125 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); in ipv4_sk_update_pmtu()
1127 if (!dst_check(&rt->dst, 0)) { in ipv4_sk_update_pmtu()
1129 dst_release(&rt->dst); in ipv4_sk_update_pmtu()
1131 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); in ipv4_sk_update_pmtu()
1132 if (IS_ERR(rt)) in ipv4_sk_update_pmtu()
1139 sk_dst_set(sk, &rt->dst); in ipv4_sk_update_pmtu()
1152 struct rtable *rt; in ipv4_redirect() local
1156 rt = __ip_route_output_key(net, &fl4); in ipv4_redirect()
1157 if (!IS_ERR(rt)) { in ipv4_redirect()
1158 __ip_do_redirect(rt, skb, &fl4, false); in ipv4_redirect()
1159 ip_rt_put(rt); in ipv4_redirect()
1168 struct rtable *rt; in ipv4_sk_redirect() local
1172 rt = __ip_route_output_key(net, &fl4); in ipv4_sk_redirect()
1173 if (!IS_ERR(rt)) { in ipv4_sk_redirect()
1174 __ip_do_redirect(rt, skb, &fl4, false); in ipv4_sk_redirect()
1175 ip_rt_put(rt); in ipv4_sk_redirect()
1182 struct rtable *rt = (struct rtable *) dst; in ipv4_dst_check() local
1192 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt)) in ipv4_dst_check()
1227 struct rtable *rt; in ipv4_link_failure() local
1231 rt = skb_rtable(skb); in ipv4_link_failure()
1232 if (rt) in ipv4_link_failure()
1233 dst_set_expires(&rt->dst, 0); in ipv4_link_failure()
1255 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) in ip_rt_get_source() argument
1259 if (rt_is_output_route(rt)) in ip_rt_get_source()
1272 fl4.flowi4_oif = rt->dst.dev->ifindex; in ip_rt_get_source()
1277 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0) in ip_rt_get_source()
1278 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res); in ip_rt_get_source()
1280 src = inet_select_addr(rt->dst.dev, in ip_rt_get_source()
1281 rt_nexthop(rt, iph->daddr), in ip_rt_get_source()
1289 static void set_class_tag(struct rtable *rt, u32 tag) in set_class_tag() argument
1291 if (!(rt->dst.tclassid & 0xFFFF)) in set_class_tag()
1292 rt->dst.tclassid |= tag & 0xFFFF; in set_class_tag()
1293 if (!(rt->dst.tclassid & 0xFFFF0000)) in set_class_tag()
1294 rt->dst.tclassid |= tag & 0xFFFF0000; in set_class_tag()
1313 const struct rtable *rt = (const struct rtable *) dst; in ipv4_mtu() local
1314 unsigned int mtu = rt->rt_pmtu; in ipv4_mtu()
1316 if (!mtu || time_after_eq(jiffies, rt->dst.expires)) in ipv4_mtu()
1325 if (rt->rt_uses_gateway && mtu > 576) in ipv4_mtu()
1351 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, in rt_bind_exception() argument
1361 int genid = fnhe_genid(dev_net(rt->dst.dev)); in rt_bind_exception()
1363 if (rt_is_input_route(rt)) in rt_bind_exception()
1377 fill_route_from_fnhe(rt, fnhe); in rt_bind_exception()
1378 if (!rt->rt_gateway) in rt_bind_exception()
1379 rt->rt_gateway = daddr; in rt_bind_exception()
1381 if (!(rt->dst.flags & DST_NOCACHE)) { in rt_bind_exception()
1382 rcu_assign_pointer(*porig, rt); in rt_bind_exception()
1395 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt) in rt_cache_route() argument
1400 if (rt_is_input_route(rt)) { in rt_cache_route()
1407 prev = cmpxchg(p, orig, rt); in rt_cache_route()
1424 static void rt_add_uncached_list(struct rtable *rt) in rt_add_uncached_list() argument
1428 rt->rt_uncached_list = ul; in rt_add_uncached_list()
1431 list_add_tail(&rt->rt_uncached, &ul->head); in rt_add_uncached_list()
1438 struct rtable *rt = (struct rtable *) dst; in ipv4_dst_destroy() local
1443 if (!list_empty(&rt->rt_uncached)) { in ipv4_dst_destroy()
1444 struct uncached_list *ul = rt->rt_uncached_list; in ipv4_dst_destroy()
1447 list_del(&rt->rt_uncached); in ipv4_dst_destroy()
1455 struct rtable *rt; in rt_flush_dev() local
1462 list_for_each_entry(rt, &ul->head, rt_uncached) { in rt_flush_dev()
1463 if (rt->dst.dev != dev) in rt_flush_dev()
1465 rt->dst.dev = net->loopback_dev; in rt_flush_dev()
1466 dev_hold(rt->dst.dev); in rt_flush_dev()
1473 static bool rt_cache_valid(const struct rtable *rt) in rt_cache_valid() argument
1475 return rt && in rt_cache_valid()
1476 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && in rt_cache_valid()
1477 !rt_is_expired(rt); in rt_cache_valid()
1480 static void rt_set_nexthop(struct rtable *rt, __be32 daddr, in rt_set_nexthop() argument
1491 rt->rt_gateway = nh->nh_gw; in rt_set_nexthop()
1492 rt->rt_uses_gateway = 1; in rt_set_nexthop()
1494 dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true); in rt_set_nexthop()
1496 rt->dst._metrics |= DST_METRICS_REFCOUNTED; in rt_set_nexthop()
1500 rt->dst.tclassid = nh->nh_tclassid; in rt_set_nexthop()
1502 rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate); in rt_set_nexthop()
1504 cached = rt_bind_exception(rt, fnhe, daddr); in rt_set_nexthop()
1505 else if (!(rt->dst.flags & DST_NOCACHE)) in rt_set_nexthop()
1506 cached = rt_cache_route(nh, rt); in rt_set_nexthop()
1513 rt->dst.flags |= DST_NOCACHE; in rt_set_nexthop()
1514 if (!rt->rt_gateway) in rt_set_nexthop()
1515 rt->rt_gateway = daddr; in rt_set_nexthop()
1516 rt_add_uncached_list(rt); in rt_set_nexthop()
1519 rt_add_uncached_list(rt); in rt_set_nexthop()
1523 set_class_tag(rt, res->tclassid); in rt_set_nexthop()
1525 set_class_tag(rt, itag); in rt_set_nexthop()
1533 struct rtable *rt; in rt_dst_alloc() local
1535 rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, in rt_dst_alloc()
1540 if (rt) { in rt_dst_alloc()
1541 rt->rt_genid = rt_genid_ipv4(dev_net(dev)); in rt_dst_alloc()
1542 rt->rt_flags = flags; in rt_dst_alloc()
1543 rt->rt_type = type; in rt_dst_alloc()
1544 rt->rt_is_input = 0; in rt_dst_alloc()
1545 rt->rt_iif = 0; in rt_dst_alloc()
1546 rt->rt_pmtu = 0; in rt_dst_alloc()
1547 rt->rt_mtu_locked = 0; in rt_dst_alloc()
1548 rt->rt_gateway = 0; in rt_dst_alloc()
1549 rt->rt_uses_gateway = 0; in rt_dst_alloc()
1550 rt->rt_table_id = 0; in rt_dst_alloc()
1551 INIT_LIST_HEAD(&rt->rt_uncached); in rt_dst_alloc()
1553 rt->dst.output = ip_output; in rt_dst_alloc()
1555 rt->dst.input = ip_local_deliver; in rt_dst_alloc()
1558 return rt; in rt_dst_alloc()
2459 struct rtable *rt; in ipv4_blackhole_route() local
2461 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0); in ipv4_blackhole_route()
2462 if (rt) { in ipv4_blackhole_route()
2463 struct dst_entry *new = &rt->dst; in ipv4_blackhole_route()
2473 rt->rt_is_input = ort->rt_is_input; in ipv4_blackhole_route()
2474 rt->rt_iif = ort->rt_iif; in ipv4_blackhole_route()
2475 rt->rt_pmtu = ort->rt_pmtu; in ipv4_blackhole_route()
2476 rt->rt_mtu_locked = ort->rt_mtu_locked; in ipv4_blackhole_route()
2478 rt->rt_genid = rt_genid_ipv4(net); in ipv4_blackhole_route()
2479 rt->rt_flags = ort->rt_flags; in ipv4_blackhole_route()
2480 rt->rt_type = ort->rt_type; in ipv4_blackhole_route()
2481 rt->rt_gateway = ort->rt_gateway; in ipv4_blackhole_route()
2482 rt->rt_uses_gateway = ort->rt_uses_gateway; in ipv4_blackhole_route()
2484 INIT_LIST_HEAD(&rt->rt_uncached); in ipv4_blackhole_route()
2490 return rt ? &rt->dst : ERR_PTR(-ENOMEM); in ipv4_blackhole_route()
2496 struct rtable *rt = __ip_route_output_key(net, flp4); in ip_route_output_flow() local
2498 if (IS_ERR(rt)) in ip_route_output_flow()
2499 return rt; in ip_route_output_flow()
2502 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, in ip_route_output_flow()
2506 return rt; in ip_route_output_flow()
2514 struct rtable *rt = skb_rtable(skb); in rt_fill_info() local
2533 r->rtm_type = rt->rt_type; in rt_fill_info()
2536 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; in rt_fill_info()
2537 if (rt->rt_flags & RTCF_NOTIFY) in rt_fill_info()
2549 if (rt->dst.dev && in rt_fill_info()
2550 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) in rt_fill_info()
2553 if (rt->dst.tclassid && in rt_fill_info()
2554 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) in rt_fill_info()
2557 if (!rt_is_input_route(rt) && in rt_fill_info()
2562 if (rt->rt_uses_gateway && in rt_fill_info()
2563 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway)) in rt_fill_info()
2566 expires = rt->dst.expires; in rt_fill_info()
2576 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); in rt_fill_info()
2577 if (rt->rt_pmtu && expires) in rt_fill_info()
2578 metrics[RTAX_MTU - 1] = rt->rt_pmtu; in rt_fill_info()
2579 if (rt->rt_mtu_locked && expires) in rt_fill_info()
2593 error = rt->dst.error; in rt_fill_info()
2595 if (rt_is_input_route(rt)) { in rt_fill_info()
2620 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) in rt_fill_info()
2636 struct rtable *rt = NULL; in inet_rtm_getroute() local
2705 rt = skb_rtable(skb); in inet_rtm_getroute()
2706 if (err == 0 && rt->dst.error) in inet_rtm_getroute()
2707 err = -rt->dst.error; in inet_rtm_getroute()
2709 rt = ip_route_output_key(net, &fl4); in inet_rtm_getroute()
2712 if (IS_ERR(rt)) in inet_rtm_getroute()
2713 err = PTR_ERR(rt); in inet_rtm_getroute()
2719 skb_dst_set(skb, &rt->dst); in inet_rtm_getroute()
2721 rt->rt_flags |= RTCF_NOTIFY; in inet_rtm_getroute()
2724 table_id = rt->rt_table_id; in inet_rtm_getroute()