Lines Matching refs:rt (uses of the identifier rt in net/ipv4/route.c of the Linux kernel; each line shows the source line number, the code fragment, and the enclosing function, with "local" or "argument" marking where rt is declared)

415 const struct rtable *rt = container_of(dst, struct rtable, dst); in ipv4_neigh_lookup() local
421 if (likely(rt->rt_gw_family == AF_INET)) { in ipv4_neigh_lookup()
422 n = ip_neigh_gw4(dev, rt->rt_gw4); in ipv4_neigh_lookup()
423 } else if (rt->rt_gw_family == AF_INET6) { in ipv4_neigh_lookup()
424 n = ip_neigh_gw6(dev, &rt->rt_gw6); in ipv4_neigh_lookup()
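
The ipv4_neigh_lookup() lines above show the gateway-family dispatch: the neighbour table is keyed on rt_gw4 for an IPv4 gateway, rt_gw6 for an IPv6 gateway, and otherwise (not visible in this listing) on the packet's own destination for an on-link route. A minimal user-space sketch of that selection; struct gw_choice and pick_neigh_key() are stand-in names, not kernel types:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>   /* AF_INET, AF_INET6 */

    struct gw_choice {                /* stand-in for the gateway fields of struct rtable */
        int           gw_family;      /* AF_INET, AF_INET6, or 0 for an on-link route */
        uint32_t      gw4;            /* IPv4 next hop */
        unsigned char gw6[16];        /* IPv6 next hop */
    };

    /* Key the neighbour lookup on the gateway when one is set, otherwise on
     * the destination address itself (the on-link case). */
    static const void *pick_neigh_key(const struct gw_choice *rt, const uint32_t *daddr)
    {
        if (rt->gw_family == AF_INET)
            return &rt->gw4;
        if (rt->gw_family == AF_INET6)
            return rt->gw6;
        return daddr;
    }

    int main(void)
    {
        uint32_t daddr = 0;
        struct gw_choice via_gw = { .gw_family = AF_INET, .gw4 = 1 };
        struct gw_choice onlink = { .gw_family = 0 };

        printf("via gateway: key is %s\n",
               pick_neigh_key(&via_gw, &daddr) == (const void *)&via_gw.gw4 ? "rt_gw4" : "daddr");
        printf("on-link:     key is %s\n",
               pick_neigh_key(&onlink, &daddr) == (const void *)&daddr ? "daddr" : "rt_gw4");
        return 0;
    }
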
442 const struct rtable *rt = container_of(dst, struct rtable, dst); in ipv4_confirm_neigh() local
446 if (rt->rt_gw_family == AF_INET) { in ipv4_confirm_neigh()
447 pkey = (const __be32 *)&rt->rt_gw4; in ipv4_confirm_neigh()
448 } else if (rt->rt_gw_family == AF_INET6) { in ipv4_confirm_neigh()
449 return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6); in ipv4_confirm_neigh()
451 (rt->rt_flags & in ipv4_confirm_neigh()
577 struct rtable *rt; in fnhe_flush_routes() local
579 rt = rcu_dereference(fnhe->fnhe_rth_input); in fnhe_flush_routes()
580 if (rt) { in fnhe_flush_routes()
582 dst_dev_put(&rt->dst); in fnhe_flush_routes()
583 dst_release(&rt->dst); in fnhe_flush_routes()
585 rt = rcu_dereference(fnhe->fnhe_rth_output); in fnhe_flush_routes()
586 if (rt) { in fnhe_flush_routes()
588 dst_dev_put(&rt->dst); in fnhe_flush_routes()
589 dst_release(&rt->dst); in fnhe_flush_routes()
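
fnhe_flush_routes() drops whatever routes a next-hop exception has cached for the input and output paths: each slot is read, the reference its dst holds is released, and (not shown in the listing) the pointer is cleared. A toy refcounting sketch of that two-slot flush; struct fake_dst, struct fnhe_slots and put_route() are invented names:

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_dst {                     /* stand-in for struct dst_entry: just a refcount */
        int refcnt;
    };

    struct fnhe_slots {                   /* the two routes a fib_nh_exception may cache */
        struct fake_dst *rth_input;
        struct fake_dst *rth_output;
    };

    static void put_route(struct fake_dst **slot)
    {
        struct fake_dst *dst = *slot;

        if (!dst)
            return;
        *slot = NULL;                     /* the kernel clears the RCU pointer here */
        if (--dst->refcnt == 0) {         /* dst_release(): free on the last reference */
            free(dst);
            printf("cached route freed\n");
        }
    }

    /* Rough shape of fnhe_flush_routes(): flush both cached slots. */
    static void flush_slots(struct fnhe_slots *f)
    {
        put_route(&f->rth_input);
        put_route(&f->rth_output);
    }

    int main(void)
    {
        struct fnhe_slots f = { .rth_output = malloc(sizeof(struct fake_dst)) };

        f.rth_output->refcnt = 1;
        flush_slots(&f);                  /* empty input slot skipped, output slot freed */
        return 0;
    }
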
624 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) in fill_route_from_fnhe() argument
626 rt->rt_pmtu = fnhe->fnhe_pmtu; in fill_route_from_fnhe()
627 rt->rt_mtu_locked = fnhe->fnhe_mtu_locked; in fill_route_from_fnhe()
628 rt->dst.expires = fnhe->fnhe_expires; in fill_route_from_fnhe()
631 rt->rt_flags |= RTCF_REDIRECTED; in fill_route_from_fnhe()
632 rt->rt_uses_gateway = 1; in fill_route_from_fnhe()
633 rt->rt_gw_family = AF_INET; in fill_route_from_fnhe()
634 rt->rt_gw4 = fnhe->fnhe_gw; in fill_route_from_fnhe()
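
fill_route_from_fnhe() copies what an exception has learned into a route: the discovered PMTU, whether that MTU is locked, the expiry time, and, when the exception came from an ICMP redirect (the guard itself sits just above the lines shown), the new gateway. A sketch with stand-in types (struct exception, struct route, fill_route()):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <sys/socket.h>   /* AF_INET */

    struct exception {        /* what a PMTU probe or ICMP redirect taught us */
        uint32_t pmtu;
        bool     mtu_locked;
        long     expires;
        uint32_t gw;          /* non-zero only if a redirect supplied a new gateway */
    };

    struct route {            /* the cached result handed back to senders */
        uint32_t pmtu;
        bool     mtu_locked;
        long     expires;
        bool     redirected;
        bool     uses_gateway;
        int      gw_family;
        uint32_t gw4;
    };

    /* Same shape as fill_route_from_fnhe(): PMTU state is always copied,
     * gateway state only when the exception recorded a redirect. */
    static void fill_route(struct route *rt, const struct exception *e)
    {
        rt->pmtu       = e->pmtu;
        rt->mtu_locked = e->mtu_locked;
        rt->expires    = e->expires;
        if (e->gw) {
            rt->redirected   = true;
            rt->uses_gateway = true;
            rt->gw_family    = AF_INET;
            rt->gw4          = e->gw;
        }
    }

    int main(void)
    {
        struct exception e = { .pmtu = 1400, .expires = 600 };
        struct route rt = { 0 };

        fill_route(&rt, &e);
        printf("pmtu=%u redirected=%d\n", (unsigned)rt.pmtu, rt.redirected);
        return 0;
    }
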
644 struct rtable *rt; in update_or_create_fnhe() local
683 rt = rcu_dereference(fnhe->fnhe_rth_input); in update_or_create_fnhe()
684 if (rt) in update_or_create_fnhe()
685 fill_route_from_fnhe(rt, fnhe); in update_or_create_fnhe()
686 rt = rcu_dereference(fnhe->fnhe_rth_output); in update_or_create_fnhe()
687 if (rt) in update_or_create_fnhe()
688 fill_route_from_fnhe(rt, fnhe); in update_or_create_fnhe()
718 rt = rcu_dereference(nhc->nhc_rth_input); in update_or_create_fnhe()
719 if (rt) in update_or_create_fnhe()
720 WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL); in update_or_create_fnhe()
726 rt = rcu_dereference(*prt); in update_or_create_fnhe()
727 if (rt) in update_or_create_fnhe()
728 WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL); in update_or_create_fnhe()
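
After update_or_create_fnhe() records or refreshes an exception, it marks any route already cached on the nexthop DST_OBSOLETE_KILL; rt_cache_valid() further down (lines 1573-1577) only accepts entries still in the DST_OBSOLETE_FORCE_CHK state, so killed entries fail their next validity check and the following lookup binds to the new exception. A small sketch of that obsolete-state handshake, with made-up enum values:

    #include <stdio.h>
    #include <stdbool.h>

    enum obsolete_state {         /* illustrative states, not the kernel's numeric values */
        OBSOLETE_FORCE_CHK,       /* cached entry is usable but must be revalidated       */
        OBSOLETE_KILL,            /* cached entry must be discarded on the next check     */
    };

    struct cached_route {
        enum obsolete_state obsolete;
        bool expired;
    };

    /* Roughly what rt_cache_valid() tests before reusing a cached route. */
    static bool cache_valid(const struct cached_route *rt)
    {
        return rt && rt->obsolete == OBSOLETE_FORCE_CHK && !rt->expired;
    }

    /* Roughly what update_or_create_fnhe() does to already-cached routes:
     * kill them so the next sender goes through a fresh lookup. */
    static void kill_cached(struct cached_route *rt)
    {
        if (rt)
            rt->obsolete = OBSOLETE_KILL;
    }

    int main(void)
    {
        struct cached_route rt = { .obsolete = OBSOLETE_FORCE_CHK };

        printf("before kill: valid=%d\n", cache_valid(&rt));
        kill_cached(&rt);
        printf("after kill:  valid=%d\n", cache_valid(&rt));
        return 0;
    }
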
738 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4, in __ip_do_redirect() argument
760 if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw) in __ip_do_redirect()
783 n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw); in __ip_do_redirect()
785 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); in __ip_do_redirect()
800 WRITE_ONCE(rt->dst.obsolete, DST_OBSOLETE_KILL); in __ip_do_redirect()
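
__ip_do_redirect() only honours a redirect that names the IPv4 gateway the route is currently using (line 760); it then resolves the advertised new gateway in the ARP table, creating a neighbour entry if needed, before killing the route and recording the exception. The guard itself, as a tiny stand-alone check (redirect_applies() is an invented name):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <sys/socket.h>   /* AF_INET */

    struct gw_state {             /* stand-in for the gateway fields of struct rtable */
        int      gw_family;
        uint32_t gw4;
    };

    /* A redirect is acted on only if it claims to come from the gateway this
     * route actually uses; anything else is ignored. */
    static bool redirect_applies(const struct gw_state *rt, uint32_t old_gw)
    {
        return rt->gw_family == AF_INET && rt->gw4 == old_gw;
    }

    int main(void)
    {
        struct gw_state rt = { .gw_family = AF_INET, .gw4 = 0x0a000001 };

        printf("from our gateway:    %d\n", redirect_applies(&rt, 0x0a000001));
        printf("from somewhere else: %d\n", redirect_applies(&rt, 0x0a000002));
        return 0;
    }
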
825 struct rtable *rt; in ip_do_redirect() local
834 rt = dst_rtable(dst); in ip_do_redirect()
837 __ip_do_redirect(rt, skb, &fl4, true); in ip_do_redirect()
843 struct rtable *rt = dst_rtable(dst); in ipv4_negative_advice() local
846 (rt->rt_flags & RTCF_REDIRECTED) || in ipv4_negative_advice()
847 READ_ONCE(rt->dst.expires)) in ipv4_negative_advice()
869 struct rtable *rt = skb_rtable(skb); in ip_rt_send_redirect() local
877 in_dev = __in_dev_get_rcu(rt->dst.dev); in ip_rt_send_redirect()
883 vif = l3mdev_master_ifindex_rcu(rt->dst.dev); in ip_rt_send_redirect()
885 net = dev_net(rt->dst.dev); in ip_rt_send_redirect()
890 rt_nexthop(rt, ip_hdr(skb)->daddr)); in ip_rt_send_redirect()
917 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr); in ip_rt_send_redirect()
934 struct rtable *rt = skb_rtable(skb); in ip_error() local
956 net = dev_net(rt->dst.dev); in ip_error()
958 switch (rt->dst.error) { in ip_error()
972 switch (rt->dst.error) { in ip_error()
1013 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) in __ip_rt_update_pmtu() argument
1015 struct dst_entry *dst = &rt->dst; in __ip_rt_update_pmtu()
1035 if (rt->rt_pmtu == mtu && !lock && in __ip_rt_update_pmtu()
1068 struct rtable *rt = dst_rtable(dst); in ip_rt_update_pmtu() local
1077 __ip_rt_update_pmtu(rt, &fl4, mtu); in ip_rt_update_pmtu()
1085 struct rtable *rt; in ipv4_update_pmtu() local
1090 rt = __ip_route_output_key(net, &fl4); in ipv4_update_pmtu()
1091 if (!IS_ERR(rt)) { in ipv4_update_pmtu()
1092 __ip_rt_update_pmtu(rt, &fl4, mtu); in ipv4_update_pmtu()
1093 ip_rt_put(rt); in ipv4_update_pmtu()
1102 struct rtable *rt; in __ipv4_sk_update_pmtu() local
1109 rt = __ip_route_output_key(sock_net(sk), &fl4); in __ipv4_sk_update_pmtu()
1110 if (!IS_ERR(rt)) { in __ipv4_sk_update_pmtu()
1111 __ip_rt_update_pmtu(rt, &fl4, mtu); in __ipv4_sk_update_pmtu()
1112 ip_rt_put(rt); in __ipv4_sk_update_pmtu()
1120 struct rtable *rt; in ipv4_sk_update_pmtu() local
1139 rt = dst_rtable(odst); in ipv4_sk_update_pmtu()
1141 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); in ipv4_sk_update_pmtu()
1142 if (IS_ERR(rt)) in ipv4_sk_update_pmtu()
1148 __ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu); in ipv4_sk_update_pmtu()
1150 if (!dst_check(&rt->dst, 0)) { in ipv4_sk_update_pmtu()
1152 dst_release(&rt->dst); in ipv4_sk_update_pmtu()
1154 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); in ipv4_sk_update_pmtu()
1155 if (IS_ERR(rt)) in ipv4_sk_update_pmtu()
1162 sk_dst_set(sk, &rt->dst); in ipv4_sk_update_pmtu()
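
ipv4_sk_update_pmtu() applies a learned MTU to the path a socket is using: it prefers the socket's cached route (or routes the flow afresh), updates the PMTU on the path, and if the dst no longer validates after the update it re-routes and re-attaches a fresh route to the socket. The real function also deals with reference counting and the xfrm-transformed path (lines 1139-1162); the sketch below keeps only the control flow, and every name in it is a stand-in:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct route { uint32_t pmtu; bool still_valid; };

    static struct route table_route = { 0, true };

    static struct route *lookup_route(void)                 { return &table_route; }
    static void set_path_pmtu(struct route *rt, uint32_t m) { rt->pmtu = m; }
    static bool route_checks_out(const struct route *rt)    { return rt->still_valid; }

    /* Shape of ipv4_sk_update_pmtu(): use the socket's route if it has one,
     * apply the new PMTU, then make sure the socket ends up holding a route
     * that is still valid after the update. */
    static void sk_update_pmtu(struct route **sk_cached, uint32_t mtu)
    {
        struct route *rt = *sk_cached ? *sk_cached : lookup_route();

        set_path_pmtu(rt, mtu);
        if (!route_checks_out(rt))
            rt = lookup_route();      /* route the flow again...               */
        *sk_cached = rt;              /* ...and cache the result on the socket */
    }

    int main(void)
    {
        struct route *cached = NULL;

        sk_update_pmtu(&cached, 1400);
        printf("socket route pmtu=%u\n", (unsigned)cached->pmtu);
        return 0;
    }
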
1175 struct rtable *rt; in ipv4_redirect() local
1178 rt = __ip_route_output_key(net, &fl4); in ipv4_redirect()
1179 if (!IS_ERR(rt)) { in ipv4_redirect()
1180 __ip_do_redirect(rt, skb, &fl4, false); in ipv4_redirect()
1181 ip_rt_put(rt); in ipv4_redirect()
1190 struct rtable *rt; in ipv4_sk_redirect() local
1194 rt = __ip_route_output_key(net, &fl4); in ipv4_sk_redirect()
1195 if (!IS_ERR(rt)) { in ipv4_sk_redirect()
1196 __ip_do_redirect(rt, skb, &fl4, false); in ipv4_sk_redirect()
1197 ip_rt_put(rt); in ipv4_sk_redirect()
1205 struct rtable *rt = dst_rtable(dst); in ipv4_dst_check() local
1216 rt_is_expired(rt)) in ipv4_dst_check()
1254 struct rtable *rt; in ipv4_link_failure() local
1258 rt = skb_rtable(skb); in ipv4_link_failure()
1259 if (rt) in ipv4_link_failure()
1260 dst_set_expires(&rt->dst, 0); in ipv4_link_failure()
1282 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt) in ip_rt_get_source() argument
1286 if (rt_is_output_route(rt)) in ip_rt_get_source()
1295 .flowi4_oif = rt->dst.dev->ifindex, in ip_rt_get_source()
1301 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0) in ip_rt_get_source()
1302 src = fib_result_prefsrc(dev_net(rt->dst.dev), &res); in ip_rt_get_source()
1304 src = inet_select_addr(rt->dst.dev, in ip_rt_get_source()
1305 rt_nexthop(rt, iph->daddr), in ip_rt_get_source()
1313 static void set_class_tag(struct rtable *rt, u32 tag) in set_class_tag() argument
1315 if (!(rt->dst.tclassid & 0xFFFF)) in set_class_tag()
1316 rt->dst.tclassid |= tag & 0xFFFF; in set_class_tag()
1317 if (!(rt->dst.tclassid & 0xFFFF0000)) in set_class_tag()
1318 rt->dst.tclassid |= tag & 0xFFFF0000; in set_class_tag()
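
set_class_tag() treats the 32-bit tclassid as two independent 16-bit halves and fills each half only if it is still zero, so a half written by an earlier, more specific source (for example the per-nexthop classid stored at line 1608) is never overwritten by a later, more generic tag. A stand-alone version of the same masking; merge_class_tag() is an invented name:

    #include <stdio.h>
    #include <stdint.h>

    /* Same masking as set_class_tag(): each 16-bit half of the classid is
     * filled only while it is still zero, so earlier, more specific tags win. */
    static void merge_class_tag(uint32_t *tclassid, uint32_t tag)
    {
        if (!(*tclassid & 0xFFFF))
            *tclassid |= tag & 0xFFFF;
        if (!(*tclassid & 0xFFFF0000))
            *tclassid |= tag & 0xFFFF0000;
    }

    int main(void)
    {
        uint32_t id = 0x00000005;          /* low half already set to 5        */

        merge_class_tag(&id, 0x00070009);  /* later tag wants high=7, low=9    */
        printf("0x%08x\n", (unsigned)id);  /* prints 0x00070005: low half kept */
        return 0;
    }
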
1434 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe, in rt_bind_exception() argument
1444 int genid = fnhe_genid(dev_net(rt->dst.dev)); in rt_bind_exception()
1446 if (rt_is_input_route(rt)) in rt_bind_exception()
1461 fill_route_from_fnhe(rt, fnhe); in rt_bind_exception()
1462 if (!rt->rt_gw4) { in rt_bind_exception()
1463 rt->rt_gw4 = daddr; in rt_bind_exception()
1464 rt->rt_gw_family = AF_INET; in rt_bind_exception()
1468 dst_hold(&rt->dst); in rt_bind_exception()
1469 rcu_assign_pointer(*porig, rt); in rt_bind_exception()
1484 static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt) in rt_cache_route() argument
1489 if (rt_is_input_route(rt)) { in rt_cache_route()
1499 dst_hold(&rt->dst); in rt_cache_route()
1500 prev = cmpxchg(p, orig, rt); in rt_cache_route()
1507 dst_release(&rt->dst); in rt_cache_route()
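
rt_cache_route() publishes the new route into the nexthop's cache slot with a compare-and-swap: it takes a dst reference first, and if another CPU changed the slot in the meantime it drops that reference again instead of installing the route. A user-space sketch of the same race handling using C11 atomics; cache_publish(), struct entry and the helpers are invented names, and the real function additionally moves the displaced route onto the uncached list:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdatomic.h>

    struct entry {                        /* stand-in for a refcounted cached route */
        atomic_int refcnt;
    };

    static void entry_hold(struct entry *e)    { atomic_fetch_add(&e->refcnt, 1); }
    static void entry_release(struct entry *e) { atomic_fetch_sub(&e->refcnt, 1); }

    /* Take a reference before publishing; if another CPU already swapped the
     * slot, undo the reference and report failure. On success the displaced
     * entry loses the reference the slot used to hold on it. */
    static bool cache_publish(_Atomic(struct entry *) *slot, struct entry *new_e)
    {
        struct entry *old = atomic_load(slot);

        entry_hold(new_e);
        if (atomic_compare_exchange_strong(slot, &old, new_e)) {
            if (old)
                entry_release(old);
            return true;
        }
        entry_release(new_e);
        return false;
    }

    int main(void)
    {
        _Atomic(struct entry *) slot;
        struct entry e;

        atomic_init(&slot, NULL);
        atomic_init(&e.refcnt, 1);
        printf("published=%d refcnt=%d\n",
               cache_publish(&slot, &e), atomic_load(&e.refcnt));
        return 0;
    }
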
1521 void rt_add_uncached_list(struct rtable *rt) in rt_add_uncached_list() argument
1525 rt->dst.rt_uncached_list = ul; in rt_add_uncached_list()
1528 list_add_tail(&rt->dst.rt_uncached, &ul->head); in rt_add_uncached_list()
1532 void rt_del_uncached_list(struct rtable *rt) in rt_del_uncached_list() argument
1534 if (!list_empty(&rt->dst.rt_uncached)) { in rt_del_uncached_list()
1535 struct uncached_list *ul = rt->dst.rt_uncached_list; in rt_del_uncached_list()
1538 list_del_init(&rt->dst.rt_uncached); in rt_del_uncached_list()
1551 struct rtable *rt, *safe; in rt_flush_dev() local
1561 list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) { in rt_flush_dev()
1562 if (rt->dst.dev != dev) in rt_flush_dev()
1564 rt->dst.dev = blackhole_netdev; in rt_flush_dev()
1566 &rt->dst.dev_tracker, GFP_ATOMIC); in rt_flush_dev()
1567 list_del_init(&rt->dst.rt_uncached); in rt_flush_dev()
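
rt_flush_dev() handles a device going away without freeing routes that others may still hold: every uncached route that points at the dying device is simply re-pointed at blackhole_netdev, a permanently present device that drops everything (the call transferring the device reference continues at line 1566). The same idea over a plain array; flush_dev(), struct uroute and the device names are stand-ins, while the kernel walks per-CPU lists under a spinlock:

    #include <stdio.h>

    struct device { const char *name; };
    struct uroute { struct device *dev; };               /* stand-in for an uncached route */

    static struct device eth0      = { "eth0" };
    static struct device blackhole = { "blackhole0" };   /* stand-in for blackhole_netdev */

    /* Routes referencing the dying device are not freed; their device pointer
     * is swapped to the always-present black-hole device instead. */
    static void flush_dev(struct uroute *routes, int n, struct device *dying)
    {
        for (int i = 0; i < n; i++)
            if (routes[i].dev == dying)
                routes[i].dev = &blackhole;
    }

    int main(void)
    {
        struct uroute routes[2] = { { &eth0 }, { &blackhole } };

        flush_dev(routes, 2, &eth0);
        printf("%s %s\n", routes[0].dev->name, routes[1].dev->name);
        return 0;
    }
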
1573 static bool rt_cache_valid(const struct rtable *rt) in rt_cache_valid() argument
1575 return rt && in rt_cache_valid()
1576 READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK && in rt_cache_valid()
1577 !rt_is_expired(rt); in rt_cache_valid()
1580 static void rt_set_nexthop(struct rtable *rt, __be32 daddr, in rt_set_nexthop() argument
1592 rt->rt_uses_gateway = 1; in rt_set_nexthop()
1593 rt->rt_gw_family = nhc->nhc_gw_family; in rt_set_nexthop()
1596 rt->rt_gw4 = nhc->nhc_gw.ipv4; in rt_set_nexthop()
1598 rt->rt_gw6 = nhc->nhc_gw.ipv6; in rt_set_nexthop()
1601 ip_dst_init_metrics(&rt->dst, fi->fib_metrics); in rt_set_nexthop()
1608 rt->dst.tclassid = nh->nh_tclassid; in rt_set_nexthop()
1611 rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate); in rt_set_nexthop()
1613 cached = rt_bind_exception(rt, fnhe, daddr, do_cache); in rt_set_nexthop()
1615 cached = rt_cache_route(nhc, rt); in rt_set_nexthop()
1622 if (!rt->rt_gw4) { in rt_set_nexthop()
1623 rt->rt_gw_family = AF_INET; in rt_set_nexthop()
1624 rt->rt_gw4 = daddr; in rt_set_nexthop()
1626 rt_add_uncached_list(rt); in rt_set_nexthop()
1629 rt_add_uncached_list(rt); in rt_set_nexthop()
1633 set_class_tag(rt, res->tclassid); in rt_set_nexthop()
1635 set_class_tag(rt, itag); in rt_set_nexthop()
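
rt_set_nexthop() copies the FIB nexthop's gateway into the route when the nexthop has one (lines 1592-1598). If the route ends up neither cached on the nexthop nor bound to an exception and has no gateway, the destination itself is recorded as rt_gw4 (lines 1622-1624, and similarly in rt_bind_exception() at 1462-1464) so that later redirect and PMTU handling always has an IPv4 address to compare against. A condensed sketch of that decision; the struct and function names are stand-ins and the IPv6/scope handling is omitted:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>
    #include <sys/socket.h>   /* AF_INET */

    struct nexthop { int gw_family; uint32_t gw4; };   /* what the FIB entry provides */
    struct route   { bool uses_gateway; int gw_family; uint32_t gw4; };

    static void set_nexthop(struct route *rt, const struct nexthop *nh,
                            uint32_t daddr, bool cached)
    {
        if (nh->gw_family == AF_INET) {        /* copy the configured gateway */
            rt->uses_gateway = true;
            rt->gw_family    = AF_INET;
            rt->gw4          = nh->gw4;
        }
        /* Uncached, gateway-less route: remember the destination itself so
         * ICMP-driven updates still have an address to match on. */
        if (!cached && !rt->gw4) {
            rt->gw_family = AF_INET;
            rt->gw4       = daddr;
        }
    }

    int main(void)
    {
        struct nexthop onlink = { 0 };
        struct route rt = { 0 };

        set_nexthop(&rt, &onlink, 0x0a000042, false);
        printf("gw4=0x%08x uses_gateway=%d\n", (unsigned)rt.gw4, rt.uses_gateway);
        return 0;
    }
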
1643 struct rtable *rt; in rt_dst_alloc() local
1645 rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK, in rt_dst_alloc()
1648 if (rt) { in rt_dst_alloc()
1649 rt->rt_genid = rt_genid_ipv4(dev_net(dev)); in rt_dst_alloc()
1650 rt->rt_flags = flags; in rt_dst_alloc()
1651 rt->rt_type = type; in rt_dst_alloc()
1652 rt->rt_is_input = 0; in rt_dst_alloc()
1653 rt->rt_iif = 0; in rt_dst_alloc()
1654 rt->rt_pmtu = 0; in rt_dst_alloc()
1655 rt->rt_mtu_locked = 0; in rt_dst_alloc()
1656 rt->rt_uses_gateway = 0; in rt_dst_alloc()
1657 rt->rt_gw_family = 0; in rt_dst_alloc()
1658 rt->rt_gw4 = 0; in rt_dst_alloc()
1660 rt->dst.output = ip_output; in rt_dst_alloc()
1662 rt->dst.input = ip_local_deliver; in rt_dst_alloc()
1665 return rt; in rt_dst_alloc()
1669 struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt) in rt_dst_clone() argument
1674 rt->dst.flags); in rt_dst_clone()
1678 new_rt->rt_flags = rt->rt_flags; in rt_dst_clone()
1679 new_rt->rt_type = rt->rt_type; in rt_dst_clone()
1680 new_rt->rt_is_input = rt->rt_is_input; in rt_dst_clone()
1681 new_rt->rt_iif = rt->rt_iif; in rt_dst_clone()
1682 new_rt->rt_pmtu = rt->rt_pmtu; in rt_dst_clone()
1683 new_rt->rt_mtu_locked = rt->rt_mtu_locked; in rt_dst_clone()
1684 new_rt->rt_gw_family = rt->rt_gw_family; in rt_dst_clone()
1685 if (rt->rt_gw_family == AF_INET) in rt_dst_clone()
1686 new_rt->rt_gw4 = rt->rt_gw4; in rt_dst_clone()
1687 else if (rt->rt_gw_family == AF_INET6) in rt_dst_clone()
1688 new_rt->rt_gw6 = rt->rt_gw6; in rt_dst_clone()
1690 new_rt->dst.input = READ_ONCE(rt->dst.input); in rt_dst_clone()
1691 new_rt->dst.output = READ_ONCE(rt->dst.output); in rt_dst_clone()
1692 new_rt->dst.error = rt->dst.error; in rt_dst_clone()
1694 new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate); in rt_dst_clone()
2191 struct rtable *rt = skb_rtable(hint); in ip_route_use_hint() local
2213 if (rt->rt_type != RTN_LOCAL) in ip_route_use_hint()
2886 struct rtable *rt; in ipv4_blackhole_route() local
2888 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0); in ipv4_blackhole_route()
2889 if (rt) { in ipv4_blackhole_route()
2890 struct dst_entry *new = &rt->dst; in ipv4_blackhole_route()
2899 rt->rt_is_input = ort->rt_is_input; in ipv4_blackhole_route()
2900 rt->rt_iif = ort->rt_iif; in ipv4_blackhole_route()
2901 rt->rt_pmtu = ort->rt_pmtu; in ipv4_blackhole_route()
2902 rt->rt_mtu_locked = ort->rt_mtu_locked; in ipv4_blackhole_route()
2904 rt->rt_genid = rt_genid_ipv4(net); in ipv4_blackhole_route()
2905 rt->rt_flags = ort->rt_flags; in ipv4_blackhole_route()
2906 rt->rt_type = ort->rt_type; in ipv4_blackhole_route()
2907 rt->rt_uses_gateway = ort->rt_uses_gateway; in ipv4_blackhole_route()
2908 rt->rt_gw_family = ort->rt_gw_family; in ipv4_blackhole_route()
2909 if (rt->rt_gw_family == AF_INET) in ipv4_blackhole_route()
2910 rt->rt_gw4 = ort->rt_gw4; in ipv4_blackhole_route()
2911 else if (rt->rt_gw_family == AF_INET6) in ipv4_blackhole_route()
2912 rt->rt_gw6 = ort->rt_gw6; in ipv4_blackhole_route()
2917 return rt ? &rt->dst : ERR_PTR(-ENOMEM); in ipv4_blackhole_route()
2923 struct rtable *rt = __ip_route_output_key(net, flp4); in ip_route_output_flow() local
2925 if (IS_ERR(rt)) in ip_route_output_flow()
2926 return rt; in ip_route_output_flow()
2929 flp4->flowi4_oif = rt->dst.dev->ifindex; in ip_route_output_flow()
2930 rt = dst_rtable(xfrm_lookup_route(net, &rt->dst, in ip_route_output_flow()
2935 return rt; in ip_route_output_flow()
2941 struct rtable *rt, u32 table_id, dscp_t dscp, in rt_fill_info() argument
2963 r->rtm_type = rt->rt_type; in rt_fill_info()
2966 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; in rt_fill_info()
2967 if (rt->rt_flags & RTCF_NOTIFY) in rt_fill_info()
2979 if (rt->dst.dev && in rt_fill_info()
2980 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) in rt_fill_info()
2982 if (lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0) in rt_fill_info()
2985 if (rt->dst.tclassid && in rt_fill_info()
2986 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid)) in rt_fill_info()
2989 if (fl4 && !rt_is_input_route(rt) && in rt_fill_info()
2994 if (rt->rt_uses_gateway) { in rt_fill_info()
2995 if (rt->rt_gw_family == AF_INET && in rt_fill_info()
2996 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) { in rt_fill_info()
2998 } else if (rt->rt_gw_family == AF_INET6) { in rt_fill_info()
3009 memcpy(via->rtvia_addr, &rt->rt_gw6, alen); in rt_fill_info()
3013 expires = READ_ONCE(rt->dst.expires); in rt_fill_info()
3023 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); in rt_fill_info()
3024 if (rt->rt_pmtu && expires) in rt_fill_info()
3025 metrics[RTAX_MTU - 1] = rt->rt_pmtu; in rt_fill_info()
3026 if (rt->rt_mtu_locked && expires) in rt_fill_info()
3042 if (rt_is_input_route(rt)) { in rt_fill_info()
3063 error = rt->dst.error; in rt_fill_info()
3065 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0) in rt_fill_info()
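
When rt_fill_info() dumps a route over netlink, the MTU learned from a path exception overrides the FIB metric only while the exception still has an expiry pending (lines 3024-3026); a stale rt_pmtu with no expiry is not reported. A sketch of that metric selection, assuming a Linux system whose uapi headers provide RTAX_MTU and RTAX_MAX; fill_metrics() is an invented name:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <linux/rtnetlink.h>   /* RTAX_MTU, RTAX_MAX */

    /* Copy the route's metrics, then let an unexpired exception override MTU. */
    static void fill_metrics(uint32_t *out, const uint32_t *route_metrics,
                             uint32_t rt_pmtu, long expires)
    {
        memcpy(out, route_metrics, RTAX_MAX * sizeof(uint32_t));
        if (rt_pmtu && expires)
            out[RTAX_MTU - 1] = rt_pmtu;   /* metrics are 1-based, array is 0-based */
    }

    int main(void)
    {
        uint32_t base[RTAX_MAX] = { 0 }, out[RTAX_MAX];

        base[RTAX_MTU - 1] = 1500;
        fill_metrics(out, base, 1400, 600);   /* exception still live: report 1400 */
        printf("mtu=%u\n", (unsigned)out[RTAX_MTU - 1]);
        fill_metrics(out, base, 1400, 0);     /* no expiry pending: report 1500    */
        printf("mtu=%u\n", (unsigned)out[RTAX_MTU - 1]);
        return 0;
    }
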
3088 struct rtable *rt; in fnhe_dump_bucket() local
3101 rt = rcu_dereference(fnhe->fnhe_rth_input); in fnhe_dump_bucket()
3102 if (!rt) in fnhe_dump_bucket()
3103 rt = rcu_dereference(fnhe->fnhe_rth_output); in fnhe_dump_bucket()
3104 if (!rt) in fnhe_dump_bucket()
3107 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt, in fnhe_dump_bucket()
3290 struct rtable *rt = NULL; in inet_rtm_getroute() local
3363 rt = skb_rtable(skb); in inet_rtm_getroute()
3364 if (err == 0 && rt->dst.error) in inet_rtm_getroute()
3365 err = -rt->dst.error; in inet_rtm_getroute()
3369 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb); in inet_rtm_getroute()
3371 if (IS_ERR(rt)) in inet_rtm_getroute()
3372 err = PTR_ERR(rt); in inet_rtm_getroute()
3374 skb_dst_set(skb, &rt->dst); in inet_rtm_getroute()
3381 rt->rt_flags |= RTCF_NOTIFY; in inet_rtm_getroute()
3406 fri.type = rt->rt_type; in inet_rtm_getroute()
3432 err = rt_fill_info(net, dst, src, rt, table_id, res.dscp, &fl4, in inet_rtm_getroute()