Lines matching refs:fi

69 #define for_nexthops(fi) {						\
71 for (nhsel = 0, nh = (fi)->fib_nh; \
72 nhsel < fib_info_num_path((fi)); \
75 #define change_nexthops(fi) { \
77 for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
78 nhsel < fib_info_num_path((fi)); \
85 #define for_nexthops(fi) { \
86 int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
89 #define change_nexthops(fi) { \
91 struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
96 #define endfor_nexthops(fi) }
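
The iterator macros above open a block that endfor_nexthops() closes, so every use in the lines below pairs the two. A minimal usage sketch (count_alive_nexthops is a hypothetical helper, not part of the file):

/* Hypothetical helper, for illustration only: walk the legacy nexthops
 * of a fib_info and count the ones not marked dead. "nh" and "nhsel"
 * are declared by for_nexthops(); endfor_nexthops() closes its block. */
static unsigned int count_alive_nexthops(const struct fib_info *fi)
{
	unsigned int alive = 0;

	for_nexthops(fi) {
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			alive++;
	} endfor_nexthops(fi);

	return alive;
}
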
235 struct fib_info *fi = container_of(head, struct fib_info, rcu);
237 if (fi->nh) {
238 nexthop_put(fi->nh);
240 change_nexthops(fi) {
241 fib_nh_release(fi->fib_net, nexthop_nh);
242 } endfor_nexthops(fi);
245 ip_fib_metrics_put(fi->fib_metrics);
247 kfree(fi);
250 void free_fib_info(struct fib_info *fi)
252 if (fi->fib_dead == 0) {
253 pr_warn("Freeing alive fib_info %p\n", fi);
257 call_rcu(&fi->rcu, free_fib_info_rcu);
261 void fib_release_info(struct fib_info *fi)
264 if (fi && refcount_dec_and_test(&fi->fib_treeref)) {
265 hlist_del(&fi->fib_hash);
270 if (fi->fib_prefsrc)
271 hlist_del(&fi->fib_lhash);
272 if (fi->nh) {
273 list_del(&fi->nh_list);
275 change_nexthops(fi) {
279 } endfor_nexthops(fi)
282 WRITE_ONCE(fi->fib_dead, 1);
283 fib_info_put(fi);
288 static inline int nh_comp(struct fib_info *fi, struct fib_info *ofi)
292 if (fi->nh || ofi->nh)
293 return nexthop_cmp(fi->nh, ofi->nh) ? 0 : -1;
298 for_nexthops(fi) {
321 } endfor_nexthops(fi);
357 static inline unsigned int fib_info_hashfn(struct fib_info *fi)
361 val = fib_info_hashfn_1(fi->fib_nhs, fi->fib_protocol,
362 fi->fib_scope, (__force u32)fi->fib_prefsrc,
363 fi->fib_priority);
365 if (fi->nh) {
366 val ^= fib_devindex_hashfn(fi->nh->id);
368 for_nexthops(fi) {
370 } endfor_nexthops(fi)
381 struct fib_info *fi;
391 hlist_for_each_entry(fi, head, fib_hash) {
392 if (!net_eq(fi->fib_net, net))
394 if (!fi->nh || fi->nh->id != cfg->fc_nh_id)
396 if (cfg->fc_protocol == fi->fib_protocol &&
397 cfg->fc_scope == fi->fib_scope &&
398 cfg->fc_prefsrc == fi->fib_prefsrc &&
399 cfg->fc_priority == fi->fib_priority &&
400 cfg->fc_type == fi->fib_type &&
401 cfg->fc_table == fi->fib_tb_id &&
402 !((cfg->fc_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK))
403 return fi;
412 struct fib_info *fi;
418 hlist_for_each_entry(fi, head, fib_hash) {
419 if (!net_eq(fi->fib_net, nfi->fib_net))
421 if (fi->fib_nhs != nfi->fib_nhs)
423 if (nfi->fib_protocol == fi->fib_protocol &&
424 nfi->fib_scope == fi->fib_scope &&
425 nfi->fib_prefsrc == fi->fib_prefsrc &&
426 nfi->fib_priority == fi->fib_priority &&
427 nfi->fib_type == fi->fib_type &&
428 nfi->fib_tb_id == fi->fib_tb_id &&
429 memcmp(nfi->fib_metrics, fi->fib_metrics,
431 !((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
432 nh_comp(fi, nfi) == 0)
433 return fi;
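
The two lookup helpers above compare candidate routes field by field and finish with the same flag test (lines 402 and 431). A small sketch of that idiom, with fib_flags_match as a hypothetical wrapper name:

/* Hypothetical wrapper, for illustration: XOR exposes the differing
 * flag bits, masking with ~RTNH_COMPARE_MASK discards the per-nexthop
 * status bits (RTNH_F_DEAD, RTNH_F_LINKDOWN and similar), and the test
 * succeeds only when every remaining bit matches. */
static bool fib_flags_match(unsigned int a, unsigned int b)
{
	return !((a ^ b) & ~RTNH_COMPARE_MASK);
}
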
465 size_t fib_nlmsg_size(struct fib_info *fi)
473 unsigned int nhs = fib_info_num_path(fi);
478 if (fi->nh)
493 for (i = 0; i < fib_info_num_path(fi); i++) {
494 struct fib_nh_common *nhc = fib_info_nhc(fi, i);
526 fri.fi = fa->fa_info;
550 static int fib_detect_death(struct fib_info *fi, int order,
554 const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
578 *last_resort = fi;
691 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
695 struct net *net = fi->fib_net;
700 change_nexthops(fi) {
769 } endfor_nexthops(fi);
772 nh = fib_info_nh(fi, 0);
802 static void fib_rebalance(struct fib_info *fi)
807 if (fib_info_num_path(fi) < 2)
811 for_nexthops(fi) {
820 } endfor_nexthops(fi);
823 change_nexthops(fi) {
838 } endfor_nexthops(fi);
842 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
851 #define fib_rebalance(fi) do { } while (0)
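
Line 851 is the stub used when CONFIG_IP_ROUTE_MULTIPATH is compiled out: fib_rebalance() expands to an empty statement, so call sites need no #ifdef. A sketch of the pattern (not the verbatim source):

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Real implementation: recompute each nexthop's share of the
 * multipath weight (see the lines around 802-838 above). */
static void fib_rebalance(struct fib_info *fi);
#else
/* Stub: the do { } while (0) form swallows the trailing semicolon
 * at call sites and generates no code. */
#define fib_rebalance(fi) do { } while (0)
#endif
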
877 int fib_nh_match(struct net *net, struct fib_config *cfg, struct fib_info *fi,
885 if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
889 if (fi->nh && cfg->fc_nh_id == fi->nh->id)
894 if (fi->nh) {
903 nh = fib_info_nh(fi, 0);
937 for_nexthops(fi) {
1005 } endfor_nexthops(fi);
1010 bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
1033 val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca);
1040 fi_val = fi->fib_metrics->metrics[type - 1];
1290 struct fib_info *fi;
1292 hlist_for_each_entry_safe(fi, n, head, fib_hash) {
1296 new_hash = fib_info_hashfn(fi);
1298 hlist_add_head(&fi->fib_hash, dest);
1307 struct fib_info *fi;
1309 hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
1312 ldest = fib_info_laddrhash_bucket(fi->fib_net,
1313 fi->fib_prefsrc);
1314 hlist_add_head(&fi->fib_lhash, ldest);
1346 if (res->fi->fib_prefsrc)
1347 return res->fi->fib_prefsrc;
1358 return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
1389 struct fib_info *fi = NULL;
1412 fi = fib_find_info_nh(net, cfg);
1413 if (fi) {
1414 refcount_inc(&fi->fib_treeref);
1415 return fi;
1459 fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL);
1460 if (!fi)
1462 fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx,
1464 if (IS_ERR(fi->fib_metrics)) {
1465 err = PTR_ERR(fi->fib_metrics);
1466 kfree(fi);
1470 fi->fib_net = net;
1471 fi->fib_protocol = cfg->fc_protocol;
1472 fi->fib_scope = cfg->fc_scope;
1473 fi->fib_flags = cfg->fc_flags;
1474 fi->fib_priority = cfg->fc_priority;
1475 fi->fib_prefsrc = cfg->fc_prefsrc;
1476 fi->fib_type = cfg->fc_type;
1477 fi->fib_tb_id = cfg->fc_table;
1479 fi->fib_nhs = nhs;
1486 fi->nh = nh;
1489 change_nexthops(fi) {
1490 nexthop_nh->nh_parent = fi;
1491 } endfor_nexthops(fi)
1494 err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg,
1497 err = fib_nh_init(net, fi->fib_nh, cfg, 1, extack);
1529 if (fi->nh) {
1530 err = fib_check_nexthop(fi->nh, cfg->fc_scope, extack);
1534 struct fib_nh *nh = fi->fib_nh;
1557 change_nexthops(fi) {
1565 } endfor_nexthops(fi)
1566 if (linkdown == fi->fib_nhs)
1567 fi->fib_flags |= RTNH_F_LINKDOWN;
1570 if (fi->fib_prefsrc && !fib_valid_prefsrc(cfg, fi->fib_prefsrc)) {
1575 if (!fi->nh) {
1576 change_nexthops(fi) {
1578 fi->fib_scope);
1580 fi->fib_nh_is_v6 = true;
1581 } endfor_nexthops(fi)
1583 fib_rebalance(fi);
1587 ofi = fib_find_info(fi);
1589 /* fib_table_lookup() should not see @fi yet. */
1590 fi->fib_dead = 1;
1591 free_fib_info(fi);
1596 refcount_set(&fi->fib_treeref, 1);
1597 refcount_set(&fi->fib_clntref, 1);
1600 hlist_add_head(&fi->fib_hash,
1601 &fib_info_hash[fib_info_hashfn(fi)]);
1602 if (fi->fib_prefsrc) {
1605 head = fib_info_laddrhash_bucket(net, fi->fib_prefsrc);
1606 hlist_add_head(&fi->fib_lhash, head);
1608 if (fi->nh) {
1609 list_add(&fi->nh_list, &nh->fi_list);
1611 change_nexthops(fi) {
1618 } endfor_nexthops(fi)
1621 return fi;
1627 if (fi) {
1628 /* fib_table_lookup() should not see @fi yet. */
1629 fi->fib_dead = 1;
1630 free_fib_info(fi);
1741 static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
1749 if (unlikely(fi->nh)) {
1750 if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
1755 for_nexthops(fi) {
1763 } endfor_nexthops(fi);
1774 static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
1783 unsigned int nhs = fib_info_num_path(fri->fi);
1784 struct fib_info *fi = fri->fi;
1805 rtm->rtm_flags = fi->fib_flags;
1806 rtm->rtm_scope = fi->fib_scope;
1807 rtm->rtm_protocol = fi->fib_protocol;
1812 if (fi->fib_priority &&
1813 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
1815 if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
1818 if (fi->fib_prefsrc &&
1819 nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
1822 if (fi->nh) {
1823 if (nla_put_u32(skb, RTA_NH_ID, fi->nh->id))
1825 if (nexthop_is_blackhole(fi->nh))
1827 if (!READ_ONCE(fi->fib_net->ipv4.sysctl_nexthop_compat_mode))
1832 const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
1850 if (fib_add_multipath(skb, fi) < 0)
1881 struct fib_info *fi;
1888 hlist_for_each_entry(fi, head, fib_lhash) {
1889 if (!net_eq(fi->fib_net, net) ||
1890 fi->fib_tb_id != tb_id)
1892 if (fi->fib_prefsrc == local) {
1893 fi->fib_flags |= RTNH_F_DEAD;
1894 fi->pfsrc_removed = true;
1999 struct fib_info *fi = nh->nh_parent;
2002 BUG_ON(!fi->fib_nhs);
2003 if (nh->fib_nh_dev != dev || fi == prev_fi)
2005 prev_fi = fi;
2007 change_nexthops(fi) {
2028 dead = fi->fib_nhs;
2032 } endfor_nexthops(fi)
2033 if (dead == fi->fib_nhs) {
2037 fi->fib_flags |= RTNH_F_DEAD;
2040 fi->fib_flags |= RTNH_F_LINKDOWN;
2046 fib_rebalance(fi);
2055 struct fib_info *fi = NULL, *last_resort = NULL;
2061 u32 last_prio = res->fi->fib_priority;
2096 if (!fi) {
2097 if (next_fi != res->fi)
2100 } else if (!fib_detect_death(fi, order, &last_resort,
2102 fib_result_assign(res, fi);
2106 fi = next_fi;
2110 if (order <= 0 || !fi) {
2116 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
2118 fib_result_assign(res, fi);
2158 struct fib_info *fi = nh->nh_parent;
2161 BUG_ON(!fi->fib_nhs);
2162 if (nh->fib_nh_dev != dev || fi == prev_fi)
2165 prev_fi = fi;
2167 change_nexthops(fi) {
2181 } endfor_nexthops(fi)
2184 fi->fib_flags &= ~nh_flags;
2188 fib_rebalance(fi);
2223 struct fib_info *fi = res->fi;
2224 struct net *net = fi->fib_net;
2227 if (unlikely(res->fi->nh)) {
2232 change_nexthops(fi) {
2249 } endfor_nexthops(fi);
2260 if (fib_info_num_path(res->fi) > 1) {