Lines Matching refs:block

181 struct tcf_block *block = chain->block;
183 mutex_lock(&block->proto_destroy_lock);
184 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
186 mutex_unlock(&block->proto_destroy_lock);
205 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
220 struct tcf_block *block = chain->block;
222 mutex_lock(&block->proto_destroy_lock);
225 mutex_unlock(&block->proto_destroy_lock);
460 #define ASSERT_BLOCK_LOCKED(block) \
461 lockdep_assert_held(&(block)->lock)
469 static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
474 ASSERT_BLOCK_LOCKED(block);
479 list_add_tail_rcu(&chain->list, &block->chain_list);
481 chain->block = block;
485 block->chain0.chain = chain;
500 struct tcf_block *block = chain->block;
505 mutex_lock(&block->lock);
506 list_for_each_entry(item, &block->chain0.filter_chain_list, list)
508 mutex_unlock(&block->lock);
511 /* Returns true if block can be safely freed. */
515 struct tcf_block *block = chain->block;
517 ASSERT_BLOCK_LOCKED(block);
521 block->chain0.chain = NULL;
523 if (list_empty(&block->chain_list) &&
524 refcount_read(&block->refcnt) == 0)
530 static void tcf_block_destroy(struct tcf_block *block)
532 mutex_destroy(&block->lock);
533 mutex_destroy(&block->proto_destroy_lock);
534 xa_destroy(&block->ports);
535 kfree_rcu(block, rcu);
540 struct tcf_block *block = chain->block;
545 tcf_block_destroy(block);
550 ASSERT_BLOCK_LOCKED(chain->block);
557 ASSERT_BLOCK_LOCKED(chain->block);
565 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
570 ASSERT_BLOCK_LOCKED(block);
572 list_for_each_entry(chain, &block->chain_list, list) {
580 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
585 list_for_each_entry_rcu(chain, &block->chain_list, list) {
597 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
604 mutex_lock(&block->lock);
605 chain = tcf_chain_lookup(block, chain_index);
611 chain = tcf_chain_create(block, chain_index);
619 mutex_unlock(&block->lock);
633 mutex_unlock(&block->lock);
637 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
640 return __tcf_chain_get(block, chain_index, create, false);
643 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
645 return __tcf_chain_get(block, chain_index, true, true);
653 struct tcf_block *block, struct sk_buff *oskb,
659 struct tcf_block *block = chain->block;
665 mutex_lock(&block->lock);
668 mutex_unlock(&block->lock);
677 /* tc_chain_notify_delete can't be called while holding block lock.
678 * However, when block is unlocked chain can be changed concurrently, so
689 chain->index, block, NULL, 0, 0);
696 mutex_unlock(&block->lock);
744 static int tcf_block_setup(struct tcf_block *block,
757 bo->block = flow_block;
765 static void tcf_block_unbind(struct tcf_block *block,
770 struct tcf_block *block = block_cb->indr.data;
778 &block->flow_block, tcf_block_shared(block),
781 down_write(&block->cb_lock);
784 tcf_block_unbind(block, &bo);
785 up_write(&block->cb_lock);
789 static bool tcf_block_offload_in_use(struct tcf_block *block)
791 return atomic_read(&block->offloadcnt);
794 static int tcf_block_offload_cmd(struct tcf_block *block,
803 &block->flow_block, tcf_block_shared(block),
816 return tcf_block_setup(block, &bo);
819 flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
821 tcf_block_setup(block, &bo);
826 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
833 down_write(&block->cb_lock);
835 /* If tc offload feature is disabled and the block we try to bind
840 tcf_block_offload_in_use(block)) {
841 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
846 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
852 up_write(&block->cb_lock);
856 if (tcf_block_offload_in_use(block))
860 block->nooffloaddevcnt++;
862 up_write(&block->cb_lock);
866 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
872 down_write(&block->cb_lock);
873 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
876 up_write(&block->cb_lock);
880 WARN_ON(block->nooffloaddevcnt-- == 0);
881 up_write(&block->cb_lock);
885 tcf_chain0_head_change_cb_add(struct tcf_block *block,
900 mutex_lock(&block->lock);
901 chain0 = block->chain0.chain;
905 list_add(&item->list, &block->chain0.filter_chain_list);
906 mutex_unlock(&block->lock);
917 mutex_lock(&block->lock);
918 list_add(&item->list, &block->chain0.filter_chain_list);
919 mutex_unlock(&block->lock);
929 tcf_chain0_head_change_cb_del(struct tcf_block *block,
934 mutex_lock(&block->lock);
935 list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
939 if (block->chain0.chain)
942 mutex_unlock(&block->lock);
948 mutex_unlock(&block->lock);
959 static int tcf_block_insert(struct tcf_block *block, struct net *net,
967 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
975 static void tcf_block_remove(struct tcf_block *block, struct net *net)
980 idr_remove(&tn->idr, block->index);
988 struct tcf_block *block;
990 block = kzalloc(sizeof(*block), GFP_KERNEL);
991 if (!block) {
992 NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
995 mutex_init(&block->lock);
996 mutex_init(&block->proto_destroy_lock);
997 init_rwsem(&block->cb_lock);
998 flow_block_init(&block->flow_block);
999 INIT_LIST_HEAD(&block->chain_list);
1000 INIT_LIST_HEAD(&block->owner_list);
1001 INIT_LIST_HEAD(&block->chain0.filter_chain_list);
1003 refcount_set(&block->refcnt, 1);
1004 block->net = net;
1005 block->index = block_index;
1006 xa_init(&block->ports);
1009 if (!tcf_block_shared(block))
1010 block->q = q;
1011 return block;
1024 struct tcf_block *block;
1027 block = tcf_block_lookup(net, block_index);
1028 if (block && !refcount_inc_not_zero(&block->refcnt))
1029 block = NULL;
1032 return block;
1036 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1038 mutex_lock(&block->lock);
1040 chain = list_is_last(&chain->list, &block->chain_list) ?
1043 chain = list_first_entry_or_null(&block->chain_list,
1048 chain = list_is_last(&chain->list, &block->chain_list) ?
1053 mutex_unlock(&block->lock);
1059 * block. It properly obtains block->lock and takes reference to chain before
1068 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
1070 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);
1132 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
1136 /* Last reference to block. At this point chains cannot be added or
1139 for (chain = tcf_get_next_chain(block, NULL);
1141 chain = tcf_get_next_chain(block, chain)) {
1251 struct tcf_block *block;
1254 block = tcf_block_refcnt_get(net, block_index);
1255 if (!block) {
1262 block = cops->tcf_block(q, cl, extack);
1263 if (!block)
1266 if (tcf_block_shared(block)) {
1267 NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
1271 /* Always take reference to block in order to support execution
1273 * must release block when it is finished using it. 'if' block
1274 * of this conditional obtain reference to block by calling
1277 refcount_inc(&block->refcnt);
1280 return block;
1283 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
1286 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
1287 /* Flushing/putting all chains will cause the block to be
1289 * is empty, block has to be manually deallocated. After block
1291 * increment it or add new chains to block.
1293 bool free_block = list_empty(&block->chain_list);
1295 mutex_unlock(&block->lock);
1296 if (tcf_block_shared(block))
1297 tcf_block_remove(block, block->net);
1300 tcf_block_offload_unbind(block, q, ei);
1303 tcf_block_destroy(block);
1305 tcf_block_flush_all_chains(block, rtnl_held);
1307 tcf_block_offload_unbind(block, q, ei);
1311 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
1313 __tcf_block_put(block, NULL, NULL, rtnl_held);
1316 /* Find tcf block.
1325 struct tcf_block *block;
1338 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
1339 if (IS_ERR(block)) {
1340 err = PTR_ERR(block);
1344 return block;
1354 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
1357 if (!IS_ERR_OR_NULL(block))
1358 tcf_block_refcnt_put(block, rtnl_held);
1375 tcf_block_owner_netif_keep_dst(struct tcf_block *block,
1379 if (block->keep_dst &&
1385 void tcf_block_netif_keep_dst(struct tcf_block *block)
1389 block->keep_dst = true;
1390 list_for_each_entry(item, &block->owner_list, list)
1391 tcf_block_owner_netif_keep_dst(block, item->q,
1396 static int tcf_block_owner_add(struct tcf_block *block,
1407 list_add(&item->list, &block->owner_list);
1411 static void tcf_block_owner_del(struct tcf_block *block,
1417 list_for_each_entry(item, &block->owner_list, list) {
1427 static bool tcf_block_tracks_dev(struct tcf_block *block,
1430 return tcf_block_shared(block) &&
1441 struct tcf_block *block = NULL;
1445 /* block_index not 0 means the shared block is requested */
1446 block = tcf_block_refcnt_get(net, ei->block_index);
1448 if (!block) {
1449 block = tcf_block_create(net, q, ei->block_index, extack);
1450 if (IS_ERR(block))
1451 return PTR_ERR(block);
1452 if (tcf_block_shared(block)) {
1453 err = tcf_block_insert(block, net, extack);
1459 err = tcf_block_owner_add(block, q, ei->binder_type);
1463 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
1465 err = tcf_chain0_head_change_cb_add(block, ei, extack);
1469 err = tcf_block_offload_bind(block, q, ei, extack);
1473 if (tcf_block_tracks_dev(block, ei)) {
1474 err = xa_insert(&block->ports, dev->ifindex, dev, GFP_KERNEL);
1476 NL_SET_ERR_MSG(extack, "block dev insert failed");
1481 *p_block = block;
1486 tcf_chain0_head_change_cb_del(block, ei);
1488 tcf_block_owner_del(block, q, ei->binder_type);
1491 tcf_block_refcnt_put(block, true);
1520 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
1525 if (!block)
1527 if (tcf_block_tracks_dev(block, ei))
1528 xa_erase(&block->ports, dev->ifindex);
1529 tcf_chain0_head_change_cb_del(block, ei);
1530 tcf_block_owner_del(block, q, ei->binder_type);
1532 __tcf_block_put(block, q, ei, true);
1536 void tcf_block_put(struct tcf_block *block)
1540 if (!block)
1542 tcf_block_put_ext(block, block->q, &ei);
1548 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
1556 lockdep_assert_held(&block->cb_lock);
1558 for (chain = __tcf_get_next_chain(block, NULL);
1561 chain = __tcf_get_next_chain(block, chain),
1591 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
1596 static int tcf_block_bind(struct tcf_block *block,
1602 lockdep_assert_held(&block->cb_lock);
1605 err = tcf_block_playback_offloads(block, block_cb->cb,
1607 tcf_block_offload_in_use(block),
1612 block->lockeddevcnt++;
1616 list_splice(&bo->cb_list, &block->flow_block.cb_list);
1625 tcf_block_playback_offloads(block, block_cb->cb,
1627 tcf_block_offload_in_use(block),
1630 block->lockeddevcnt--;
1638 static void tcf_block_unbind(struct tcf_block *block,
1643 lockdep_assert_held(&block->cb_lock);
1646 tcf_block_playback_offloads(block, block_cb->cb,
1648 tcf_block_offload_in_use(block),
1653 block->lockeddevcnt--;
1657 static int tcf_block_setup(struct tcf_block *block,
1664 err = tcf_block_bind(block, bo);
1668 tcf_block_unbind(block, bo);
1762 tp->chain->block->index,
1776 const struct tcf_block *block,
1793 if (block) {
1814 fchain = tcf_chain_lookup_rcu(block, chain);
2015 struct tcf_proto *tp, struct tcf_block *block,
2037 tcm->tcm_block_index = block->index;
2077 struct tcf_block *block, struct Qdisc *q,
2092 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2109 struct tcf_block *block, struct Qdisc *q,
2124 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
2147 struct tcf_block *block, struct Qdisc *q,
2156 tfilter_notify(net, oskb, n, tp, block, q, parent, NULL,
2186 struct tcf_block *block;
2210 block = NULL;
2241 * block is shared (no qdisc found), qdisc is not unlocked, classifier
2255 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2257 if (IS_ERR(block)) {
2258 err = PTR_ERR(block);
2261 block->classid = parent;
2269 chain = tcf_chain_get(block, chain_index, true);
2367 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2385 tcf_block_release(q, block, rtnl_held);
2419 struct tcf_block *block = NULL;
2452 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
2467 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2469 if (IS_ERR(block)) {
2470 err = PTR_ERR(block);
2480 chain = tcf_chain_get(block, chain_index, false);
2495 tfilter_notify_chain(net, skb, block, q, parent, n,
2519 tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2534 err = tfilter_del_notify(net, skb, n, tp, block, q, parent, fh,
2549 tcf_block_release(q, block, rtnl_held);
2575 struct tcf_block *block = NULL;
2608 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
2622 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2624 if (IS_ERR(block)) {
2625 err = PTR_ERR(block);
2635 chain = tcf_chain_get(block, chain_index, false);
2662 err = tfilter_notify(net, skb, n, tp, block, q, parent,
2675 tcf_block_release(q, block, rtnl_held);
2687 struct tcf_block *block;
2698 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2709 struct tcf_block *block = chain->block;
2732 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2744 arg.block = block;
2777 struct tcf_block *block;
2801 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2802 if (!block)
2804 /* If we work with block index, q is NULL and parent value
2837 block = cops->tcf_block(q, cl, NULL);
2838 if (!block)
2840 parent = block->classid;
2841 if (tcf_block_shared(block))
2848 for (chain = __tcf_get_next_chain(block, NULL);
2851 chain = __tcf_get_next_chain(block, chain),
2865 tcf_block_refcnt_put(block, true);
2878 struct tcf_block *block,
2899 if (block->q) {
2900 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2901 tcm->tcm_parent = block->q->handle;
2904 tcm->tcm_block_index = block->index;
2936 struct tcf_block *block = chain->block;
2937 struct net *net = block->net;
2949 chain->index, net, skb, block, portid,
2966 struct tcf_block *block, struct sk_buff *oskb,
2970 struct net *net = block->net;
2981 block, portid, seq, flags, RTM_DELCHAIN, NULL) <= 0) {
3049 struct tcf_block *block;
3064 block = tcf_block_find(net, &q, &parent, &cl,
3066 if (IS_ERR(block))
3067 return PTR_ERR(block);
3076 mutex_lock(&block->lock);
3077 chain = tcf_chain_lookup(block, chain_index);
3096 chain = tcf_chain_create(block, chain_index);
3113 /* Modifying chain requires holding parent block lock. In case
3121 mutex_unlock(&block->lock);
3135 tfilter_notify_chain(net, skb, block, q, parent, n,
3159 tcf_block_release(q, block, true);
3166 mutex_unlock(&block->lock);
3176 struct tcf_block *block;
3192 block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
3193 if (!block)
3221 block = cops->tcf_block(q, cl, NULL);
3222 if (!block)
3224 if (tcf_block_shared(block))
3231 mutex_lock(&block->lock);
3232 list_for_each_entry(chain, &block->chain_list, list) {
3243 chain->index, net, skb, block,
3251 mutex_unlock(&block->lock);
3254 tcf_block_refcnt_put(block, true);
3481 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3486 atomic_inc(&block->offloadcnt);
3489 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3494 atomic_dec(&block->offloadcnt);
3497 static void tc_cls_offload_cnt_update(struct tcf_block *block,
3501 lockdep_assert_held(&block->cb_lock);
3506 tcf_block_offload_inc(block, flags);
3511 tcf_block_offload_dec(block, flags);
3517 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3520 lockdep_assert_held(&block->cb_lock);
3523 tcf_block_offload_dec(block, flags);
3529 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3536 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3548 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3551 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3557 down_read(&block->cb_lock);
3558 /* Need to obtain rtnl lock if block is bound to devs that require it.
3559 * In block bind code cb_lock is obtained while holding rtnl, so we must
3562 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3563 up_read(&block->cb_lock);
3568 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3570 up_read(&block->cb_lock);
3578 * successfully offloaded, increment block offloads counter. On failure,
3583 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3587 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3593 down_read(&block->cb_lock);
3594 /* Need to obtain rtnl lock if block is bound to devs that require it.
3595 * In block bind code cb_lock is obtained while holding rtnl, so we must
3598 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3599 up_read(&block->cb_lock);
3604 /* Make sure all netdevs sharing this block are offload-capable. */
3605 if (block->nooffloaddevcnt && err_stop) {
3610 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3617 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3620 up_read(&block->cb_lock);
3628 * successfully offloaded, increment block offload counter. On failure,
3633 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3639 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3645 down_read(&block->cb_lock);
3646 /* Need to obtain rtnl lock if block is bound to devs that require it.
3647 * In block bind code cb_lock is obtained while holding rtnl, so we must
3650 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3651 up_read(&block->cb_lock);
3656 /* Make sure all netdevs sharing this block are offload-capable. */
3657 if (block->nooffloaddevcnt && err_stop) {
3662 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3666 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3673 tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3676 up_read(&block->cb_lock);
3683 /* Destroy filter and decrement block offload counter, if filter was previously
3687 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3691 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3697 down_read(&block->cb_lock);
3698 /* Need to obtain rtnl lock if block is bound to devs that require it.
3699 * In block bind code cb_lock is obtained while holding rtnl, so we must
3702 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3703 up_read(&block->cb_lock);
3708 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3710 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3714 up_read(&block->cb_lock);
3721 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3732 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3918 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3925 tcf_block_put_ext(qe->block, sch, &qe->info);
3942 /* Bounce newly-configured block or change in block. */
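
The matches above trace the lifetime of struct tcf_block in the kernel's net/sched/cls_api.c: tcf_block_create() allocates the block, initializes its lock, cb_lock, flow_block, chain list and ports xarray, and starts the refcount at 1; tcf_block_refcnt_get() looks a shared block up by index and only succeeds via refcount_inc_not_zero(); __tcf_block_put() drops the reference with refcount_dec_and_mutex_lock() and, on the last put, removes a shared block from the index, flushes its chains, unbinds offloads and destroys it (with the final free deferred through kfree_rcu()). Below is a minimal user-space sketch of that get/put pattern, assuming nothing beyond the primitives named in the listing; struct blk, blk_new(), blk_get() and blk_put() are hypothetical names for illustration and deliberately omit the RCU deferral and chain-flush details of the real code.

/* Hypothetical user-space analogue of the tcf_block refcounting pattern. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct blk {
	atomic_int refcnt;
	pthread_mutex_t lock;
	/* chains, index, offload counters etc. elided */
};

/* Roughly mirrors tcf_block_create(): zeroed allocation, lock init,
 * creator starts out holding one reference. */
static struct blk *blk_new(void)
{
	struct blk *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	pthread_mutex_init(&b->lock, NULL);
	atomic_init(&b->refcnt, 1);
	return b;
}

/* Mirrors the refcount_inc_not_zero() step in tcf_block_refcnt_get():
 * a lookup may race with teardown, so only take a reference if the
 * count has not already dropped to zero. */
static bool blk_get(struct blk *b)
{
	int old = atomic_load(&b->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&b->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

/* Loosely corresponds to refcount_dec_and_mutex_lock() plus the
 * teardown path of __tcf_block_put(): the thread dropping the last
 * reference takes the lock, tears the object down and frees it. */
static void blk_put(struct blk *b)
{
	if (atomic_fetch_sub(&b->refcnt, 1) == 1) {
		pthread_mutex_lock(&b->lock);
		/* in the kernel: remove from index, flush chains,
		 * unbind offloads */
		pthread_mutex_unlock(&b->lock);
		pthread_mutex_destroy(&b->lock);
		free(b);
	}
}

int main(void)
{
	struct blk *b = blk_new();

	if (!b)
		return 1;
	if (blk_get(b))		/* second reference, e.g. a concurrent lookup */
		blk_put(b);
	blk_put(b);		/* last reference: teardown and free */
	return 0;
}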