Lines Matching full:block
59 struct tcf_block *block = chain->block; in tcf_proto_signal_destroying() local
61 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
62 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node, in tcf_proto_signal_destroying()
64 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroying()
83 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter, in tcf_proto_exists_destroying()
98 struct tcf_block *block = chain->block; in tcf_proto_signal_destroyed() local
100 mutex_lock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
103 mutex_unlock(&block->proto_destroy_lock); in tcf_proto_signal_destroyed()
337 #define ASSERT_BLOCK_LOCKED(block) \ argument
338 lockdep_assert_held(&(block)->lock)
346 static struct tcf_chain *tcf_chain_create(struct tcf_block *block, in tcf_chain_create() argument
351 ASSERT_BLOCK_LOCKED(block); in tcf_chain_create()
356 list_add_tail_rcu(&chain->list, &block->chain_list); in tcf_chain_create()
358 chain->block = block; in tcf_chain_create()
362 block->chain0.chain = chain; in tcf_chain_create()
377 struct tcf_block *block = chain->block; in tcf_chain0_head_change() local
382 mutex_lock(&block->lock); in tcf_chain0_head_change()
383 list_for_each_entry(item, &block->chain0.filter_chain_list, list) in tcf_chain0_head_change()
385 mutex_unlock(&block->lock); in tcf_chain0_head_change()
388 /* Returns true if block can be safely freed. */
392 struct tcf_block *block = chain->block; in tcf_chain_detach() local
394 ASSERT_BLOCK_LOCKED(block); in tcf_chain_detach()
398 block->chain0.chain = NULL; in tcf_chain_detach()
400 if (list_empty(&block->chain_list) && in tcf_chain_detach()
401 refcount_read(&block->refcnt) == 0) in tcf_chain_detach()
407 static void tcf_block_destroy(struct tcf_block *block) in tcf_block_destroy() argument
409 mutex_destroy(&block->lock); in tcf_block_destroy()
410 mutex_destroy(&block->proto_destroy_lock); in tcf_block_destroy()
411 kfree_rcu(block, rcu); in tcf_block_destroy()
416 struct tcf_block *block = chain->block; in tcf_chain_destroy() local
421 tcf_block_destroy(block); in tcf_chain_destroy()
426 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_hold()
433 ASSERT_BLOCK_LOCKED(chain->block); in tcf_chain_held_by_acts_only()
441 static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block, in tcf_chain_lookup() argument
446 ASSERT_BLOCK_LOCKED(block); in tcf_chain_lookup()
448 list_for_each_entry(chain, &block->chain_list, list) { in tcf_chain_lookup()
456 static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block, in tcf_chain_lookup_rcu() argument
461 list_for_each_entry_rcu(chain, &block->chain_list, list) { in tcf_chain_lookup_rcu()
472 static struct tcf_chain *__tcf_chain_get(struct tcf_block *block, in __tcf_chain_get() argument
479 mutex_lock(&block->lock); in __tcf_chain_get()
480 chain = tcf_chain_lookup(block, chain_index); in __tcf_chain_get()
486 chain = tcf_chain_create(block, chain_index); in __tcf_chain_get()
494 mutex_unlock(&block->lock); in __tcf_chain_get()
508 mutex_unlock(&block->lock); in __tcf_chain_get()
512 static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, in tcf_chain_get() argument
515 return __tcf_chain_get(block, chain_index, create, false); in tcf_chain_get()
518 struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index) in tcf_chain_get_by_act() argument
520 return __tcf_chain_get(block, chain_index, true, true); in tcf_chain_get_by_act()
528 struct tcf_block *block, struct sk_buff *oskb,
534 struct tcf_block *block = chain->block; in __tcf_chain_put() local
540 mutex_lock(&block->lock); in __tcf_chain_put()
543 mutex_unlock(&block->lock); in __tcf_chain_put()
552 /* tc_chain_notify_delete can't be called while holding block lock. in __tcf_chain_put()
553 * However, when block is unlocked chain can be changed concurrently, so in __tcf_chain_put()
563 block, NULL, 0, 0, false); in __tcf_chain_put()
570 mutex_unlock(&block->lock); in __tcf_chain_put()
618 static int tcf_block_setup(struct tcf_block *block,
631 bo->block = flow_block; in tcf_block_offload_init()
639 static void tcf_block_unbind(struct tcf_block *block,
644 struct tcf_block *block = block_cb->indr.data; in tc_block_indr_cleanup() local
652 &block->flow_block, tcf_block_shared(block), in tc_block_indr_cleanup()
655 down_write(&block->cb_lock); in tc_block_indr_cleanup()
658 tcf_block_unbind(block, &bo); in tc_block_indr_cleanup()
659 up_write(&block->cb_lock); in tc_block_indr_cleanup()
663 static bool tcf_block_offload_in_use(struct tcf_block *block) in tcf_block_offload_in_use() argument
665 return atomic_read(&block->offloadcnt); in tcf_block_offload_in_use()
668 static int tcf_block_offload_cmd(struct tcf_block *block, in tcf_block_offload_cmd() argument
677 &block->flow_block, tcf_block_shared(block), in tcf_block_offload_cmd()
690 return tcf_block_setup(block, &bo); in tcf_block_offload_cmd()
693 flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo, in tcf_block_offload_cmd()
695 tcf_block_setup(block, &bo); in tcf_block_offload_cmd()
700 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_bind() argument
707 down_write(&block->cb_lock); in tcf_block_offload_bind()
709 /* If tc offload feature is disabled and the block we try to bind in tcf_block_offload_bind()
714 tcf_block_offload_in_use(block)) { in tcf_block_offload_bind()
715 NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled"); in tcf_block_offload_bind()
720 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack); in tcf_block_offload_bind()
726 up_write(&block->cb_lock); in tcf_block_offload_bind()
730 if (tcf_block_offload_in_use(block)) in tcf_block_offload_bind()
734 block->nooffloaddevcnt++; in tcf_block_offload_bind()
736 up_write(&block->cb_lock); in tcf_block_offload_bind()
740 static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q, in tcf_block_offload_unbind() argument
746 down_write(&block->cb_lock); in tcf_block_offload_unbind()
747 err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL); in tcf_block_offload_unbind()
750 up_write(&block->cb_lock); in tcf_block_offload_unbind()
754 WARN_ON(block->nooffloaddevcnt-- == 0); in tcf_block_offload_unbind()
755 up_write(&block->cb_lock); in tcf_block_offload_unbind()
759 tcf_chain0_head_change_cb_add(struct tcf_block *block, in tcf_chain0_head_change_cb_add() argument
774 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
775 chain0 = block->chain0.chain; in tcf_chain0_head_change_cb_add()
779 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
780 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
791 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_add()
792 list_add(&item->list, &block->chain0.filter_chain_list); in tcf_chain0_head_change_cb_add()
793 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_add()
803 tcf_chain0_head_change_cb_del(struct tcf_block *block, in tcf_chain0_head_change_cb_del() argument
808 mutex_lock(&block->lock); in tcf_chain0_head_change_cb_del()
809 list_for_each_entry(item, &block->chain0.filter_chain_list, list) { in tcf_chain0_head_change_cb_del()
813 if (block->chain0.chain) in tcf_chain0_head_change_cb_del()
816 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
822 mutex_unlock(&block->lock); in tcf_chain0_head_change_cb_del()
833 static int tcf_block_insert(struct tcf_block *block, struct net *net, in tcf_block_insert() argument
841 err = idr_alloc_u32(&tn->idr, block, &block->index, block->index, in tcf_block_insert()
849 static void tcf_block_remove(struct tcf_block *block, struct net *net) in tcf_block_remove() argument
854 idr_remove(&tn->idr, block->index); in tcf_block_remove()
862 struct tcf_block *block; in tcf_block_create() local
864 block = kzalloc(sizeof(*block), GFP_KERNEL); in tcf_block_create()
865 if (!block) { in tcf_block_create()
866 NL_SET_ERR_MSG(extack, "Memory allocation for block failed"); in tcf_block_create()
869 mutex_init(&block->lock); in tcf_block_create()
870 mutex_init(&block->proto_destroy_lock); in tcf_block_create()
871 init_rwsem(&block->cb_lock); in tcf_block_create()
872 flow_block_init(&block->flow_block); in tcf_block_create()
873 INIT_LIST_HEAD(&block->chain_list); in tcf_block_create()
874 INIT_LIST_HEAD(&block->owner_list); in tcf_block_create()
875 INIT_LIST_HEAD(&block->chain0.filter_chain_list); in tcf_block_create()
877 refcount_set(&block->refcnt, 1); in tcf_block_create()
878 block->net = net; in tcf_block_create()
879 block->index = block_index; in tcf_block_create()
882 if (!tcf_block_shared(block)) in tcf_block_create()
883 block->q = q; in tcf_block_create()
884 return block; in tcf_block_create()
896 struct tcf_block *block; in tcf_block_refcnt_get() local
899 block = tcf_block_lookup(net, block_index); in tcf_block_refcnt_get()
900 if (block && !refcount_inc_not_zero(&block->refcnt)) in tcf_block_refcnt_get()
901 block = NULL; in tcf_block_refcnt_get()
904 return block; in tcf_block_refcnt_get()
908 __tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in __tcf_get_next_chain() argument
910 mutex_lock(&block->lock); in __tcf_get_next_chain()
912 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
915 chain = list_first_entry_or_null(&block->chain_list, in __tcf_get_next_chain()
920 chain = list_is_last(&chain->list, &block->chain_list) ? in __tcf_get_next_chain()
925 mutex_unlock(&block->lock); in __tcf_get_next_chain()
931 * block. It properly obtains block->lock and takes reference to chain before
940 tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain) in tcf_get_next_chain() argument
942 struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain); in tcf_get_next_chain()
1005 static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held) in tcf_block_flush_all_chains() argument
1009 /* Last reference to block. At this point chains cannot be added or in tcf_block_flush_all_chains()
1012 for (chain = tcf_get_next_chain(block, NULL); in tcf_block_flush_all_chains()
1014 chain = tcf_get_next_chain(block, chain)) { in tcf_block_flush_all_chains()
1124 struct tcf_block *block; in __tcf_block_find() local
1127 block = tcf_block_refcnt_get(net, block_index); in __tcf_block_find()
1128 if (!block) { in __tcf_block_find()
1129 NL_SET_ERR_MSG(extack, "Block of given index was not found"); in __tcf_block_find()
1135 block = cops->tcf_block(q, cl, extack); in __tcf_block_find()
1136 if (!block) in __tcf_block_find()
1139 if (tcf_block_shared(block)) { in __tcf_block_find()
1140 …NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the … in __tcf_block_find()
1144 /* Always take reference to block in order to support execution in __tcf_block_find()
1146 * must release block when it is finished using it. 'if' block in __tcf_block_find()
1147 * of this conditional obtain reference to block by calling in __tcf_block_find()
1150 refcount_inc(&block->refcnt); in __tcf_block_find()
1153 return block; in __tcf_block_find()
1156 static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q, in __tcf_block_put() argument
1159 if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) { in __tcf_block_put()
1160 /* Flushing/putting all chains will cause the block to be in __tcf_block_put()
1162 * is empty, block has to be manually deallocated. After block in __tcf_block_put()
1164 * increment it or add new chains to block. in __tcf_block_put()
1166 bool free_block = list_empty(&block->chain_list); in __tcf_block_put()
1168 mutex_unlock(&block->lock); in __tcf_block_put()
1169 if (tcf_block_shared(block)) in __tcf_block_put()
1170 tcf_block_remove(block, block->net); in __tcf_block_put()
1173 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
1176 tcf_block_destroy(block); in __tcf_block_put()
1178 tcf_block_flush_all_chains(block, rtnl_held); in __tcf_block_put()
1180 tcf_block_offload_unbind(block, q, ei); in __tcf_block_put()
1184 static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held) in tcf_block_refcnt_put() argument
1186 __tcf_block_put(block, NULL, NULL, rtnl_held); in tcf_block_refcnt_put()
1189 /* Find tcf block.
1198 struct tcf_block *block; in tcf_block_find() local
1211 block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack); in tcf_block_find()
1212 if (IS_ERR(block)) { in tcf_block_find()
1213 err = PTR_ERR(block); in tcf_block_find()
1217 return block; in tcf_block_find()
1227 static void tcf_block_release(struct Qdisc *q, struct tcf_block *block, in tcf_block_release() argument
1230 if (!IS_ERR_OR_NULL(block)) in tcf_block_release()
1231 tcf_block_refcnt_put(block, rtnl_held); in tcf_block_release()
1248 tcf_block_owner_netif_keep_dst(struct tcf_block *block, in tcf_block_owner_netif_keep_dst() argument
1252 if (block->keep_dst && in tcf_block_owner_netif_keep_dst()
1258 void tcf_block_netif_keep_dst(struct tcf_block *block) in tcf_block_netif_keep_dst() argument
1262 block->keep_dst = true; in tcf_block_netif_keep_dst()
1263 list_for_each_entry(item, &block->owner_list, list) in tcf_block_netif_keep_dst()
1264 tcf_block_owner_netif_keep_dst(block, item->q, in tcf_block_netif_keep_dst()
1269 static int tcf_block_owner_add(struct tcf_block *block, in tcf_block_owner_add() argument
1280 list_add(&item->list, &block->owner_list); in tcf_block_owner_add()
1284 static void tcf_block_owner_del(struct tcf_block *block, in tcf_block_owner_del() argument
1290 list_for_each_entry(item, &block->owner_list, list) { in tcf_block_owner_del()
1305 struct tcf_block *block = NULL; in tcf_block_get_ext() local
1309 /* block_index not 0 means the shared block is requested */ in tcf_block_get_ext()
1310 block = tcf_block_refcnt_get(net, ei->block_index); in tcf_block_get_ext()
1312 if (!block) { in tcf_block_get_ext()
1313 block = tcf_block_create(net, q, ei->block_index, extack); in tcf_block_get_ext()
1314 if (IS_ERR(block)) in tcf_block_get_ext()
1315 return PTR_ERR(block); in tcf_block_get_ext()
1316 if (tcf_block_shared(block)) { in tcf_block_get_ext()
1317 err = tcf_block_insert(block, net, extack); in tcf_block_get_ext()
1323 err = tcf_block_owner_add(block, q, ei->binder_type); in tcf_block_get_ext()
1327 tcf_block_owner_netif_keep_dst(block, q, ei->binder_type); in tcf_block_get_ext()
1329 err = tcf_chain0_head_change_cb_add(block, ei, extack); in tcf_block_get_ext()
1333 err = tcf_block_offload_bind(block, q, ei, extack); in tcf_block_get_ext()
1337 *p_block = block; in tcf_block_get_ext()
1341 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_get_ext()
1343 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_get_ext()
1346 tcf_block_refcnt_put(block, true); in tcf_block_get_ext()
1375 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, in tcf_block_put_ext() argument
1378 if (!block) in tcf_block_put_ext()
1380 tcf_chain0_head_change_cb_del(block, ei); in tcf_block_put_ext()
1381 tcf_block_owner_del(block, q, ei->binder_type); in tcf_block_put_ext()
1383 __tcf_block_put(block, q, ei, true); in tcf_block_put_ext()
1387 void tcf_block_put(struct tcf_block *block) in tcf_block_put() argument
1391 if (!block) in tcf_block_put()
1393 tcf_block_put_ext(block, block->q, &ei); in tcf_block_put()
1399 tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb, in tcf_block_playback_offloads() argument
1407 lockdep_assert_held(&block->cb_lock); in tcf_block_playback_offloads()
1409 for (chain = __tcf_get_next_chain(block, NULL); in tcf_block_playback_offloads()
1412 chain = __tcf_get_next_chain(block, chain), in tcf_block_playback_offloads()
1436 tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use, in tcf_block_playback_offloads()
1441 static int tcf_block_bind(struct tcf_block *block, in tcf_block_bind() argument
1447 lockdep_assert_held(&block->cb_lock); in tcf_block_bind()
1450 err = tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1452 tcf_block_offload_in_use(block), in tcf_block_bind()
1457 block->lockeddevcnt++; in tcf_block_bind()
1461 list_splice(&bo->cb_list, &block->flow_block.cb_list); in tcf_block_bind()
1470 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_bind()
1472 tcf_block_offload_in_use(block), in tcf_block_bind()
1475 block->lockeddevcnt--; in tcf_block_bind()
1483 static void tcf_block_unbind(struct tcf_block *block, in tcf_block_unbind() argument
1488 lockdep_assert_held(&block->cb_lock); in tcf_block_unbind()
1491 tcf_block_playback_offloads(block, block_cb->cb, in tcf_block_unbind()
1493 tcf_block_offload_in_use(block), in tcf_block_unbind()
1498 block->lockeddevcnt--; in tcf_block_unbind()
1502 static int tcf_block_setup(struct tcf_block *block, in tcf_block_setup() argument
1509 err = tcf_block_bind(block, bo); in tcf_block_setup()
1513 tcf_block_unbind(block, bo); in tcf_block_setup()
1570 tp->chain->block->index, in __tcf_classify()
1798 struct tcf_proto *tp, struct tcf_block *block, in tcf_fill_node() argument
1819 tcm->tcm_block_index = block->index; in tcf_fill_node()
1853 struct tcf_block *block, struct Qdisc *q, in tfilter_notify() argument
1865 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, in tfilter_notify()
1885 struct tcf_block *block, struct Qdisc *q, in tfilter_del_notify() argument
1897 if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, in tfilter_del_notify()
1925 struct tcf_block *block, struct Qdisc *q, in tfilter_notify_chain() argument
1934 tfilter_notify(net, oskb, n, tp, block, in tfilter_notify_chain()
1959 struct tcf_block *block; in tc_new_tfilter() local
1985 block = NULL; in tc_new_tfilter()
2015 * block is shared (no qdisc found), qdisc is not unlocked, classifier in tc_new_tfilter()
2029 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_new_tfilter()
2031 if (IS_ERR(block)) { in tc_new_tfilter()
2032 err = PTR_ERR(block); in tc_new_tfilter()
2035 block->classid = parent; in tc_new_tfilter()
2043 chain = tcf_chain_get(block, chain_index, true); in tc_new_tfilter()
2136 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_new_tfilter()
2154 tcf_block_release(q, block, rtnl_held); in tc_new_tfilter()
2188 struct tcf_block *block = NULL; in tc_del_tfilter() local
2224 /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc in tc_del_tfilter()
2239 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_del_tfilter()
2241 if (IS_ERR(block)) { in tc_del_tfilter()
2242 err = PTR_ERR(block); in tc_del_tfilter()
2252 chain = tcf_chain_get(block, chain_index, false); in tc_del_tfilter()
2267 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_del_tfilter()
2291 tfilter_notify(net, skb, n, tp, block, q, parent, fh, in tc_del_tfilter()
2306 err = tfilter_del_notify(net, skb, n, tp, block, in tc_del_tfilter()
2322 tcf_block_release(q, block, rtnl_held); in tc_del_tfilter()
2348 struct tcf_block *block = NULL; in tc_get_tfilter() local
2381 /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not in tc_get_tfilter()
2395 block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index, in tc_get_tfilter()
2397 if (IS_ERR(block)) { in tc_get_tfilter()
2398 err = PTR_ERR(block); in tc_get_tfilter()
2408 chain = tcf_chain_get(block, chain_index, false); in tc_get_tfilter()
2435 err = tfilter_notify(net, skb, n, tp, block, q, parent, in tc_get_tfilter()
2448 tcf_block_release(q, block, rtnl_held); in tc_get_tfilter()
2460 struct tcf_block *block; member
2471 return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent, in tcf_node_dump()
2482 struct tcf_block *block = chain->block; in tcf_chain_dump() local
2505 if (tcf_fill_node(net, skb, tp, block, q, parent, NULL, in tcf_chain_dump()
2517 arg.block = block; in tcf_chain_dump()
2549 struct tcf_block *block; in tc_dump_tfilter() local
2573 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_tfilter()
2574 if (!block) in tc_dump_tfilter()
2576 /* If we work with block index, q is NULL and parent value in tc_dump_tfilter()
2609 block = cops->tcf_block(q, cl, NULL); in tc_dump_tfilter()
2610 if (!block) in tc_dump_tfilter()
2612 parent = block->classid; in tc_dump_tfilter()
2613 if (tcf_block_shared(block)) in tc_dump_tfilter()
2620 for (chain = __tcf_get_next_chain(block, NULL); in tc_dump_tfilter()
2623 chain = __tcf_get_next_chain(block, chain), in tc_dump_tfilter()
2637 tcf_block_refcnt_put(block, true); in tc_dump_tfilter()
2650 struct tcf_block *block, in tc_chain_fill_node() argument
2670 if (block->q) { in tc_chain_fill_node()
2671 tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex; in tc_chain_fill_node()
2672 tcm->tcm_parent = block->q->handle; in tc_chain_fill_node()
2675 tcm->tcm_block_index = block->index; in tc_chain_fill_node()
2701 struct tcf_block *block = chain->block; in tc_chain_notify() local
2702 struct net *net = block->net; in tc_chain_notify()
2711 chain->index, net, skb, block, portid, in tc_chain_notify()
2730 struct tcf_block *block, struct sk_buff *oskb, in tc_chain_notify_delete() argument
2734 struct net *net = block->net; in tc_chain_notify_delete()
2742 block, portid, seq, flags, RTM_DELCHAIN) <= 0) { in tc_chain_notify_delete()
2812 struct tcf_block *block; in tc_ctl_chain() local
2831 block = tcf_block_find(net, &q, &parent, &cl, in tc_ctl_chain()
2833 if (IS_ERR(block)) in tc_ctl_chain()
2834 return PTR_ERR(block); in tc_ctl_chain()
2843 mutex_lock(&block->lock); in tc_ctl_chain()
2844 chain = tcf_chain_lookup(block, chain_index); in tc_ctl_chain()
2863 chain = tcf_chain_create(block, chain_index); in tc_ctl_chain()
2880 /* Modifying chain requires holding parent block lock. In case in tc_ctl_chain()
2888 mutex_unlock(&block->lock); in tc_ctl_chain()
2902 tfilter_notify_chain(net, skb, block, q, parent, n, in tc_ctl_chain()
2926 tcf_block_release(q, block, true); in tc_ctl_chain()
2933 mutex_unlock(&block->lock); in tc_ctl_chain()
2943 struct tcf_block *block; in tc_dump_chain() local
2960 block = tcf_block_refcnt_get(net, tcm->tcm_block_index); in tc_dump_chain()
2961 if (!block) in tc_dump_chain()
2963 /* If we work with block index, q is NULL and parent value in tc_dump_chain()
2998 block = cops->tcf_block(q, cl, NULL); in tc_dump_chain()
2999 if (!block) in tc_dump_chain()
3001 if (tcf_block_shared(block)) in tc_dump_chain()
3008 mutex_lock(&block->lock); in tc_dump_chain()
3009 list_for_each_entry(chain, &block->chain_list, list) { in tc_dump_chain()
3020 chain->index, net, skb, block, in tc_dump_chain()
3028 mutex_unlock(&block->lock); in tc_dump_chain()
3031 tcf_block_refcnt_put(block, true); in tc_dump_chain()
3204 static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags) in tcf_block_offload_inc() argument
3209 atomic_inc(&block->offloadcnt); in tcf_block_offload_inc()
3212 static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags) in tcf_block_offload_dec() argument
3217 atomic_dec(&block->offloadcnt); in tcf_block_offload_dec()
3220 static void tc_cls_offload_cnt_update(struct tcf_block *block, in tc_cls_offload_cnt_update() argument
3224 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_update()
3229 tcf_block_offload_inc(block, flags); in tc_cls_offload_cnt_update()
3234 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_update()
3240 tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp, in tc_cls_offload_cnt_reset() argument
3243 lockdep_assert_held(&block->cb_lock); in tc_cls_offload_cnt_reset()
3246 tcf_block_offload_dec(block, flags); in tc_cls_offload_cnt_reset()
3252 __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in __tc_setup_cb_call() argument
3259 list_for_each_entry(block_cb, &block->flow_block.cb_list, list) { in __tc_setup_cb_call()
3271 int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type, in tc_setup_cb_call() argument
3274 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_call()
3280 down_read(&block->cb_lock); in tc_setup_cb_call()
3281 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_call()
3282 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_call()
3285 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_call()
3286 up_read(&block->cb_lock); in tc_setup_cb_call()
3291 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_call()
3293 up_read(&block->cb_lock); in tc_setup_cb_call()
3301 * successfully offloaded, increment block offloads counter. On failure,
3306 int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_add() argument
3310 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_add()
3316 down_read(&block->cb_lock); in tc_setup_cb_add()
3317 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_add()
3318 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_add()
3321 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_add()
3322 up_read(&block->cb_lock); in tc_setup_cb_add()
3327 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_add()
3328 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_add()
3333 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_add()
3340 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, in tc_setup_cb_add()
3343 up_read(&block->cb_lock); in tc_setup_cb_add()
3351 * successfully offloaded, increment block offload counter. On failure,
3356 int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_replace() argument
3362 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_replace()
3368 down_read(&block->cb_lock); in tc_setup_cb_replace()
3369 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_replace()
3370 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_replace()
3373 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_replace()
3374 up_read(&block->cb_lock); in tc_setup_cb_replace()
3379 /* Make sure all netdevs sharing this block are offload-capable. */ in tc_setup_cb_replace()
3380 if (block->nooffloaddevcnt && err_stop) { in tc_setup_cb_replace()
3385 tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags); in tc_setup_cb_replace()
3389 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_replace()
3396 tc_cls_offload_cnt_update(block, tp, new_in_hw_count, in tc_setup_cb_replace()
3399 up_read(&block->cb_lock); in tc_setup_cb_replace()
3406 /* Destroy filter and decrement block offload counter, if filter was previously
3410 int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_destroy() argument
3414 bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held; in tc_setup_cb_destroy()
3420 down_read(&block->cb_lock); in tc_setup_cb_destroy()
3421 /* Need to obtain rtnl lock if block is bound to devs that require it. in tc_setup_cb_destroy()
3422 * In block bind code cb_lock is obtained while holding rtnl, so we must in tc_setup_cb_destroy()
3425 if (!rtnl_held && !take_rtnl && block->lockeddevcnt) { in tc_setup_cb_destroy()
3426 up_read(&block->cb_lock); in tc_setup_cb_destroy()
3431 ok_count = __tc_setup_cb_call(block, type, type_data, err_stop); in tc_setup_cb_destroy()
3433 tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags); in tc_setup_cb_destroy()
3437 up_read(&block->cb_lock); in tc_setup_cb_destroy()
3444 int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp, in tc_setup_cb_reoffload() argument
3455 tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1, in tc_setup_cb_reoffload()
3774 NL_SET_ERR_MSG(extack, "Block number may not be zero"); in tcf_qevent_parse_block_index()
3804 return tcf_block_get_ext(&qe->block, sch, &qe->info, extack); in tcf_qevent_init()
3811 tcf_block_put_ext(qe->block, sch, &qe->info); in tcf_qevent_destroy()
3828 /* Bounce newly-configured block or change in block. */ in tcf_qevent_validate_change()
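Taken together, the matches above trace the full tcf_block lifecycle in what appears to be net/sched/cls_api.c: creation and refcounting (tcf_block_create, tcf_block_refcnt_get, __tcf_block_put), chain management under block->lock, and offload binding under block->cb_lock. As a minimal sketch of how that lifecycle looks from a qdisc's side, the fragment below pairs tcf_block_get() in ->init() with the tcf_block_put() entry point shown above in ->destroy(). Only the tcf_block_* calls are the real API; every demo_* name is hypothetical.

	#include <net/sch_generic.h>
	#include <net/netlink.h>
	#include <net/pkt_cls.h>

	/* Hypothetical qdisc private data; field names are illustrative. */
	struct demo_sched_data {
		struct tcf_proto __rcu *filter_list;	/* chain 0 head, updated by the chain0 head-change callback */
		struct tcf_block *block;		/* filter block owned by (or shared with) this qdisc */
	};

	static int demo_init(struct Qdisc *sch, struct nlattr *opt,
			     struct netlink_ext_ack *extack)
	{
		struct demo_sched_data *q = qdisc_priv(sch);

		/* Allocates a new block (or takes a reference on a shared one)
		 * and registers the chain0 head-change callback that keeps
		 * q->filter_list pointing at the first filter of chain 0.
		 */
		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
	}

	static void demo_destroy(struct Qdisc *sch)
	{
		struct demo_sched_data *q = qdisc_priv(sch);

		/* Drops this qdisc's reference; the block itself is freed only
		 * once the last reference goes away (see __tcf_block_put() above).
		 */
		tcf_block_put(q->block);
	}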
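The cb_lock/flow_block matches (tcf_block_offload_cmd, tcf_block_bind, tcf_block_unbind, __tc_setup_cb_call) form the core side of the driver-facing flow_block interface. As a rough sketch of the other end, the fragment below shows what a driver's TC_SETUP_BLOCK handler typically does: allocate a flow_block_cb on FLOW_BLOCK_BIND and remove it on FLOW_BLOCK_UNBIND, after which tcf_block_setup() above splices the callback onto block->flow_block.cb_list. The flow_block_cb_* helpers are the real API; all demo_* names are hypothetical, and a production driver would also guard against duplicate binds and translate the per-filter requests.

	#include <linux/list.h>
	#include <linux/err.h>
	#include <net/flow_offload.h>
	#include <net/pkt_cls.h>

	static LIST_HEAD(demo_block_cb_list);

	/* Invoked by __tc_setup_cb_call() for each filter on the bound block
	 * (e.g. TC_SETUP_CLSFLOWER requests).
	 */
	static int demo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
					  void *cb_priv)
	{
		return -EOPNOTSUPP;	/* a real driver would program hardware here */
	}

	/* Typical TC_SETUP_BLOCK handling inside a driver's ndo_setup_tc(). */
	static int demo_setup_tc_block(void *priv, struct flow_block_offload *f)
	{
		struct flow_block_cb *block_cb;

		if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
			return -EOPNOTSUPP;

		switch (f->command) {
		case FLOW_BLOCK_BIND:
			block_cb = flow_block_cb_alloc(demo_setup_tc_block_cb,
						       priv, priv, NULL);
			if (IS_ERR(block_cb))
				return PTR_ERR(block_cb);
			flow_block_cb_add(block_cb, f);
			list_add_tail(&block_cb->driver_list, &demo_block_cb_list);
			return 0;
		case FLOW_BLOCK_UNBIND:
			block_cb = flow_block_cb_lookup(f->block,
							demo_setup_tc_block_cb, priv);
			if (!block_cb)
				return -ENOENT;
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	}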