// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/tc_act/tc_gate.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
        return jhash_3words(tp->chain->index, tp->prio,
                            (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
                                        struct tcf_proto *tp)
{
        struct tcf_block *block = chain->block;

        mutex_lock(&block->proto_destroy_lock);
        hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
                     destroy_obj_hashfn(tp));
        mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
                          const struct tcf_proto *tp2)
{
        return tp1->chain->index == tp2->chain->index &&
               tp1->prio == tp2->prio &&
               tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
                                        struct tcf_proto *tp)
{
        u32 hash = destroy_obj_hashfn(tp);
        struct tcf_proto *iter;
        bool found = false;

        rcu_read_lock();
        hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
                                   destroy_ht_node, hash) {
                if (tcf_proto_cmp(tp, iter)) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();

        return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
        struct tcf_block *block = chain->block;

        mutex_lock(&block->proto_destroy_lock);
        if (hash_hashed(&tp->destroy_ht_node))
                hash_del_rcu(&tp->destroy_ht_node);
        mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
        const struct tcf_proto_ops *t, *res = NULL;

        if (kind) {
                read_lock(&cls_mod_lock);
                list_for_each_entry(t, &tcf_proto_base, head) {
                        if (strcmp(kind, t->kind) == 0) {
                                if (try_module_get(t->owner))
                                        res = t;
                                break;
                        }
                }
                read_unlock(&cls_mod_lock);
        }
        return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
                     struct netlink_ext_ack *extack)
{
        const struct tcf_proto_ops *ops;

        ops = __tcf_proto_lookup_ops(kind);
        if (ops)
                return ops;
#ifdef CONFIG_MODULES
        if (rtnl_held)
                rtnl_unlock();
        request_module("cls_%s", kind);
        if (rtnl_held)
                rtnl_lock();
        ops = __tcf_proto_lookup_ops(kind);
        /* We dropped the RTNL semaphore in order to perform
         * the module load. So, even if we succeeded in loading
         * the module we have to replay the request. We indicate
         * this using -EAGAIN.
         */
        if (ops) {
                module_put(ops->owner);
                return ERR_PTR(-EAGAIN);
        }
#endif
        NL_SET_ERR_MSG(extack, "TC classifier not found");
        return ERR_PTR(-ENOENT);
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -EEXIST;

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head)
                if (!strcmp(ops->kind, t->kind))
                        goto out;

        list_add_tail(&ops->head, &tcf_proto_base);
        rc = 0;
out:
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);
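
/* A minimal registration sketch, loosely modelled on the in-tree
 * classifiers such as cls_basic; the "foo" kind and all foo_* callbacks
 * here are hypothetical:
 *
 *	static struct tcf_proto_ops cls_foo_ops __read_mostly = {
 *		.kind		= "foo",
 *		.classify	= foo_classify,
 *		.init		= foo_init,
 *		.destroy	= foo_destroy,
 *		.get		= foo_get,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init init_foo(void)
 *	{
 *		return register_tcf_proto_ops(&cls_foo_ops);
 *	}
 *
 * with a matching unregister_tcf_proto_ops(&cls_foo_ops) on module exit.
 * register_tcf_proto_ops() returns -EEXIST if the kind string is taken.
 */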

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
        struct tcf_proto_ops *t;
        int rc = -ENOENT;

        /* Wait for outstanding call_rcu()s, if any, from a
         * tcf_proto_ops's destroy() handler.
         */
        rcu_barrier();
        flush_workqueue(tc_filter_wq);

        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
                        list_del(&t->head);
                        rc = 0;
                        break;
                }
        }
        write_unlock(&cls_mod_lock);
        return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
        INIT_RCU_WORK(rwork, func);
        return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
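
/* A hedged sketch of the deferred-free pattern tcf_queue_work() supports;
 * struct foo_filter and foo_destroy_work() are hypothetical stand-ins for
 * what classifiers like cls_basic do:
 *
 *	struct foo_filter {
 *		struct tcf_result res;
 *		struct rcu_work rwork;
 *	};
 *
 *	static void foo_destroy_work(struct work_struct *work)
 *	{
 *		struct foo_filter *f = container_of(to_rcu_work(work),
 *						    struct foo_filter, rwork);
 *
 *		kfree(f);
 *	}
 *
 * Calling tcf_queue_work(&f->rwork, foo_destroy_work) on filter delete
 * defers the kfree() until after an RCU grace period, on tc_filter_wq.
 */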

/* Select new prio value from the range managed by the kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
        u32 first = TC_H_MAKE(0xC0000000U, 0U);

        if (tp)
                first = tp->prio - 1;

        return TC_H_MAJ(first);
}
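
/* Worked example: on an empty chain (tp == NULL) the first auto-assigned
 * prio is TC_H_MAJ(0xC0000000) == 0xC0000000. If the current head already
 * has prio 0xC0000000, the next auto prio is TC_H_MAJ(0xC0000000 - 1) ==
 * TC_H_MAJ(0xBFFFFFFF) == 0xBFFF0000, so each auto-prio filter is slotted
 * in ahead of the current head at a numerically lower prio.
 */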

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
        if (kind)
                return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
        memset(name, 0, IFNAMSIZ);
        return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
        const struct tcf_proto_ops *ops;
        bool ret;

        if (strlen(kind) == 0)
                return false;

        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error return false to take rtnl lock. Proto lookup/create
         * functions will perform lookup again and properly handle errors.
         */
        if (IS_ERR(ops))
                return false;

        ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
        module_put(ops->owner);
        return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
                                          u32 prio, struct tcf_chain *chain,
                                          bool rtnl_held,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_proto *tp;
        int err;

        tp = kzalloc(sizeof(*tp), GFP_KERNEL);
        if (!tp)
                return ERR_PTR(-ENOBUFS);

        tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
        if (IS_ERR(tp->ops)) {
                err = PTR_ERR(tp->ops);
                goto errout;
        }
        tp->classify = tp->ops->classify;
        tp->protocol = protocol;
        tp->prio = prio;
        tp->chain = chain;
        spin_lock_init(&tp->lock);
        refcount_set(&tp->refcnt, 1);

        err = tp->ops->init(tp);
        if (err) {
                module_put(tp->ops->owner);
                goto errout;
        }
        return tp;

errout:
        kfree(tp);
        return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
        refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
                              bool sig_destroy, struct netlink_ext_ack *extack)
{
        tp->ops->destroy(tp, rtnl_held, extack);
        if (sig_destroy)
                tcf_proto_signal_destroyed(tp->chain, tp);
        tcf_chain_put(tp->chain);
        module_put(tp->ops->owner);
        kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
                          struct netlink_ext_ack *extack)
{
        if (refcount_dec_and_test(&tp->refcnt))
                tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
        if (tp->ops->delete_empty)
                return tp->ops->delete_empty(tp);

        tp->deleting = true;
        return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
        spin_lock(&tp->lock);
        tp->deleting = true;
        spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
        bool deleting;

        spin_lock(&tp->lock);
        deleting = tp->deleting;
        spin_unlock(&tp->lock);

        return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)                                      \
        lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
        struct list_head list;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        chain = kzalloc(sizeof(*chain), GFP_KERNEL);
        if (!chain)
                return NULL;
        list_add_tail_rcu(&chain->list, &block->chain_list);
        mutex_init(&chain->filter_chain_lock);
        chain->block = block;
        chain->index = chain_index;
        chain->refcnt = 1;
        if (!chain->index)
                block->chain0.chain = chain;
        return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
                                       struct tcf_proto *tp_head)
{
        if (item->chain_head_change)
                item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
                                   struct tcf_proto *tp_head)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_block *block = chain->block;

        if (chain->index)
                return;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list)
                tcf_chain_head_change_item(item, tp_head);
        mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
        struct tcf_block *block = chain->block;

        ASSERT_BLOCK_LOCKED(block);

        list_del_rcu(&chain->list);
        if (!chain->index)
                block->chain0.chain = NULL;

        if (list_empty(&block->chain_list) &&
            refcount_read(&block->refcnt) == 0)
                return true;

        return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
        mutex_destroy(&block->lock);
        mutex_destroy(&block->proto_destroy_lock);
        kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
        struct tcf_block *block = chain->block;

        mutex_destroy(&chain->filter_chain_lock);
        kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        ++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
        ASSERT_BLOCK_LOCKED(chain->block);

        /* In case all the references are action references, this
         * chain should not be shown to the user.
         */
        return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
                                          u32 chain_index)
{
        struct tcf_chain *chain;

        ASSERT_BLOCK_LOCKED(block);

        list_for_each_entry(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
                                              u32 chain_index)
{
        struct tcf_chain *chain;

        list_for_each_entry_rcu(chain, &block->chain_list, list) {
                if (chain->index == chain_index)
                        return chain;
        }
        return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
                           u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
                                         u32 chain_index, bool create,
                                         bool by_act)
{
        struct tcf_chain *chain = NULL;
        bool is_first_reference;

        mutex_lock(&block->lock);
        chain = tcf_chain_lookup(block, chain_index);
        if (chain) {
                tcf_chain_hold(chain);
        } else {
                if (!create)
                        goto errout;
                chain = tcf_chain_create(block, chain_index);
                if (!chain)
                        goto errout;
        }

        if (by_act)
                ++chain->action_refcnt;
        is_first_reference = chain->refcnt - chain->action_refcnt == 1;
        mutex_unlock(&block->lock);

        /* Send notification only in case we got the first
         * non-action reference. Until then, the chain acts only as
         * a placeholder for actions pointing to it and the user ought
         * not to know about them.
         */
        if (is_first_reference && !by_act)
                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
                                RTM_NEWCHAIN, false);

        return chain;

errout:
        mutex_unlock(&block->lock);
        return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                       bool create)
{
        return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
        return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
                               void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
                                  void *tmplt_priv, u32 chain_index,
                                  struct tcf_block *block, struct sk_buff *oskb,
                                  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
                            bool explicitly_created)
{
        struct tcf_block *block = chain->block;
        const struct tcf_proto_ops *tmplt_ops;
        bool free_block = false;
        unsigned int refcnt;
        void *tmplt_priv;

        mutex_lock(&block->lock);
        if (explicitly_created) {
                if (!chain->explicitly_created) {
                        mutex_unlock(&block->lock);
                        return;
                }
                chain->explicitly_created = false;
        }

        if (by_act)
                chain->action_refcnt--;

        /* tc_chain_notify_delete can't be called while holding block lock.
         * However, once the block lock is released the chain can change
         * concurrently, so save these to temporary variables.
         */
        refcnt = --chain->refcnt;
        tmplt_ops = chain->tmplt_ops;
        tmplt_priv = chain->tmplt_priv;

        /* The last dropped non-action reference will trigger notification. */
        if (refcnt - chain->action_refcnt == 0 && !by_act) {
                tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
                                       block, NULL, 0, 0, false);
                /* Last reference to chain, no need to lock. */
                chain->flushing = false;
        }

        if (refcnt == 0)
                free_block = tcf_chain_detach(chain);
        mutex_unlock(&block->lock);

        if (refcnt == 0) {
                tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
                tcf_chain_destroy(chain, free_block);
        }
}

static void tcf_chain_put(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);
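
/* A usage sketch for the pair above, loosely following how act_api
 * resolves a goto-chain target (error handling elided, names assumed):
 *
 *	chain = tcf_chain_get_by_act(block, chain_index);
 *	if (!chain)
 *		return -ENOMEM;
 *	// action keeps the chain pointer; on cleanup:
 *	tcf_chain_put_by_act(chain);
 *
 * Action-only references keep the chain alive but, per
 * tcf_chain_held_by_acts_only(), hide it from user-visible dumps.
 */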

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
        __tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
        struct tcf_proto *tp, *tp_next;

        mutex_lock(&chain->filter_chain_lock);
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_signal_destroying(chain, tp);
                tp = tp_next;
        }
        tp = tcf_chain_dereference(chain->filter_chain, chain);
        RCU_INIT_POINTER(chain->filter_chain, NULL);
        tcf_chain0_head_change(chain, NULL);
        chain->flushing = true;
        mutex_unlock(&chain->filter_chain_lock);

        while (tp) {
                tp_next = rcu_dereference_protected(tp->next, 1);
                tcf_proto_put(tp, rtnl_held, NULL);
                tp = tp_next;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);

static void tcf_block_offload_init(struct flow_block_offload *bo,
                                   struct net_device *dev, struct Qdisc *sch,
                                   enum flow_block_command command,
                                   enum flow_block_binder_type binder_type,
                                   struct flow_block *flow_block,
                                   bool shared, struct netlink_ext_ack *extack)
{
        bo->net = dev_net(dev);
        bo->command = command;
        bo->binder_type = binder_type;
        bo->block = flow_block;
        bo->block_shared = shared;
        bo->extack = extack;
        bo->sch = sch;
        INIT_LIST_HEAD(&bo->cb_list);
}

static void tcf_block_unbind(struct tcf_block *block,
                             struct flow_block_offload *bo);

static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
{
        struct tcf_block *block = block_cb->indr.data;
        struct net_device *dev = block_cb->indr.dev;
        struct Qdisc *sch = block_cb->indr.sch;
        struct netlink_ext_ack extack = {};
        struct flow_block_offload bo;

        tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
                               block_cb->indr.binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               &extack);
        rtnl_lock();
        down_write(&block->cb_lock);
        list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        tcf_block_unbind(block, &bo);
        up_write(&block->cb_lock);
        rtnl_unlock();
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
        return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
                                 struct net_device *dev, struct Qdisc *sch,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
{
        struct flow_block_offload bo = {};

        tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               extack);

        if (dev->netdev_ops->ndo_setup_tc) {
                int err;

                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
                if (err < 0) {
                        if (err != -EOPNOTSUPP)
                                NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
                        return err;
                }

                return tcf_block_setup(block, &bo);
        }

        flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
                                    tc_block_indr_cleanup);
        tcf_block_setup(block, &bo);

        return -EOPNOTSUPP;
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                                  struct tcf_block_ext_info *ei,
                                  struct netlink_ext_ack *extack)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);

        /* If the tc offload feature is disabled and the block we try to bind
         * to already has some offloaded filters, refuse to bind.
         */
        if (dev->netdev_ops->ndo_setup_tc &&
            !tc_can_offload(dev) &&
            tcf_block_offload_in_use(block)) {
                NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
                err = -EOPNOTSUPP;
                goto err_unlock;
        }

        err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
                goto err_unlock;

        up_write(&block->cb_lock);
        return 0;

no_offload_dev_inc:
        if (tcf_block_offload_in_use(block))
                goto err_unlock;

        err = 0;
        block->nooffloaddevcnt++;
err_unlock:
        up_write(&block->cb_lock);
        return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
                                     struct tcf_block_ext_info *ei)
{
        struct net_device *dev = q->dev_queue->dev;
        int err;

        down_write(&block->cb_lock);
        err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        up_write(&block->cb_lock);
        return;

no_offload_dev_dec:
        WARN_ON(block->nooffloaddevcnt-- == 0);
        up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
                              struct tcf_block_ext_info *ei,
                              struct netlink_ext_ack *extack)
{
        struct tcf_filter_chain_list_item *item;
        struct tcf_chain *chain0;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item) {
                NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
                return -ENOMEM;
        }
        item->chain_head_change = ei->chain_head_change;
        item->chain_head_change_priv = ei->chain_head_change_priv;

        mutex_lock(&block->lock);
        chain0 = block->chain0.chain;
        if (chain0)
                tcf_chain_hold(chain0);
        else
                list_add(&item->list, &block->chain0.filter_chain_list);
        mutex_unlock(&block->lock);

        if (chain0) {
                struct tcf_proto *tp_head;

                mutex_lock(&chain0->filter_chain_lock);

                tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
                if (tp_head)
                        tcf_chain_head_change_item(item, tp_head);

                mutex_lock(&block->lock);
                list_add(&item->list, &block->chain0.filter_chain_list);
                mutex_unlock(&block->lock);

                mutex_unlock(&chain0->filter_chain_lock);
                tcf_chain_put(chain0);
        }

        return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
                              struct tcf_block_ext_info *ei)
{
        struct tcf_filter_chain_list_item *item;

        mutex_lock(&block->lock);
        list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
                if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
                    (item->chain_head_change == ei->chain_head_change &&
                     item->chain_head_change_priv == ei->chain_head_change_priv)) {
                        if (block->chain0.chain)
                                tcf_chain_head_change_item(item, NULL);
                        list_del(&item->list);
                        mutex_unlock(&block->lock);

                        kfree(item);
                        return;
                }
        }
        mutex_unlock(&block->lock);
        WARN_ON(1);
}

struct tcf_net {
        spinlock_t idr_lock; /* Protects idr */
        struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
                            struct netlink_ext_ack *extack)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);
        int err;

        idr_preload(GFP_KERNEL);
        spin_lock(&tn->idr_lock);
        err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
                            GFP_NOWAIT);
        spin_unlock(&tn->idr_lock);
        idr_preload_end();

        return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        spin_lock(&tn->idr_lock);
        idr_remove(&tn->idr, block->index);
        spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        block = kzalloc(sizeof(*block), GFP_KERNEL);
        if (!block) {
                NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&block->lock);
        mutex_init(&block->proto_destroy_lock);
        init_rwsem(&block->cb_lock);
        flow_block_init(&block->flow_block);
        INIT_LIST_HEAD(&block->chain_list);
        INIT_LIST_HEAD(&block->owner_list);
        INIT_LIST_HEAD(&block->chain0.filter_chain_list);

        refcount_set(&block->refcnt, 1);
        block->net = net;
        block->index = block_index;

        /* Don't store q pointer for blocks which are shared */
        if (!tcf_block_shared(block))
                block->q = q;
        return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
        struct tcf_net *tn = net_generic(net, tcf_net_id);

        return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
        struct tcf_block *block;

        rcu_read_lock();
        block = tcf_block_lookup(net, block_index);
        if (block && !refcount_inc_not_zero(&block->refcnt))
                block = NULL;
        rcu_read_unlock();

        return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        mutex_lock(&block->lock);
        if (chain)
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);
        else
                chain = list_first_entry_or_null(&block->chain_list,
                                                 struct tcf_chain, list);

        /* skip all action-only chains */
        while (chain && tcf_chain_held_by_acts_only(chain))
                chain = list_is_last(&chain->list, &block->chain_list) ?
                        NULL : list_next_entry(chain, list);

        if (chain)
                tcf_chain_hold(chain);
        mutex_unlock(&block->lock);

        return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes a reference to the chain
 * before returning it. Users of this function must be tolerant to concurrent
 * chain insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
        struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

        if (chain)
                tcf_chain_put(chain);

        return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
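
/* A minimal iteration sketch, assuming a sleepable caller that tolerates
 * concurrent chain insertion/deletion as described above:
 *
 *	struct tcf_chain *chain;
 *
 *	for (chain = tcf_get_next_chain(block, NULL);
 *	     chain;
 *	     chain = tcf_get_next_chain(block, chain))
 *		...	chain is held across the body	...
 *
 * tcf_block_flush_all_chains() below uses exactly this pattern.
 */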

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
        u32 prio = 0;

        ASSERT_RTNL();
        mutex_lock(&chain->filter_chain_lock);

        if (!tp) {
                tp = tcf_chain_dereference(chain->filter_chain, chain);
        } else if (tcf_proto_is_deleting(tp)) {
                /* 'deleting' flag is set and chain->filter_chain_lock was
                 * unlocked, which means next pointer could be invalid. Restart
                 * search.
                 */
                prio = tp->prio + 1;
                tp = tcf_chain_dereference(chain->filter_chain, chain);

                for (; tp; tp = tcf_chain_dereference(tp->next, chain))
                        if (!tp->deleting && tp->prio >= prio)
                                break;
        } else {
                tp = tcf_chain_dereference(tp->next, chain);
        }

        if (tp)
                tcf_proto_get(tp);

        mutex_unlock(&chain->filter_chain_lock);

        return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
                   bool rtnl_held)
{
        struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

        if (tp)
                tcf_proto_put(tp, rtnl_held, NULL);

        return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
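
/* The matching sketch for walking all tps on a chain (rtnl held, as
 * __tcf_get_next_proto() asserts):
 *
 *	struct tcf_proto *tp;
 *
 *	for (tp = tcf_get_next_proto(chain, NULL, true);
 *	     tp;
 *	     tp = tcf_get_next_proto(chain, tp, true))
 *		...	tp is held across the body	...
 *
 * tcf_block_playback_offloads() below walks chains and tps this way,
 * open-coded via the __tcf_get_next_*() variants.
 */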

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
        struct tcf_chain *chain;

        /* Last reference to block. At this point chains cannot be added or
         * removed concurrently.
         */
        for (chain = tcf_get_next_chain(block, NULL);
             chain;
             chain = tcf_get_next_chain(block, chain)) {
                tcf_chain_put_explicitly_created(chain);
                tcf_chain_flush(chain, rtnl_held);
        }
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
                            u32 *parent, int ifindex, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        const struct Qdisc_class_ops *cops;
        struct net_device *dev;
        int err = 0;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        rcu_read_lock();

        /* Find link */
        dev = dev_get_by_index_rcu(net, ifindex);
        if (!dev) {
                rcu_read_unlock();
                return -ENODEV;
        }

        /* Find qdisc */
        if (!*parent) {
                *q = dev->qdisc;
                *parent = (*q)->handle;
        } else {
                *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
                if (!*q) {
                        NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                        err = -EINVAL;
                        goto errout_rcu;
                }
        }

        *q = qdisc_refcount_inc_nz(*q);
        if (!*q) {
                NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
                err = -EINVAL;
                goto errout_rcu;
        }

        /* Is it classful? */
        cops = (*q)->ops->cl_ops;
        if (!cops) {
                NL_SET_ERR_MSG(extack, "Qdisc not classful");
                err = -EINVAL;
                goto errout_qdisc;
        }

        if (!cops->tcf_block) {
                NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
                err = -EOPNOTSUPP;
                goto errout_qdisc;
        }

errout_rcu:
        /* At this point we know that qdisc is not noop_qdisc,
         * which means that qdisc holds a reference to net_device
         * and we hold a reference to qdisc, so it is safe to release
         * rcu read lock.
         */
        rcu_read_unlock();
        return err;

errout_qdisc:
        rcu_read_unlock();

        if (rtnl_held)
                qdisc_put(*q);
        else
                qdisc_put_unlocked(*q);
        *q = NULL;

        return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
                               int ifindex, struct netlink_ext_ack *extack)
{
        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
                return 0;

        /* Do we search for a filter attached to a class? */
        if (TC_H_MIN(parent)) {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                *cl = cops->find(q, parent);
                if (*cl == 0) {
                        NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
                        return -ENOENT;
                }
        }

        return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
                                          unsigned long cl, int ifindex,
                                          u32 block_index,
                                          struct netlink_ext_ack *extack)
{
        struct tcf_block *block;

        if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
                block = tcf_block_refcnt_get(net, block_index);
                if (!block) {
                        NL_SET_ERR_MSG(extack, "Block of given index was not found");
                        return ERR_PTR(-EINVAL);
                }
        } else {
                const struct Qdisc_class_ops *cops = q->ops->cl_ops;

                block = cops->tcf_block(q, cl, extack);
                if (!block)
                        return ERR_PTR(-EINVAL);

                if (tcf_block_shared(block)) {
                        NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
                        return ERR_PTR(-EOPNOTSUPP);
                }

                /* Always take a reference to the block in order to support
                 * execution of the cls API rules update path without the rtnl
                 * lock. The caller must release the block when it is finished
                 * using it. The 'if' branch of this conditional obtains its
                 * reference to the block by calling tcf_block_refcnt_get().
                 */
                refcount_inc(&block->refcnt);
        }

        return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
                            struct tcf_block_ext_info *ei, bool rtnl_held)
{
        if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
                /* Flushing/putting all chains will cause the block to be
                 * deallocated when the last chain is freed. However, if
                 * chain_list is empty, the block has to be manually
                 * deallocated. Once the block's reference counter has reached
                 * 0, it is no longer possible to increment it or to add new
                 * chains to the block.
                 */
                bool free_block = list_empty(&block->chain_list);

                mutex_unlock(&block->lock);
                if (tcf_block_shared(block))
                        tcf_block_remove(block, block->net);

                if (q)
                        tcf_block_offload_unbind(block, q, ei);

                if (free_block)
                        tcf_block_destroy(block);
                else
                        tcf_block_flush_all_chains(block, rtnl_held);
        } else if (q) {
                tcf_block_offload_unbind(block, q, ei);
        }
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
        __tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
                                        u32 *parent, unsigned long *cl,
                                        int ifindex, u32 block_index,
                                        struct netlink_ext_ack *extack)
{
        struct tcf_block *block;
        int err = 0;

        ASSERT_RTNL();

        err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
        if (err)
                goto errout;

        err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
        if (err)
                goto errout_qdisc;

        block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
        if (IS_ERR(block)) {
                err = PTR_ERR(block);
                goto errout_qdisc;
        }

        return block;

errout_qdisc:
        if (*q)
                qdisc_put(*q);
errout:
        *q = NULL;
        return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
                              bool rtnl_held)
{
        if (!IS_ERR_OR_NULL(block))
                tcf_block_refcnt_put(block, rtnl_held);

        if (q) {
                if (rtnl_held)
                        qdisc_put(q);
                else
                        qdisc_put_unlocked(q);
        }
}

struct tcf_block_owner_item {
        struct list_head list;
        struct Qdisc *q;
        enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        if (block->keep_dst &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
            binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
                netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
        struct tcf_block_owner_item *item;

        block->keep_dst = true;
        list_for_each_entry(item, &block->owner_list, list)
                tcf_block_owner_netif_keep_dst(block, item->q,
                                               item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
                               struct Qdisc *q,
                               enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        item = kmalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;
        item->q = q;
        item->binder_type = binder_type;
        list_add(&item->list, &block->owner_list);
        return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
                                struct Qdisc *q,
                                enum flow_block_binder_type binder_type)
{
        struct tcf_block_owner_item *item;

        list_for_each_entry(item, &block->owner_list, list) {
                if (item->q == q && item->binder_type == binder_type) {
                        list_del(&item->list);
                        kfree(item);
                        return;
                }
        }
        WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
{
        struct net *net = qdisc_net(q);
        struct tcf_block *block = NULL;
        int err;

        if (ei->block_index)
                /* block_index not 0 means the shared block is requested */
                block = tcf_block_refcnt_get(net, ei->block_index);

        if (!block) {
                block = tcf_block_create(net, q, ei->block_index, extack);
                if (IS_ERR(block))
                        return PTR_ERR(block);
                if (tcf_block_shared(block)) {
                        err = tcf_block_insert(block, net, extack);
                        if (err)
                                goto err_block_insert;
                }
        }

        err = tcf_block_owner_add(block, q, ei->binder_type);
        if (err)
                goto err_block_owner_add;

        tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

        err = tcf_chain0_head_change_cb_add(block, ei, extack);
        if (err)
                goto err_chain0_head_change_cb_add;

        err = tcf_block_offload_bind(block, q, ei, extack);
        if (err)
                goto err_block_offload_bind;

        *p_block = block;
        return 0;

err_block_offload_bind:
        tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
        tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
        tcf_block_refcnt_put(block, true);
        return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
        struct tcf_proto __rcu **p_filter_chain = priv;

        rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack)
{
        struct tcf_block_ext_info ei = {
                .chain_head_change = tcf_chain_head_change_dflt,
                .chain_head_change_priv = p_filter_chain,
        };

        WARN_ON(!p_filter_chain);
        return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
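
/* A sketch of the typical qdisc-side usage, patterned on the in-tree
 * classful qdiscs; foo_sched_data and foo_init() are hypothetical:
 *
 *	struct foo_sched_data {
 *		struct tcf_proto __rcu *filter_list;
 *		struct tcf_block *block;
 *	};
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 * with a matching tcf_block_put(q->block) in the qdisc's ->destroy().
 */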

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should all be removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
{
        if (!block)
                return;
        tcf_chain0_head_change_cb_del(block, ei);
        tcf_block_owner_del(block, q, ei->binder_type);

        __tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
        struct tcf_block_ext_info ei = {0, };

        if (!block)
                return;
        tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain, *chain_prev;
        struct tcf_proto *tp, *tp_prev;
        int err;

        lockdep_assert_held(&block->cb_lock);

        for (chain = __tcf_get_next_chain(block, NULL);
             chain;
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
                             tcf_proto_put(tp_prev, true, NULL)) {
                        if (tp->ops->reoffload) {
                                err = tp->ops->reoffload(tp, add, cb, cb_priv,
                                                         extack);
                                if (err && add)
                                        goto err_playback_remove;
                        } else if (add && offload_in_use) {
                                err = -EOPNOTSUPP;
                                NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
                                goto err_playback_remove;
                        }
                }
        }

        return 0;

err_playback_remove:
        tcf_proto_put(tp, true, NULL);
        tcf_chain_put(chain);
        tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
                                    extack);
        return err;
}

static int tcf_block_bind(struct tcf_block *block,
                          struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;
        int err, i = 0;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry(block_cb, &bo->cb_list, list) {
                err = tcf_block_playback_offloads(block, block_cb->cb,
                                                  block_cb->cb_priv, true,
                                                  tcf_block_offload_in_use(block),
                                                  bo->extack);
                if (err)
                        goto err_unroll;
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt++;

                i++;
        }
        list_splice(&bo->cb_list, &block->flow_block.cb_list);

        return 0;

err_unroll:
        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                if (i-- > 0) {
                        list_del(&block_cb->list);
                        tcf_block_playback_offloads(block, block_cb->cb,
                                                    block_cb->cb_priv, false,
                                                    tcf_block_offload_in_use(block),
                                                    NULL);
                        if (!bo->unlocked_driver_cb)
                                block->lockeddevcnt--;
                }
                flow_block_cb_free(block_cb);
        }

        return err;
}

static void tcf_block_unbind(struct tcf_block *block,
                             struct flow_block_offload *bo)
{
        struct flow_block_cb *block_cb, *next;

        lockdep_assert_held(&block->cb_lock);

        list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
                tcf_block_playback_offloads(block, block_cb->cb,
                                            block_cb->cb_priv, false,
                                            tcf_block_offload_in_use(block),
                                            NULL);
                list_del(&block_cb->list);
                flow_block_cb_free(block_cb);
                if (!bo->unlocked_driver_cb)
                        block->lockeddevcnt--;
        }
}

static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo)
{
        int err;

        switch (bo->command) {
        case FLOW_BLOCK_BIND:
                err = tcf_block_bind(block, bo);
                break;
        case FLOW_BLOCK_UNBIND:
                err = 0;
                tcf_block_unbind(block, bo);
                break;
        default:
                WARN_ON_ONCE(1);
                err = -EOPNOTSUPP;
        }

        return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
1527static inline int __tcf_classify(struct sk_buff *skb,
1528                                 const struct tcf_proto *tp,
1529                                 const struct tcf_proto *orig_tp,
1530                                 struct tcf_result *res,
1531                                 bool compat_mode,
1532                                 u32 *last_executed_chain)
1533{
1534#ifdef CONFIG_NET_CLS_ACT
1535        const int max_reclassify_loop = 4;
1536        const struct tcf_proto *first_tp;
1537        int limit = 0;
1538
1539reclassify:
1540#endif
1541        for (; tp; tp = rcu_dereference_bh(tp->next)) {
1542                __be16 protocol = skb_protocol(skb, false);
1543                int err;
1544
1545                if (tp->protocol != protocol &&
1546                    tp->protocol != htons(ETH_P_ALL))
1547                        continue;
1548
1549                err = tp->classify(skb, tp, res);
1550#ifdef CONFIG_NET_CLS_ACT
1551                if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1552                        first_tp = orig_tp;
1553                        *last_executed_chain = first_tp->chain->index;
1554                        goto reset;
1555                } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1556                        first_tp = res->goto_tp;
1557                        *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1558                        goto reset;
1559                }
1560#endif
1561                if (err >= 0)
1562                        return err;
1563        }
1564
1565        return TC_ACT_UNSPEC; /* signal: continue lookup */
1566#ifdef CONFIG_NET_CLS_ACT
1567reset:
1568        if (unlikely(limit++ >= max_reclassify_loop)) {
1569                net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1570                                       tp->chain->block->index,
1571                                       tp->prio & 0xffff,
1572                                       ntohs(tp->protocol));
1573                return TC_ACT_SHOT;
1574        }
1575
1576        tp = first_tp;
1577        goto reclassify;
1578#endif
1579}
1580
1581int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1582                 struct tcf_result *res, bool compat_mode)
1583{
1584        u32 last_executed_chain = 0;
1585
1586        return __tcf_classify(skb, tp, tp, res, compat_mode,
1587                              &last_executed_chain);
1588}
1589EXPORT_SYMBOL(tcf_classify);
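
/* Illustrative sketch (editor's addition): how a classful qdisc conventionally
 * consumes tcf_classify()'s verdict; struct example_sched_data is hypothetical
 * (compare real callers such as sch_prio.c). Simplified: real callers also
 * handle TC_ACT_STOLEN/QUEUED/TRAP under CONFIG_NET_CLS_ACT.
 */
#if 0
struct example_sched_data {
        struct tcf_proto __rcu *filter_list;
        int default_band;
};

static int example_classify_band(struct sk_buff *skb, struct Qdisc *sch)
{
        struct example_sched_data *q = qdisc_priv(sch);
        struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
        struct tcf_result res;

        switch (tcf_classify(skb, fl, &res, false)) {
        case TC_ACT_SHOT:
                return -1;                      /* caller drops the packet */
        case TC_ACT_UNSPEC:
                return q->default_band;         /* no filter matched */
        default:
                return TC_H_MIN(res.classid);   /* filter selected a class */
        }
}
#endif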
1590
1591int tcf_classify_ingress(struct sk_buff *skb,
1592                         const struct tcf_block *ingress_block,
1593                         const struct tcf_proto *tp,
1594                         struct tcf_result *res, bool compat_mode)
1595{
1596#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1597        u32 last_executed_chain = 0;
1598
1599        return __tcf_classify(skb, tp, tp, res, compat_mode,
1600                              &last_executed_chain);
1601#else
1602        u32 last_executed_chain = tp ? tp->chain->index : 0;
1603        const struct tcf_proto *orig_tp = tp;
1604        struct tc_skb_ext *ext;
1605        int ret;
1606
1607        ext = skb_ext_find(skb, TC_SKB_EXT);
1608
1609        if (ext && ext->chain) {
1610                struct tcf_chain *fchain;
1611
1612                fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
1613                if (!fchain)
1614                        return TC_ACT_SHOT;
1615
1616                /* Consume, so cloned/redirect skbs won't inherit ext */
1617                skb_ext_del(skb, TC_SKB_EXT);
1618
1619                tp = rcu_dereference_bh(fchain->filter_chain);
1620                last_executed_chain = fchain->index;
1621        }
1622
1623        ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1624                             &last_executed_chain);
1625
1626        /* If we missed on some chain, record it so classification can resume there */
1627        if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1628                ext = skb_ext_add(skb, TC_SKB_EXT);
1629                if (WARN_ON_ONCE(!ext))
1630                        return TC_ACT_SHOT;
1631                ext->chain = last_executed_chain;
1632                ext->mru = qdisc_skb_cb(skb)->mru;
1633        }
1634
1635        return ret;
1636#endif
1637}
1638EXPORT_SYMBOL(tcf_classify_ingress);
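
/* Illustrative sketch (editor's addition): the TC_SKB_EXT consumed above is
 * typically attached by an offloading driver when hardware matched only part
 * of a chain sequence, so software classification can resume at the missed
 * chain (see e.g. the mlx5 driver for a real producer). Hypothetical names:
 */
#if 0
static void example_driver_restore_chain(struct sk_buff *skb, u32 miss_chain)
{
        struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

        if (ext)
                ext->chain = miss_chain;        /* resume here in software */
}
#endif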
1639
1640struct tcf_chain_info {
1641        struct tcf_proto __rcu **pprev;
1642        struct tcf_proto __rcu *next;
1643};
1644
1645static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1646                                           struct tcf_chain_info *chain_info)
1647{
1648        return tcf_chain_dereference(*chain_info->pprev, chain);
1649}
1650
1651static int tcf_chain_tp_insert(struct tcf_chain *chain,
1652                               struct tcf_chain_info *chain_info,
1653                               struct tcf_proto *tp)
1654{
1655        if (chain->flushing)
1656                return -EAGAIN;
1657
1658        if (*chain_info->pprev == chain->filter_chain)
1659                tcf_chain0_head_change(chain, tp);
1660        tcf_proto_get(tp);
1661        RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1662        rcu_assign_pointer(*chain_info->pprev, tp);
1663
1664        return 0;
1665}
1666
1667static void tcf_chain_tp_remove(struct tcf_chain *chain,
1668                                struct tcf_chain_info *chain_info,
1669                                struct tcf_proto *tp)
1670{
1671        struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1672
1673        tcf_proto_mark_delete(tp);
1674        if (tp == chain->filter_chain)
1675                tcf_chain0_head_change(chain, next);
1676        RCU_INIT_POINTER(*chain_info->pprev, next);
1677}
1678
1679static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1680                                           struct tcf_chain_info *chain_info,
1681                                           u32 protocol, u32 prio,
1682                                           bool prio_allocate);
1683
1684/* Try to insert a new proto.
1685 * If a proto with the specified priority already exists, free the new
1686 * proto and return the existing one.
1687 */
1688
1689static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1690                                                    struct tcf_proto *tp_new,
1691                                                    u32 protocol, u32 prio,
1692                                                    bool rtnl_held)
1693{
1694        struct tcf_chain_info chain_info;
1695        struct tcf_proto *tp;
1696        int err = 0;
1697
1698        mutex_lock(&chain->filter_chain_lock);
1699
1700        if (tcf_proto_exists_destroying(chain, tp_new)) {
1701                mutex_unlock(&chain->filter_chain_lock);
1702                tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1703                return ERR_PTR(-EAGAIN);
1704        }
1705
1706        tp = tcf_chain_tp_find(chain, &chain_info,
1707                               protocol, prio, false);
1708        if (!tp)
1709                err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1710        mutex_unlock(&chain->filter_chain_lock);
1711
1712        if (tp) {
1713                tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1714                tp_new = tp;
1715        } else if (err) {
1716                tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1717                tp_new = ERR_PTR(err);
1718        }
1719
1720        return tp_new;
1721}
1722
1723static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1724                                      struct tcf_proto *tp, bool rtnl_held,
1725                                      struct netlink_ext_ack *extack)
1726{
1727        struct tcf_chain_info chain_info;
1728        struct tcf_proto *tp_iter;
1729        struct tcf_proto **pprev;
1730        struct tcf_proto *next;
1731
1732        mutex_lock(&chain->filter_chain_lock);
1733
1734        /* Atomically find and remove tp from chain. */
1735        for (pprev = &chain->filter_chain;
1736             (tp_iter = tcf_chain_dereference(*pprev, chain));
1737             pprev = &tp_iter->next) {
1738                if (tp_iter == tp) {
1739                        chain_info.pprev = pprev;
1740                        chain_info.next = tp_iter->next;
1741                        WARN_ON(tp_iter->deleting);
1742                        break;
1743                }
1744        }
1745        /* Verify that tp still exists and no new filters were inserted
1746         * concurrently.
1747         * Mark tp for deletion if it is empty.
1748         */
1749        if (!tp_iter || !tcf_proto_check_delete(tp)) {
1750                mutex_unlock(&chain->filter_chain_lock);
1751                return;
1752        }
1753
1754        tcf_proto_signal_destroying(chain, tp);
1755        next = tcf_chain_dereference(chain_info.next, chain);
1756        if (tp == chain->filter_chain)
1757                tcf_chain0_head_change(chain, next);
1758        RCU_INIT_POINTER(*chain_info.pprev, next);
1759        mutex_unlock(&chain->filter_chain_lock);
1760
1761        tcf_proto_put(tp, rtnl_held, extack);
1762}
1763
1764static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1765                                           struct tcf_chain_info *chain_info,
1766                                           u32 protocol, u32 prio,
1767                                           bool prio_allocate)
1768{
1769        struct tcf_proto **pprev;
1770        struct tcf_proto *tp;
1771
1772        /* Check the chain for an existing proto-tcf with this priority */
1773        for (pprev = &chain->filter_chain;
1774             (tp = tcf_chain_dereference(*pprev, chain));
1775             pprev = &tp->next) {
1776                if (tp->prio >= prio) {
1777                        if (tp->prio == prio) {
1778                                if (prio_allocate ||
1779                                    (tp->protocol != protocol && protocol))
1780                                        return ERR_PTR(-EINVAL);
1781                        } else {
1782                                tp = NULL;
1783                        }
1784                        break;
1785                }
1786        }
1787        chain_info->pprev = pprev;
1788        if (tp) {
1789                chain_info->next = tp->next;
1790                tcf_proto_get(tp);
1791        } else {
1792                chain_info->next = NULL;
1793        }
1794        return tp;
1795}
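
/* Editor's note: tcf_chain_tp_find() relies on chain->filter_chain being kept
 * sorted by ascending prio (tcf_chain_tp_insert() preserves this), so the walk
 * can stop at the first tp with tp->prio >= prio; chain_info then records the
 * insertion point for a later tcf_chain_tp_insert().
 */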
1796
1797static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1798                         struct tcf_proto *tp, struct tcf_block *block,
1799                         struct Qdisc *q, u32 parent, void *fh,
1800                         u32 portid, u32 seq, u16 flags, int event,
1801                         bool terse_dump, bool rtnl_held)
1802{
1803        struct tcmsg *tcm;
1804        struct nlmsghdr  *nlh;
1805        unsigned char *b = skb_tail_pointer(skb);
1806
1807        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1808        if (!nlh)
1809                goto out_nlmsg_trim;
1810        tcm = nlmsg_data(nlh);
1811        tcm->tcm_family = AF_UNSPEC;
1812        tcm->tcm__pad1 = 0;
1813        tcm->tcm__pad2 = 0;
1814        if (q) {
1815                tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1816                tcm->tcm_parent = parent;
1817        } else {
1818                tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1819                tcm->tcm_block_index = block->index;
1820        }
1821        tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1822        if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1823                goto nla_put_failure;
1824        if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1825                goto nla_put_failure;
1826        if (!fh) {
1827                tcm->tcm_handle = 0;
1828        } else if (terse_dump) {
1829                if (tp->ops->terse_dump) {
1830                        if (tp->ops->terse_dump(net, tp, fh, skb, tcm,
1831                                                rtnl_held) < 0)
1832                                goto nla_put_failure;
1833                } else {
1834                        goto cls_op_not_supp;
1835                }
1836        } else {
1837                if (tp->ops->dump &&
1838                    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1839                        goto nla_put_failure;
1840        }
1841        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1842        return skb->len;
1843
1844out_nlmsg_trim:
1845nla_put_failure:
1846cls_op_not_supp:
1847        nlmsg_trim(skb, b);
1848        return -1;
1849}
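
/* Illustrative sketch (editor's addition): tcf_fill_node() above emits only
 * the generic attributes (TCA_KIND, TCA_CHAIN) and delegates the rest to the
 * classifier's ->dump(). A conventional ->dump() looks roughly like this;
 * struct example_filter and TCA_EXAMPLE_CLASSID are invented for illustration.
 */
#if 0
static int example_cls_dump(struct net *net, struct tcf_proto *tp, void *fh,
                            struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
        struct example_filter *f = fh;  /* classifier-private filter state */
        struct nlattr *nest;

        t->tcm_handle = f->handle;
        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                return -EMSGSIZE;
        if (nla_put_u32(skb, TCA_EXAMPLE_CLASSID, f->res.classid))
                goto nla_put_failure;
        nla_nest_end(skb, nest);
        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}
#endif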
1850
1851static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1852                          struct nlmsghdr *n, struct tcf_proto *tp,
1853                          struct tcf_block *block, struct Qdisc *q,
1854                          u32 parent, void *fh, int event, bool unicast,
1855                          bool rtnl_held)
1856{
1857        struct sk_buff *skb;
1858        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1859        int err = 0;
1860
1861        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1862        if (!skb)
1863                return -ENOBUFS;
1864
1865        if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1866                          n->nlmsg_seq, n->nlmsg_flags, event,
1867                          false, rtnl_held) <= 0) {
1868                kfree_skb(skb);
1869                return -EINVAL;
1870        }
1871
1872        if (unicast)
1873                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1874        else
1875                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1876                                     n->nlmsg_flags & NLM_F_ECHO);
1877
1878        if (err > 0)
1879                err = 0;
1880        return err;
1881}
1882
1883static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1884                              struct nlmsghdr *n, struct tcf_proto *tp,
1885                              struct tcf_block *block, struct Qdisc *q,
1886                              u32 parent, void *fh, bool unicast, bool *last,
1887                              bool rtnl_held, struct netlink_ext_ack *extack)
1888{
1889        struct sk_buff *skb;
1890        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1891        int err;
1892
1893        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1894        if (!skb)
1895                return -ENOBUFS;
1896
1897        if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1898                          n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1899                          false, rtnl_held) <= 0) {
1900                NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1901                kfree_skb(skb);
1902                return -EINVAL;
1903        }
1904
1905        err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1906        if (err) {
1907                kfree_skb(skb);
1908                return err;
1909        }
1910
1911        if (unicast)
1912                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1913        else
1914                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1915                                     n->nlmsg_flags & NLM_F_ECHO);
1916        if (err < 0)
1917                NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1918
1919        if (err > 0)
1920                err = 0;
1921        return err;
1922}
1923
1924static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1925                                 struct tcf_block *block, struct Qdisc *q,
1926                                 u32 parent, struct nlmsghdr *n,
1927                                 struct tcf_chain *chain, int event,
1928                                 bool rtnl_held)
1929{
1930        struct tcf_proto *tp;
1931
1932        for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1933             tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1934                tfilter_notify(net, oskb, n, tp, block,
1935                               q, parent, NULL, event, false, rtnl_held);
1936}
1937
1938static void tfilter_put(struct tcf_proto *tp, void *fh)
1939{
1940        if (tp->ops->put && fh)
1941                tp->ops->put(tp, fh);
1942}
1943
1944static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1945                          struct netlink_ext_ack *extack)
1946{
1947        struct net *net = sock_net(skb->sk);
1948        struct nlattr *tca[TCA_MAX + 1];
1949        char name[IFNAMSIZ];
1950        struct tcmsg *t;
1951        u32 protocol;
1952        u32 prio;
1953        bool prio_allocate;
1954        u32 parent;
1955        u32 chain_index;
1956        struct Qdisc *q = NULL;
1957        struct tcf_chain_info chain_info;
1958        struct tcf_chain *chain = NULL;
1959        struct tcf_block *block;
1960        struct tcf_proto *tp;
1961        unsigned long cl;
1962        void *fh;
1963        int err;
1964        int tp_created;
1965        bool rtnl_held = false;
1966
1967        if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
1968                return -EPERM;
1969
1970replay:
1971        tp_created = 0;
1972
1973        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
1974                                     rtm_tca_policy, extack);
1975        if (err < 0)
1976                return err;
1977
1978        t = nlmsg_data(n);
1979        protocol = TC_H_MIN(t->tcm_info);
1980        prio = TC_H_MAJ(t->tcm_info);
1981        prio_allocate = false;
1982        parent = t->tcm_parent;
1983        tp = NULL;
1984        cl = 0;
1985        block = NULL;
1986
1987        if (prio == 0) {
1988                /* If no priority is provided by the user,
1989                 * we allocate one.
1990                 */
1991                if (n->nlmsg_flags & NLM_F_CREATE) {
1992                        prio = TC_H_MAKE(0x80000000U, 0U);
1993                        prio_allocate = true;
1994                } else {
1995                        NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
1996                        return -ENOENT;
1997                }
1998        }
1999
2000        /* Find head of filter chain. */
2001
2002        err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2003        if (err)
2004                return err;
2005
2006        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2007                NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2008                err = -EINVAL;
2009                goto errout;
2010        }
2011
2012        /* Take the rtnl mutex if rtnl_held was set to true on a previous
2013         * iteration, the block is shared (no qdisc found), the qdisc is not
2014         * unlocked, or the classifier type is unspecified or not unlocked.
2015         */
2016        if (rtnl_held ||
2017            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2018            !tcf_proto_is_unlocked(name)) {
2019                rtnl_held = true;
2020                rtnl_lock();
2021        }
2022
2023        err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2024        if (err)
2025                goto errout;
2026
2027        block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2028                                 extack);
2029        if (IS_ERR(block)) {
2030                err = PTR_ERR(block);
2031                goto errout;
2032        }
2033        block->classid = parent;
2034
2035        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2036        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2037                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2038                err = -EINVAL;
2039                goto errout;
2040        }
2041        chain = tcf_chain_get(block, chain_index, true);
2042        if (!chain) {
2043                NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2044                err = -ENOMEM;
2045                goto errout;
2046        }
2047
2048        mutex_lock(&chain->filter_chain_lock);
2049        tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2050                               prio, prio_allocate);
2051        if (IS_ERR(tp)) {
2052                NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2053                err = PTR_ERR(tp);
2054                goto errout_locked;
2055        }
2056
2057        if (tp == NULL) {
2058                struct tcf_proto *tp_new = NULL;
2059
2060                if (chain->flushing) {
2061                        err = -EAGAIN;
2062                        goto errout_locked;
2063                }
2064
2065                /* Proto-tcf does not exist; create a new one */
2066
2067                if (tca[TCA_KIND] == NULL || !protocol) {
2068                        NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2069                        err = -EINVAL;
2070                        goto errout_locked;
2071                }
2072
2073                if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2074                        NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2075                        err = -ENOENT;
2076                        goto errout_locked;
2077                }
2078
2079                if (prio_allocate)
2080                        prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2081                                                               &chain_info));
2082
2083                mutex_unlock(&chain->filter_chain_lock);
2084                tp_new = tcf_proto_create(name, protocol, prio, chain,
2085                                          rtnl_held, extack);
2086                if (IS_ERR(tp_new)) {
2087                        err = PTR_ERR(tp_new);
2088                        goto errout_tp;
2089                }
2090
2091                tp_created = 1;
2092                tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2093                                                rtnl_held);
2094                if (IS_ERR(tp)) {
2095                        err = PTR_ERR(tp);
2096                        goto errout_tp;
2097                }
2098        } else {
2099                mutex_unlock(&chain->filter_chain_lock);
2100        }
2101
2102        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2103                NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2104                err = -EINVAL;
2105                goto errout;
2106        }
2107
2108        fh = tp->ops->get(tp, t->tcm_handle);
2109
2110        if (!fh) {
2111                if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2112                        NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2113                        err = -ENOENT;
2114                        goto errout;
2115                }
2116        } else if (n->nlmsg_flags & NLM_F_EXCL) {
2117                tfilter_put(tp, fh);
2118                NL_SET_ERR_MSG(extack, "Filter already exists");
2119                err = -EEXIST;
2120                goto errout;
2121        }
2122
2123        if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2124                NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2125                err = -EINVAL;
2126                goto errout;
2127        }
2128
2129        err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2130                              n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2131                              rtnl_held, extack);
2132        if (err == 0) {
2133                tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2134                               RTM_NEWTFILTER, false, rtnl_held);
2135                tfilter_put(tp, fh);
2136                /* q pointer is NULL for shared blocks */
2137                if (q)
2138                        q->flags &= ~TCQ_F_CAN_BYPASS;
2139        }
2140
2141errout:
2142        if (err && tp_created)
2143                tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2144errout_tp:
2145        if (chain) {
2146                if (tp && !IS_ERR(tp))
2147                        tcf_proto_put(tp, rtnl_held, NULL);
2148                if (!tp_created)
2149                        tcf_chain_put(chain);
2150        }
2151        tcf_block_release(q, block, rtnl_held);
2152
2153        if (rtnl_held)
2154                rtnl_unlock();
2155
2156        if (err == -EAGAIN) {
2157                /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2158                 * of target chain.
2159                 */
2160                rtnl_held = true;
2161                /* Replay the request. */
2162                goto replay;
2163        }
2164        return err;
2165
2166errout_locked:
2167        mutex_unlock(&chain->filter_chain_lock);
2168        goto errout;
2169}
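
/* Editor's note: for orientation, a userspace command such as
 *
 *      tc filter add dev eth0 ingress protocol ip prio 10 \
 *              flower dst_ip 10.0.0.1 action drop
 *
 * reaches this handler as RTM_NEWTFILTER with NLM_F_CREATE, with
 * tcm_info = TC_H_MAKE(10 << 16, htons(ETH_P_IP)) (prio in the major bits,
 * protocol in the minor bits, exactly as decoded above), TCA_KIND = "flower",
 * and the flower-specific configuration nested under TCA_OPTIONS.
 */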
2170
2171static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2172                          struct netlink_ext_ack *extack)
2173{
2174        struct net *net = sock_net(skb->sk);
2175        struct nlattr *tca[TCA_MAX + 1];
2176        char name[IFNAMSIZ];
2177        struct tcmsg *t;
2178        u32 protocol;
2179        u32 prio;
2180        u32 parent;
2181        u32 chain_index;
2182        struct Qdisc *q = NULL;
2183        struct tcf_chain_info chain_info;
2184        struct tcf_chain *chain = NULL;
2185        struct tcf_block *block = NULL;
2186        struct tcf_proto *tp = NULL;
2187        unsigned long cl = 0;
2188        void *fh = NULL;
2189        int err;
2190        bool rtnl_held = false;
2191
2192        if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2193                return -EPERM;
2194
2195        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2196                                     rtm_tca_policy, extack);
2197        if (err < 0)
2198                return err;
2199
2200        t = nlmsg_data(n);
2201        protocol = TC_H_MIN(t->tcm_info);
2202        prio = TC_H_MAJ(t->tcm_info);
2203        parent = t->tcm_parent;
2204
2205        if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2206                NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2207                return -ENOENT;
2208        }
2209
2210        /* Find head of filter chain. */
2211
2212        err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2213        if (err)
2214                return err;
2215
2216        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2217                NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2218                err = -EINVAL;
2219                goto errout;
2220        }
2221        /* Take the rtnl mutex if flushing the whole chain, the block is
2222         * shared (no qdisc found), the qdisc is not unlocked, or the
2223         * classifier type is unspecified or not unlocked.
2224         */
2225        if (!prio ||
2226            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2227            !tcf_proto_is_unlocked(name)) {
2228                rtnl_held = true;
2229                rtnl_lock();
2230        }
2231
2232        err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2233        if (err)
2234                goto errout;
2235
2236        block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2237                                 extack);
2238        if (IS_ERR(block)) {
2239                err = PTR_ERR(block);
2240                goto errout;
2241        }
2242
2243        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2244        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2245                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2246                err = -EINVAL;
2247                goto errout;
2248        }
2249        chain = tcf_chain_get(block, chain_index, false);
2250        if (!chain) {
2251                /* User requested flush on non-existent chain. Nothing to do,
2252                 * so just return success.
2253                 */
2254                if (prio == 0) {
2255                        err = 0;
2256                        goto errout;
2257                }
2258                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2259                err = -ENOENT;
2260                goto errout;
2261        }
2262
2263        if (prio == 0) {
2264                tfilter_notify_chain(net, skb, block, q, parent, n,
2265                                     chain, RTM_DELTFILTER, rtnl_held);
2266                tcf_chain_flush(chain, rtnl_held);
2267                err = 0;
2268                goto errout;
2269        }
2270
2271        mutex_lock(&chain->filter_chain_lock);
2272        tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2273                               prio, false);
2274        if (!tp || IS_ERR(tp)) {
2275                NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2276                err = tp ? PTR_ERR(tp) : -ENOENT;
2277                goto errout_locked;
2278        } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2279                NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2280                err = -EINVAL;
2281                goto errout_locked;
2282        } else if (t->tcm_handle == 0) {
2283                tcf_proto_signal_destroying(chain, tp);
2284                tcf_chain_tp_remove(chain, &chain_info, tp);
2285                mutex_unlock(&chain->filter_chain_lock);
2286
2287                tcf_proto_put(tp, rtnl_held, NULL);
2288                tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2289                               RTM_DELTFILTER, false, rtnl_held);
2290                err = 0;
2291                goto errout;
2292        }
2293        mutex_unlock(&chain->filter_chain_lock);
2294
2295        fh = tp->ops->get(tp, t->tcm_handle);
2296
2297        if (!fh) {
2298                NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2299                err = -ENOENT;
2300        } else {
2301                bool last;
2302
2303                err = tfilter_del_notify(net, skb, n, tp, block,
2304                                         q, parent, fh, false, &last,
2305                                         rtnl_held, extack);
2306
2307                if (err)
2308                        goto errout;
2309                if (last)
2310                        tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2311        }
2312
2313errout:
2314        if (chain) {
2315                if (tp && !IS_ERR(tp))
2316                        tcf_proto_put(tp, rtnl_held, NULL);
2317                tcf_chain_put(chain);
2318        }
2319        tcf_block_release(q, block, rtnl_held);
2320
2321        if (rtnl_held)
2322                rtnl_unlock();
2323
2324        return err;
2325
2326errout_locked:
2327        mutex_unlock(&chain->filter_chain_lock);
2328        goto errout;
2329}
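
/* Editor's note: the three delete granularities handled above correspond to,
 * for example:
 *
 *      tc filter del dev eth0 ingress                  (prio == 0: flush chain)
 *      tc filter del dev eth0 ingress prio 10          (handle == 0: remove tp)
 *      tc filter del dev eth0 ingress prio 10 \
 *              handle 0x1 flower                       (single filter)
 */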
2330
2331static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2332                          struct netlink_ext_ack *extack)
2333{
2334        struct net *net = sock_net(skb->sk);
2335        struct nlattr *tca[TCA_MAX + 1];
2336        char name[IFNAMSIZ];
2337        struct tcmsg *t;
2338        u32 protocol;
2339        u32 prio;
2340        u32 parent;
2341        u32 chain_index;
2342        struct Qdisc *q = NULL;
2343        struct tcf_chain_info chain_info;
2344        struct tcf_chain *chain = NULL;
2345        struct tcf_block *block = NULL;
2346        struct tcf_proto *tp = NULL;
2347        unsigned long cl = 0;
2348        void *fh = NULL;
2349        int err;
2350        bool rtnl_held = false;
2351
2352        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2353                                     rtm_tca_policy, extack);
2354        if (err < 0)
2355                return err;
2356
2357        t = nlmsg_data(n);
2358        protocol = TC_H_MIN(t->tcm_info);
2359        prio = TC_H_MAJ(t->tcm_info);
2360        parent = t->tcm_parent;
2361
2362        if (prio == 0) {
2363                NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2364                return -ENOENT;
2365        }
2366
2367        /* Find head of filter chain. */
2368
2369        err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2370        if (err)
2371                return err;
2372
2373        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2374                NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2375                err = -EINVAL;
2376                goto errout;
2377        }
2378        /* Take the rtnl mutex if the block is shared (no qdisc found), the
2379         * qdisc is not unlocked, or the classifier type is unspecified or
2380         * not unlocked.
2381         */
2382        if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2383            !tcf_proto_is_unlocked(name)) {
2384                rtnl_held = true;
2385                rtnl_lock();
2386        }
2387
2388        err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2389        if (err)
2390                goto errout;
2391
2392        block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2393                                 extack);
2394        if (IS_ERR(block)) {
2395                err = PTR_ERR(block);
2396                goto errout;
2397        }
2398
2399        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2400        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2401                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2402                err = -EINVAL;
2403                goto errout;
2404        }
2405        chain = tcf_chain_get(block, chain_index, false);
2406        if (!chain) {
2407                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2408                err = -EINVAL;
2409                goto errout;
2410        }
2411
2412        mutex_lock(&chain->filter_chain_lock);
2413        tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2414                               prio, false);
2415        mutex_unlock(&chain->filter_chain_lock);
2416        if (!tp || IS_ERR(tp)) {
2417                NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2418                err = tp ? PTR_ERR(tp) : -ENOENT;
2419                goto errout;
2420        } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2421                NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2422                err = -EINVAL;
2423                goto errout;
2424        }
2425
2426        fh = tp->ops->get(tp, t->tcm_handle);
2427
2428        if (!fh) {
2429                NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2430                err = -ENOENT;
2431        } else {
2432                err = tfilter_notify(net, skb, n, tp, block, q, parent,
2433                                     fh, RTM_NEWTFILTER, true, rtnl_held);
2434                if (err < 0)
2435                        NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2436        }
2437
2438        tfilter_put(tp, fh);
2439errout:
2440        if (chain) {
2441                if (tp && !IS_ERR(tp))
2442                        tcf_proto_put(tp, rtnl_held, NULL);
2443                tcf_chain_put(chain);
2444        }
2445        tcf_block_release(q, block, rtnl_held);
2446
2447        if (rtnl_held)
2448                rtnl_unlock();
2449
2450        return err;
2451}
2452
2453struct tcf_dump_args {
2454        struct tcf_walker w;
2455        struct sk_buff *skb;
2456        struct netlink_callback *cb;
2457        struct tcf_block *block;
2458        struct Qdisc *q;
2459        u32 parent;
2460        bool terse_dump;
2461};
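
/* Editor's note: struct tcf_walker must stay the first member of
 * tcf_dump_args; tcf_node_dump() below recovers the container from the walker
 * pointer with a plain cast, which depends on that layout.
 */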
2462
2463static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2464{
2465        struct tcf_dump_args *a = (void *)arg;
2466        struct net *net = sock_net(a->skb->sk);
2467
2468        return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2469                             n, NETLINK_CB(a->cb->skb).portid,
2470                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2471                             RTM_NEWTFILTER, a->terse_dump, true);
2472}
2473
2474static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2475                           struct sk_buff *skb, struct netlink_callback *cb,
2476                           long index_start, long *p_index, bool terse)
2477{
2478        struct net *net = sock_net(skb->sk);
2479        struct tcf_block *block = chain->block;
2480        struct tcmsg *tcm = nlmsg_data(cb->nlh);
2481        struct tcf_proto *tp, *tp_prev;
2482        struct tcf_dump_args arg;
2483
2484        for (tp = __tcf_get_next_proto(chain, NULL);
2485             tp;
2486             tp_prev = tp,
2487                     tp = __tcf_get_next_proto(chain, tp),
2488                     tcf_proto_put(tp_prev, true, NULL),
2489                     (*p_index)++) {
2490                if (*p_index < index_start)
2491                        continue;
2492                if (TC_H_MAJ(tcm->tcm_info) &&
2493                    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2494                        continue;
2495                if (TC_H_MIN(tcm->tcm_info) &&
2496                    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2497                        continue;
2498                if (*p_index > index_start)
2499                        memset(&cb->args[1], 0,
2500                               sizeof(cb->args) - sizeof(cb->args[0]));
2501                if (cb->args[1] == 0) {
2502                        if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2503                                          NETLINK_CB(cb->skb).portid,
2504                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
2505                                          RTM_NEWTFILTER, false, true) <= 0)
2506                                goto errout;
2507                        cb->args[1] = 1;
2508                }
2509                if (!tp->ops->walk)
2510                        continue;
2511                arg.w.fn = tcf_node_dump;
2512                arg.skb = skb;
2513                arg.cb = cb;
2514                arg.block = block;
2515                arg.q = q;
2516                arg.parent = parent;
2517                arg.w.stop = 0;
2518                arg.w.skip = cb->args[1] - 1;
2519                arg.w.count = 0;
2520                arg.w.cookie = cb->args[2];
2521                arg.terse_dump = terse;
2522                tp->ops->walk(tp, &arg.w, true);
2523                cb->args[2] = arg.w.cookie;
2524                cb->args[1] = arg.w.count + 1;
2525                if (arg.w.stop)
2526                        goto errout;
2527        }
2528        return true;
2529
2530errout:
2531        tcf_proto_put(tp, true, NULL);
2532        return false;
2533}
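
/* Editor's note: dump resume state is threaded through the netlink callback:
 * cb->args[0] counts tcf_proto instances already fully dumped (compared
 * against index_start above), cb->args[1] is 1 + the number of filters
 * already emitted from the current tcf_proto, and cb->args[2] carries the
 * classifier walker's opaque cookie between dump invocations.
 */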
2534
2535static const struct nla_policy tcf_tfilter_dump_policy[TCA_MAX + 1] = {
2536        [TCA_DUMP_FLAGS] = NLA_POLICY_BITFIELD32(TCA_DUMP_FLAGS_TERSE),
2537};
2538
2539/* called with RTNL */
2540static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2541{
2542        struct tcf_chain *chain, *chain_prev;
2543        struct net *net = sock_net(skb->sk);
2544        struct nlattr *tca[TCA_MAX + 1];
2545        struct Qdisc *q = NULL;
2546        struct tcf_block *block;
2547        struct tcmsg *tcm = nlmsg_data(cb->nlh);
2548        bool terse_dump = false;
2549        long index_start;
2550        long index;
2551        u32 parent;
2552        int err;
2553
2554        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2555                return skb->len;
2556
2557        err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2558                                     tcf_tfilter_dump_policy, cb->extack);
2559        if (err)
2560                return err;
2561
2562        if (tca[TCA_DUMP_FLAGS]) {
2563                struct nla_bitfield32 flags =
2564                        nla_get_bitfield32(tca[TCA_DUMP_FLAGS]);
2565
2566                terse_dump = flags.value & TCA_DUMP_FLAGS_TERSE;
2567        }
2568
2569        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2570                block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2571                if (!block)
2572                        goto out;
2573                /* If we work with a block index, q is NULL and the parent
2574                 * value will never be used in the following code. The check
2575                 * in tcf_fill_node prevents it. However, the compiler does
2576                 * not see that far, so set parent to zero to silence the
2577                 * warning about parent being uninitialized.
2578                 */
2579                parent = 0;
2580        } else {
2581                const struct Qdisc_class_ops *cops;
2582                struct net_device *dev;
2583                unsigned long cl = 0;
2584
2585                dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2586                if (!dev)
2587                        return skb->len;
2588
2589                parent = tcm->tcm_parent;
2590                if (!parent)
2591                        q = dev->qdisc;
2592                else
2593                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2594                if (!q)
2595                        goto out;
2596                cops = q->ops->cl_ops;
2597                if (!cops)
2598                        goto out;
2599                if (!cops->tcf_block)
2600                        goto out;
2601                if (TC_H_MIN(tcm->tcm_parent)) {
2602                        cl = cops->find(q, tcm->tcm_parent);
2603                        if (cl == 0)
2604                                goto out;
2605                }
2606                block = cops->tcf_block(q, cl, NULL);
2607                if (!block)
2608                        goto out;
2609                parent = block->classid;
2610                if (tcf_block_shared(block))
2611                        q = NULL;
2612        }
2613
2614        index_start = cb->args[0];
2615        index = 0;
2616
2617        for (chain = __tcf_get_next_chain(block, NULL);
2618             chain;
2619             chain_prev = chain,
2620                     chain = __tcf_get_next_chain(block, chain),
2621                     tcf_chain_put(chain_prev)) {
2622                if (tca[TCA_CHAIN] &&
2623                    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2624                        continue;
2625                if (!tcf_chain_dump(chain, q, parent, skb, cb,
2626                                    index_start, &index, terse_dump)) {
2627                        tcf_chain_put(chain);
2628                        err = -EMSGSIZE;
2629                        break;
2630                }
2631        }
2632
2633        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2634                tcf_block_refcnt_put(block, true);
2635        cb->args[0] = index;
2636
2637out:
2638        /* If we made no progress, the error (EMSGSIZE) is real */
2639        if (skb->len == 0 && err)
2640                return err;
2641        return skb->len;
2642}
2643
2644static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2645                              void *tmplt_priv, u32 chain_index,
2646                              struct net *net, struct sk_buff *skb,
2647                              struct tcf_block *block,
2648                              u32 portid, u32 seq, u16 flags, int event)
2649{
2650        unsigned char *b = skb_tail_pointer(skb);
2651        const struct tcf_proto_ops *ops;
2652        struct nlmsghdr *nlh;
2653        struct tcmsg *tcm;
2654        void *priv;
2655
2656        ops = tmplt_ops;
2657        priv = tmplt_priv;
2658
2659        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2660        if (!nlh)
2661                goto out_nlmsg_trim;
2662        tcm = nlmsg_data(nlh);
2663        tcm->tcm_family = AF_UNSPEC;
2664        tcm->tcm__pad1 = 0;
2665        tcm->tcm__pad2 = 0;
2666        tcm->tcm_handle = 0;
2667        if (block->q) {
2668                tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2669                tcm->tcm_parent = block->q->handle;
2670        } else {
2671                tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2672                tcm->tcm_block_index = block->index;
2673        }
2674
2675        if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2676                goto nla_put_failure;
2677
2678        if (ops) {
2679                if (nla_put_string(skb, TCA_KIND, ops->kind))
2680                        goto nla_put_failure;
2681                if (ops->tmplt_dump(skb, net, priv) < 0)
2682                        goto nla_put_failure;
2683        }
2684
2685        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2686        return skb->len;
2687
2688out_nlmsg_trim:
2689nla_put_failure:
2690        nlmsg_trim(skb, b);
2691        return -EMSGSIZE;
2692}
2693
2694static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2695                           u32 seq, u16 flags, int event, bool unicast)
2696{
2697        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2698        struct tcf_block *block = chain->block;
2699        struct net *net = block->net;
2700        struct sk_buff *skb;
2701        int err = 0;
2702
2703        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2704        if (!skb)
2705                return -ENOBUFS;
2706
2707        if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2708                               chain->index, net, skb, block, portid,
2709                               seq, flags, event) <= 0) {
2710                kfree_skb(skb);
2711                return -EINVAL;
2712        }
2713
2714        if (unicast)
2715                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2716        else
2717                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2718                                     flags & NLM_F_ECHO);
2719
2720        if (err > 0)
2721                err = 0;
2722        return err;
2723}
2724
2725static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2726                                  void *tmplt_priv, u32 chain_index,
2727                                  struct tcf_block *block, struct sk_buff *oskb,
2728                                  u32 seq, u16 flags, bool unicast)
2729{
2730        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2731        struct net *net = block->net;
2732        struct sk_buff *skb;
2733
2734        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2735        if (!skb)
2736                return -ENOBUFS;
2737
2738        if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2739                               block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2740                kfree_skb(skb);
2741                return -EINVAL;
2742        }
2743
2744        if (unicast)
2745                return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2746
2747        return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2748}
2749
2750static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2751                              struct nlattr **tca,
2752                              struct netlink_ext_ack *extack)
2753{
2754        const struct tcf_proto_ops *ops;
2755        char name[IFNAMSIZ];
2756        void *tmplt_priv;
2757
2758        /* If kind is not set, the user did not specify a template. */
2759        if (!tca[TCA_KIND])
2760                return 0;
2761
2762        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2763                NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2764                return -EINVAL;
2765        }
2766
2767        ops = tcf_proto_lookup_ops(name, true, extack);
2768        if (IS_ERR(ops))
2769                return PTR_ERR(ops);
2770        if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2771                NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2772                return -EOPNOTSUPP;
2773        }
2774
2775        tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2776        if (IS_ERR(tmplt_priv)) {
2777                module_put(ops->owner);
2778                return PTR_ERR(tmplt_priv);
2779        }
2780        chain->tmplt_ops = ops;
2781        chain->tmplt_priv = tmplt_priv;
2782        return 0;
2783}
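
/* Editor's note: chain templates are created by handing RTM_NEWCHAIN a
 * classifier kind (flower is currently the main implementor), e.g.:
 *
 *      tc chain add dev eth0 ingress protocol ip \
 *              flower dst_mac 00:00:00:00:00:00/ff:ff:ff:ff:ff:00
 *
 * Filters later added to that chain must then conform to the template's mask,
 * enforced through the tmplt_* ops bound above.
 */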
2784
2785static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2786                               void *tmplt_priv)
2787{
2788        /* If template ops are not set, there is no work to do for us. */
2789        if (!tmplt_ops)
2790                return;
2791
2792        tmplt_ops->tmplt_destroy(tmplt_priv);
2793        module_put(tmplt_ops->owner);
2794}
2795
2796/* Add/delete/get a chain */
2797
2798static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2799                        struct netlink_ext_ack *extack)
2800{
2801        struct net *net = sock_net(skb->sk);
2802        struct nlattr *tca[TCA_MAX + 1];
2803        struct tcmsg *t;
2804        u32 parent;
2805        u32 chain_index;
2806        struct Qdisc *q = NULL;
2807        struct tcf_chain *chain = NULL;
2808        struct tcf_block *block;
2809        unsigned long cl;
2810        int err;
2811
2812        if (n->nlmsg_type != RTM_GETCHAIN &&
2813            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2814                return -EPERM;
2815
2816replay:
2817        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2818                                     rtm_tca_policy, extack);
2819        if (err < 0)
2820                return err;
2821
2822        t = nlmsg_data(n);
2823        parent = t->tcm_parent;
2824        cl = 0;
2825
2826        block = tcf_block_find(net, &q, &parent, &cl,
2827                               t->tcm_ifindex, t->tcm_block_index, extack);
2828        if (IS_ERR(block))
2829                return PTR_ERR(block);
2830
2831        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2832        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2833                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2834                err = -EINVAL;
2835                goto errout_block;
2836        }
2837
2838        mutex_lock(&block->lock);
2839        chain = tcf_chain_lookup(block, chain_index);
2840        if (n->nlmsg_type == RTM_NEWCHAIN) {
2841                if (chain) {
2842                        if (tcf_chain_held_by_acts_only(chain)) {
2843                                /* The chain exists only because there is
2844                                 * some action referencing it.
2845                                 */
2846                                tcf_chain_hold(chain);
2847                        } else {
2848                                NL_SET_ERR_MSG(extack, "Filter chain already exists");
2849                                err = -EEXIST;
2850                                goto errout_block_locked;
2851                        }
2852                } else {
2853                        if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2854                                NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2855                                err = -ENOENT;
2856                                goto errout_block_locked;
2857                        }
2858                        chain = tcf_chain_create(block, chain_index);
2859                        if (!chain) {
2860                                NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2861                                err = -ENOMEM;
2862                                goto errout_block_locked;
2863                        }
2864                }
2865        } else {
2866                if (!chain || tcf_chain_held_by_acts_only(chain)) {
2867                        NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2868                        err = -EINVAL;
2869                        goto errout_block_locked;
2870                }
2871                tcf_chain_hold(chain);
2872        }
2873
2874        if (n->nlmsg_type == RTM_NEWCHAIN) {
2875                /* Modifying chain requires holding parent block lock. In case
2876                 * the chain was successfully added, take a reference to the
2877                 * chain. This ensures that an empty chain does not disappear at
2878                 * the end of this function.
2879                 */
2880                tcf_chain_hold(chain);
2881                chain->explicitly_created = true;
2882        }
2883        mutex_unlock(&block->lock);
2884
2885        switch (n->nlmsg_type) {
2886        case RTM_NEWCHAIN:
2887                err = tc_chain_tmplt_add(chain, net, tca, extack);
2888                if (err) {
2889                        tcf_chain_put_explicitly_created(chain);
2890                        goto errout;
2891                }
2892
2893                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2894                                RTM_NEWCHAIN, false);
2895                break;
2896        case RTM_DELCHAIN:
2897                tfilter_notify_chain(net, skb, block, q, parent, n,
2898                                     chain, RTM_DELTFILTER, true);
2899                /* Flush the chain first as the user requested chain removal. */
2900                tcf_chain_flush(chain, true);
2901                /* In case the chain was successfully deleted, put a reference
2902                 * to the chain previously taken during addition.
2903                 */
2904                tcf_chain_put_explicitly_created(chain);
2905                break;
2906        case RTM_GETCHAIN:
2907                err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2908                                      n->nlmsg_flags, n->nlmsg_type, true);
2909                if (err < 0)
2910                        NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2911                break;
2912        default:
2913                err = -EOPNOTSUPP;
2914                NL_SET_ERR_MSG(extack, "Unsupported message type");
2915                goto errout;
2916        }
2917
2918errout:
2919        tcf_chain_put(chain);
2920errout_block:
2921        tcf_block_release(q, block, true);
2922        if (err == -EAGAIN)
2923                /* Replay the request. */
2924                goto replay;
2925        return err;
2926
2927errout_block_locked:
2928        mutex_unlock(&block->lock);
2929        goto errout_block;
2930}
2931
2932/* called with RTNL */
2933static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2934{
2935        struct net *net = sock_net(skb->sk);
2936        struct nlattr *tca[TCA_MAX + 1];
2937        struct Qdisc *q = NULL;
2938        struct tcf_block *block;
2939        struct tcmsg *tcm = nlmsg_data(cb->nlh);
2940        struct tcf_chain *chain;
2941        long index_start;
2942        long index;
2943        u32 parent;
2944        int err;
2945
2946        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2947                return skb->len;
2948
2949        err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2950                                     rtm_tca_policy, cb->extack);
2951        if (err)
2952                return err;
2953
2954        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2955                block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2956                if (!block)
2957                        goto out;
2958                /* If we work with a block index, q is NULL and the parent
2959                 * value will never be used in the following code. The check
2960                 * in tcf_fill_node prevents it. However, the compiler does
2961                 * not see that far, so set parent to zero to silence the
2962                 * warning about parent being uninitialized.
2963                 */
2964                parent = 0;
2965        } else {
2966                const struct Qdisc_class_ops *cops;
2967                struct net_device *dev;
2968                unsigned long cl = 0;
2969
2970                dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2971                if (!dev)
2972                        return skb->len;
2973
2974                parent = tcm->tcm_parent;
2975                if (!parent) {
2976                        q = dev->qdisc;
2977                        parent = q->handle;
2978                } else {
2979                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2980                }
2981                if (!q)
2982                        goto out;
2983                cops = q->ops->cl_ops;
2984                if (!cops)
2985                        goto out;
2986                if (!cops->tcf_block)
2987                        goto out;
2988                if (TC_H_MIN(tcm->tcm_parent)) {
2989                        cl = cops->find(q, tcm->tcm_parent);
2990                        if (cl == 0)
2991                                goto out;
2992                }
2993                block = cops->tcf_block(q, cl, NULL);
2994                if (!block)
2995                        goto out;
2996                if (tcf_block_shared(block))
2997                        q = NULL;
2998        }
2999
3000        index_start = cb->args[0];
3001        index = 0;
3002
3003        mutex_lock(&block->lock);
3004        list_for_each_entry(chain, &block->chain_list, list) {
3005                if ((tca[TCA_CHAIN] &&
3006                     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3007                        continue;
3008                if (index < index_start) {
3009                        index++;
3010                        continue;
3011                }
3012                if (tcf_chain_held_by_acts_only(chain))
3013                        continue;
3014                err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3015                                         chain->index, net, skb, block,
3016                                         NETLINK_CB(cb->skb).portid,
3017                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
3018                                         RTM_NEWCHAIN);
3019                if (err <= 0)
3020                        break;
3021                index++;
3022        }
3023        mutex_unlock(&block->lock);
3024
3025        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3026                tcf_block_refcnt_put(block, true);
3027        cb->args[0] = index;
3028
3029out:
3030        /* If we made no progress, the error (EMSGSIZE) is real. */
3031        if (skb->len == 0 && err)
3032                return err;
3033        return skb->len;
3034}
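
/* Illustrative note (not part of this file): this dump handler backs the
 * chain listing in iproute2 for both addressing modes, e.g.:
 *
 *	tc chain show dev eth0 ingress
 *	tc chain show block 22
 */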
3035
3036void tcf_exts_destroy(struct tcf_exts *exts)
3037{
3038#ifdef CONFIG_NET_CLS_ACT
3039        if (exts->actions) {
3040                tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3041                kfree(exts->actions);
3042        }
3043        exts->nr_actions = 0;
3044#endif
3045}
3046EXPORT_SYMBOL(tcf_exts_destroy);
3047
3048int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3049                      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3050                      bool rtnl_held, struct netlink_ext_ack *extack)
3051{
3052#ifdef CONFIG_NET_CLS_ACT
3053        {
3054                struct tc_action *act;
3055                size_t attr_size = 0;
3056
3057                if (exts->police && tb[exts->police]) {
3058                        act = tcf_action_init_1(net, tp, tb[exts->police],
3059                                                rate_tlv, "police", ovr,
3060                                                TCA_ACT_BIND, rtnl_held,
3061                                                extack);
3062                        if (IS_ERR(act))
3063                                return PTR_ERR(act);
3064
3065                        act->type = exts->type = TCA_OLD_COMPAT;
3066                        exts->actions[0] = act;
3067                        exts->nr_actions = 1;
3068                } else if (exts->action && tb[exts->action]) {
3069                        int err;
3070
3071                        err = tcf_action_init(net, tp, tb[exts->action],
3072                                              rate_tlv, NULL, ovr, TCA_ACT_BIND,
3073                                              exts->actions, &attr_size,
3074                                              rtnl_held, extack);
3075                        if (err < 0)
3076                                return err;
3077                        exts->nr_actions = err;
3078                }
3079        }
3080#else
3081        if ((exts->action && tb[exts->action]) ||
3082            (exts->police && tb[exts->police])) {
3083                NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3084                return -EOPNOTSUPP;
3085        }
3086#endif
3087
3088        return 0;
3089}
3090EXPORT_SYMBOL(tcf_exts_validate);
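
/* Usage sketch (illustrative only; the classifier "foo" and its types are
 * hypothetical): a classifier's ->change() path typically validates the
 * extensions straight into the new filter and binds the class result
 * afterwards:
 *
 *	static int foo_set_parms(struct net *net, struct tcf_proto *tp,
 *				 struct foo_filter *f, unsigned long base,
 *				 struct nlattr **tb, struct nlattr *est,
 *				 bool ovr, struct netlink_ext_ack *extack)
 *	{
 *		int err;
 *
 *		err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr,
 *					true, extack);
 *		if (err < 0)
 *			return err;
 *
 *		tcf_bind_filter(tp, &f->res, base);
 *		return 0;
 *	}
 */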
3091
3092void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3093{
3094#ifdef CONFIG_NET_CLS_ACT
3095        struct tcf_exts old = *dst;
3096
3097        *dst = *src;
3098        tcf_exts_destroy(&old);
3099#endif
3100}
3101EXPORT_SYMBOL(tcf_exts_change);
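
/* Illustrative sketch: when updating a live filter, extensions can be
 * validated into a scratch tcf_exts and then committed with
 * tcf_exts_change(), which frees the previous actions. TCA_FOO_ACT and
 * TCA_FOO_POLICE below are hypothetical attribute names:
 *
 *	struct tcf_exts new_exts;
 *	int err;
 *
 *	err = tcf_exts_init(&new_exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &new_exts, ovr, true, extack);
 *	if (err < 0)
 *		return err;
 *	tcf_exts_change(&f->exts, &new_exts);
 */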
3102
3103#ifdef CONFIG_NET_CLS_ACT
3104static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3105{
3106        if (exts->nr_actions == 0)
3107                return NULL;
3108        else
3109                return exts->actions[0];
3110}
3111#endif
3112
3113int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3114{
3115#ifdef CONFIG_NET_CLS_ACT
3116        struct nlattr *nest;
3117
3118        if (exts->action && tcf_exts_has_actions(exts)) {
3119                /*
3120                 * Again, for backward-compatible mode: we want to work
3121                 * with both old and new modes of entering tc data, even
3122                 * if iproute2 is newer - jhs
3123                 */
3124                if (exts->type != TCA_OLD_COMPAT) {
3125                        nest = nla_nest_start_noflag(skb, exts->action);
3126                        if (nest == NULL)
3127                                goto nla_put_failure;
3128
3129                        if (tcf_action_dump(skb, exts->actions, 0, 0,
3130                                            false) < 0)
3131                                goto nla_put_failure;
3132                        nla_nest_end(skb, nest);
3133                } else if (exts->police) {
3134                        struct tc_action *act = tcf_exts_first_act(exts);

3135                        nest = nla_nest_start_noflag(skb, exts->police);
3136                        if (nest == NULL || !act)
3137                                goto nla_put_failure;
3138                        if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3139                                goto nla_put_failure;
3140                        nla_nest_end(skb, nest);
3141                }
3142        }
3143        return 0;
3144
3145nla_put_failure:
3146        nla_nest_cancel(skb, nest);
3147        return -1;
3148#else
3149        return 0;
3150#endif
3151}
3152EXPORT_SYMBOL(tcf_exts_dump);
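
/* Dump-side sketch (illustrative; the classifier "foo" is hypothetical):
 * the extension attributes are emitted inside the classifier's
 * TCA_OPTIONS nest, with action statistics appended outside of it:
 *
 *	static int foo_dump(struct net *net, struct tcf_proto *tp, void *fh,
 *			    struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 *	{
 *		struct foo_filter *f = fh;
 *		struct nlattr *nest;
 *
 *		nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 *		if (!nest)
 *			goto nla_put_failure;
 *		if (tcf_exts_dump(skb, &f->exts) < 0)
 *			goto nla_put_failure;
 *		nla_nest_end(skb, nest);
 *
 *		if (tcf_exts_dump_stats(skb, &f->exts) < 0)
 *			goto nla_put_failure;
 *		return skb->len;
 *
 *	nla_put_failure:
 *		nla_nest_cancel(skb, nest);
 *		return -1;
 *	}
 */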
3153
3154int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts)
3155{
3156#ifdef CONFIG_NET_CLS_ACT
3157        struct nlattr *nest;
3158
3159        if (!exts->action || !tcf_exts_has_actions(exts))
3160                return 0;
3161
3162        nest = nla_nest_start_noflag(skb, exts->action);
3163        if (!nest)
3164                goto nla_put_failure;
3165
3166        if (tcf_action_dump(skb, exts->actions, 0, 0, true) < 0)
3167                goto nla_put_failure;
3168        nla_nest_end(skb, nest);
3169        return 0;
3170
3171nla_put_failure:
3172        nla_nest_cancel(skb, nest);
3173        return -1;
3174#else
3175        return 0;
3176#endif
3177}
3178EXPORT_SYMBOL(tcf_exts_terse_dump);
3179
3180int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3181{
3182#ifdef CONFIG_NET_CLS_ACT
3183        struct tc_action *a = tcf_exts_first_act(exts);

3184        if (a && tcf_action_copy_stats(skb, a, 1) < 0)
3185                return -1;
3186#endif
3187        return 0;
3188}
3189EXPORT_SYMBOL(tcf_exts_dump_stats);
3190
3191static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3192{
3193        if (*flags & TCA_CLS_FLAGS_IN_HW)
3194                return;
3195        *flags |= TCA_CLS_FLAGS_IN_HW;
3196        atomic_inc(&block->offloadcnt);
3197}
3198
3199static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3200{
3201        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3202                return;
3203        *flags &= ~TCA_CLS_FLAGS_IN_HW;
3204        atomic_dec(&block->offloadcnt);
3205}
3206
3207static void tc_cls_offload_cnt_update(struct tcf_block *block,
3208                                      struct tcf_proto *tp, u32 *cnt,
3209                                      u32 *flags, u32 diff, bool add)
3210{
3211        lockdep_assert_held(&block->cb_lock);
3212
3213        spin_lock(&tp->lock);
3214        if (add) {
3215                if (!*cnt)
3216                        tcf_block_offload_inc(block, flags);
3217                *cnt += diff;
3218        } else {
3219                *cnt -= diff;
3220                if (!*cnt)
3221                        tcf_block_offload_dec(block, flags);
3222        }
3223        spin_unlock(&tp->lock);
3224}
3225
3226static void
3227tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3228                         u32 *cnt, u32 *flags)
3229{
3230        lockdep_assert_held(&block->cb_lock);
3231
3232        spin_lock(&tp->lock);
3233        tcf_block_offload_dec(block, flags);
3234        *cnt = 0;
3235        spin_unlock(&tp->lock);
3236}
3237
3238static int
3239__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3240                   void *type_data, bool err_stop)
3241{
3242        struct flow_block_cb *block_cb;
3243        int ok_count = 0;
3244        int err;
3245
3246        list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3247                err = block_cb->cb(type, type_data, block_cb->cb_priv);
3248                if (err) {
3249                        if (err_stop)
3250                                return err;
3251                } else {
3252                        ok_count++;
3253                }
3254        }
3255        return ok_count;
3256}
3257
3258int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3259                     void *type_data, bool err_stop, bool rtnl_held)
3260{
3261        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3262        int ok_count;
3263
3264retry:
3265        if (take_rtnl)
3266                rtnl_lock();
3267        down_read(&block->cb_lock);
3268        /* Need to obtain rtnl lock if block is bound to devs that require it.
3269         * In block bind code cb_lock is obtained while holding rtnl, so we must
3270         * obtain the locks in the same order here.
3271         */
3272        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3273                up_read(&block->cb_lock);
3274                take_rtnl = true;
3275                goto retry;
3276        }
3277
3278        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3279
3280        up_read(&block->cb_lock);
3281        if (take_rtnl)
3282                rtnl_unlock();
3283        return ok_count;
3284}
3285EXPORT_SYMBOL(tc_setup_cb_call);
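
/* Driver-side sketch (illustrative): the callbacks iterated by
 * __tc_setup_cb_call() are the flow_block callbacks a driver registered
 * at block bind time. A minimal one just dispatches on the setup type
 * (the foo_* names are hypothetical):
 *
 *	static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *					 void *type_data, void *cb_priv)
 *	{
 *		struct foo_priv *priv = cb_priv;
 *
 *		switch (type) {
 *		case TC_SETUP_CLSFLOWER:
 *			return foo_setup_flower(priv, type_data);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */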
3286
3287/* Non-destructive filter add. If filter that wasn't already in hardware is
3288 * successfully offloaded, increment block offloads counter. On failure,
3289 * previously offloaded filter is considered to be intact and offloads counter
3290 * is not decremented.
3291 */
3292
3293int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3294                    enum tc_setup_type type, void *type_data, bool err_stop,
3295                    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3296{
3297        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3298        int ok_count;
3299
3300retry:
3301        if (take_rtnl)
3302                rtnl_lock();
3303        down_read(&block->cb_lock);
3304        /* Need to obtain rtnl lock if block is bound to devs that require it.
3305         * In block bind code cb_lock is obtained while holding rtnl, so we must
3306         * obtain the locks in the same order here.
3307         */
3308        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3309                up_read(&block->cb_lock);
3310                take_rtnl = true;
3311                goto retry;
3312        }
3313
3314        /* Make sure all netdevs sharing this block are offload-capable. */
3315        if (block->nooffloaddevcnt && err_stop) {
3316                ok_count = -EOPNOTSUPP;
3317                goto err_unlock;
3318        }
3319
3320        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3321        if (ok_count < 0)
3322                goto err_unlock;
3323
3324        if (tp->ops->hw_add)
3325                tp->ops->hw_add(tp, type_data);
3326        if (ok_count > 0)
3327                tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3328                                          ok_count, true);
3329err_unlock:
3330        up_read(&block->cb_lock);
3331        if (take_rtnl)
3332                rtnl_unlock();
3333        return ok_count < 0 ? ok_count : 0;
3334}
3335EXPORT_SYMBOL(tc_setup_cb_add);
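
/* Caller sketch (illustrative, modelled on cls_flower): offload a new
 * filter and let the helper account for how many callbacks accepted it:
 *
 *	struct flow_cls_offload cls_flower = {};
 *
 *	cls_flower.command = FLOW_CLS_REPLACE;
 *	cls_flower.cookie = (unsigned long)f;
 *	cls_flower.rule = ...;	(built with tc_setup_flow_action(), below)
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			      skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
 */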
3336
3337/* Destructive filter replace. If filter that wasn't already in hardware is
3338 * successfully offloaded, increment block offload counter. On failure,
3339 * previously offloaded filter is considered to be destroyed and offload counter
3340 * is decremented.
3341 */
3342
3343int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3344                        enum tc_setup_type type, void *type_data, bool err_stop,
3345                        u32 *old_flags, unsigned int *old_in_hw_count,
3346                        u32 *new_flags, unsigned int *new_in_hw_count,
3347                        bool rtnl_held)
3348{
3349        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3350        int ok_count;
3351
3352retry:
3353        if (take_rtnl)
3354                rtnl_lock();
3355        down_read(&block->cb_lock);
3356        /* Need to obtain rtnl lock if block is bound to devs that require it.
3357         * In block bind code cb_lock is obtained while holding rtnl, so we must
3358         * obtain the locks in the same order here.
3359         */
3360        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3361                up_read(&block->cb_lock);
3362                take_rtnl = true;
3363                goto retry;
3364        }
3365
3366        /* Make sure all netdevs sharing this block are offload-capable. */
3367        if (block->nooffloaddevcnt && err_stop) {
3368                ok_count = -EOPNOTSUPP;
3369                goto err_unlock;
3370        }
3371
3372        tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3373        if (tp->ops->hw_del)
3374                tp->ops->hw_del(tp, type_data);
3375
3376        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3377        if (ok_count < 0)
3378                goto err_unlock;
3379
3380        if (tp->ops->hw_add)
3381                tp->ops->hw_add(tp, type_data);
3382        if (ok_count > 0)
3383                tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3384                                          new_flags, ok_count, true);
3385err_unlock:
3386        up_read(&block->cb_lock);
3387        if (take_rtnl)
3388                rtnl_unlock();
3389        return ok_count < 0 ? ok_count : 0;
3390}
3391EXPORT_SYMBOL(tc_setup_cb_replace);
3392
3393/* Destroy filter and decrement block offload counter, if filter was previously
3394 * offloaded.
3395 */
3396
3397int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3398                        enum tc_setup_type type, void *type_data, bool err_stop,
3399                        u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3400{
3401        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3402        int ok_count;
3403
3404retry:
3405        if (take_rtnl)
3406                rtnl_lock();
3407        down_read(&block->cb_lock);
3408        /* Need to obtain rtnl lock if block is bound to devs that require it.
3409         * In block bind code cb_lock is obtained while holding rtnl, so we must
3410         * obtain the locks in the same order here.
3411         */
3412        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3413                up_read(&block->cb_lock);
3414                take_rtnl = true;
3415                goto retry;
3416        }
3417
3418        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3419
3420        tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3421        if (tp->ops->hw_del)
3422                tp->ops->hw_del(tp, type_data);
3423
3424        up_read(&block->cb_lock);
3425        if (take_rtnl)
3426                rtnl_unlock();
3427        return ok_count < 0 ? ok_count : 0;
3428}
3429EXPORT_SYMBOL(tc_setup_cb_destroy);
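
/* Caller sketch (illustrative, again modelled on cls_flower): tearing
 * down the hardware state mirrors the add path:
 *
 *	cls_flower.command = FLOW_CLS_DESTROY;
 *	cls_flower.cookie = (unsigned long)f;
 *	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
 *			    false, &f->flags, &f->in_hw_count, rtnl_held);
 */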
3430
3431int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3432                          bool add, flow_setup_cb_t *cb,
3433                          enum tc_setup_type type, void *type_data,
3434                          void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3435{
3436        int err = cb(type, type_data, cb_priv);
3437
3438        if (err) {
3439                if (add && tc_skip_sw(*flags))
3440                        return err;
3441        } else {
3442                tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3443                                          add);
3444        }
3445
3446        return 0;
3447}
3448EXPORT_SYMBOL(tc_setup_cb_reoffload);
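
/* Caller sketch (illustrative): a classifier's ->reoffload() replays
 * every existing filter through this helper when a callback is added to
 * (add == true) or removed from (add == false) an already populated
 * block, so that a late-registering driver sees the full rule set:
 *
 *	list_for_each_entry(f, &head->filters, list) {
 *		... build cls_flower for f ...
 *		err = tc_setup_cb_reoffload(block, tp, add, cb,
 *					    TC_SETUP_CLSFLOWER, &cls_flower,
 *					    cb_priv, &f->flags,
 *					    &f->in_hw_count);
 *		if (err)
 *			return err;
 *	}
 */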
3449
3450static int tcf_act_get_cookie(struct flow_action_entry *entry,
3451                              const struct tc_action *act)
3452{
3453        struct tc_cookie *cookie;
3454        int err = 0;
3455
3456        rcu_read_lock();
3457        cookie = rcu_dereference(act->act_cookie);
3458        if (cookie) {
3459                entry->cookie = flow_action_cookie_create(cookie->data,
3460                                                          cookie->len,
3461                                                          GFP_ATOMIC);
3462                if (!entry->cookie)
3463                        err = -ENOMEM;
3464        }
3465        rcu_read_unlock();
3466        return err;
3467}
3468
3469static void tcf_act_put_cookie(struct flow_action_entry *entry)
3470{
3471        flow_action_cookie_destroy(entry->cookie);
3472}
3473
3474void tc_cleanup_flow_action(struct flow_action *flow_action)
3475{
3476        struct flow_action_entry *entry;
3477        int i;
3478
3479        flow_action_for_each(i, entry, flow_action) {
3480                tcf_act_put_cookie(entry);
3481                if (entry->destructor)
3482                        entry->destructor(entry->destructor_priv);
3483        }
3484}
3485EXPORT_SYMBOL(tc_cleanup_flow_action);
3486
3487static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3488                               const struct tc_action *act)
3489{
3490#ifdef CONFIG_NET_CLS_ACT
3491        entry->dev = act->ops->get_dev(act, &entry->destructor);
3492        if (!entry->dev)
3493                return;
3494        entry->destructor_priv = entry->dev;
3495#endif
3496}
3497
3498static void tcf_tunnel_encap_put_tunnel(void *priv)
3499{
3500        struct ip_tunnel_info *tunnel = priv;
3501
3502        kfree(tunnel);
3503}
3504
3505static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3506                                       const struct tc_action *act)
3507{
3508        entry->tunnel = tcf_tunnel_info_copy(act);
3509        if (!entry->tunnel)
3510                return -ENOMEM;
3511        entry->destructor = tcf_tunnel_encap_put_tunnel;
3512        entry->destructor_priv = entry->tunnel;
3513        return 0;
3514}
3515
3516static void tcf_sample_get_group(struct flow_action_entry *entry,
3517                                 const struct tc_action *act)
3518{
3519#ifdef CONFIG_NET_CLS_ACT
3520        entry->sample.psample_group =
3521                act->ops->get_psample_group(act, &entry->destructor);
3522        entry->destructor_priv = entry->sample.psample_group;
3523#endif
3524}
3525
3526static void tcf_gate_entry_destructor(void *priv)
3527{
3528        struct action_gate_entry *oe = priv;
3529
3530        kfree(oe);
3531}
3532
3533static int tcf_gate_get_entries(struct flow_action_entry *entry,
3534                                const struct tc_action *act)
3535{
3536        entry->gate.entries = tcf_gate_get_list(act);
3537
3538        if (!entry->gate.entries)
3539                return -EINVAL;
3540
3541        entry->destructor = tcf_gate_entry_destructor;
3542        entry->destructor_priv = entry->gate.entries;
3543
3544        return 0;
3545}
3546
3547static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3548{
3549        if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3550                return FLOW_ACTION_HW_STATS_DONT_CARE;
3551        else if (!hw_stats)
3552                return FLOW_ACTION_HW_STATS_DISABLED;
3553
3554        return hw_stats;
3555}
3556
3557int tc_setup_flow_action(struct flow_action *flow_action,
3558                         const struct tcf_exts *exts)
3559{
3560        struct tc_action *act;
3561        int i, j, k, err = 0;
3562
3563        BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3564        BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3565        BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3566
3567        if (!exts)
3568                return 0;
3569
3570        j = 0;
3571        tcf_exts_for_each_action(i, act, exts) {
3572                struct flow_action_entry *entry;
3573
3574                entry = &flow_action->entries[j];
3575                spin_lock_bh(&act->tcfa_lock);
3576                err = tcf_act_get_cookie(entry, act);
3577                if (err)
3578                        goto err_out_locked;
3579
3580                entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3581
3582                if (is_tcf_gact_ok(act)) {
3583                        entry->id = FLOW_ACTION_ACCEPT;
3584                } else if (is_tcf_gact_shot(act)) {
3585                        entry->id = FLOW_ACTION_DROP;
3586                } else if (is_tcf_gact_trap(act)) {
3587                        entry->id = FLOW_ACTION_TRAP;
3588                } else if (is_tcf_gact_goto_chain(act)) {
3589                        entry->id = FLOW_ACTION_GOTO;
3590                        entry->chain_index = tcf_gact_goto_chain_index(act);
3591                } else if (is_tcf_mirred_egress_redirect(act)) {
3592                        entry->id = FLOW_ACTION_REDIRECT;
3593                        tcf_mirred_get_dev(entry, act);
3594                } else if (is_tcf_mirred_egress_mirror(act)) {
3595                        entry->id = FLOW_ACTION_MIRRED;
3596                        tcf_mirred_get_dev(entry, act);
3597                } else if (is_tcf_mirred_ingress_redirect(act)) {
3598                        entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3599                        tcf_mirred_get_dev(entry, act);
3600                } else if (is_tcf_mirred_ingress_mirror(act)) {
3601                        entry->id = FLOW_ACTION_MIRRED_INGRESS;
3602                        tcf_mirred_get_dev(entry, act);
3603                } else if (is_tcf_vlan(act)) {
3604                        switch (tcf_vlan_action(act)) {
3605                        case TCA_VLAN_ACT_PUSH:
3606                                entry->id = FLOW_ACTION_VLAN_PUSH;
3607                                entry->vlan.vid = tcf_vlan_push_vid(act);
3608                                entry->vlan.proto = tcf_vlan_push_proto(act);
3609                                entry->vlan.prio = tcf_vlan_push_prio(act);
3610                                break;
3611                        case TCA_VLAN_ACT_POP:
3612                                entry->id = FLOW_ACTION_VLAN_POP;
3613                                break;
3614                        case TCA_VLAN_ACT_MODIFY:
3615                                entry->id = FLOW_ACTION_VLAN_MANGLE;
3616                                entry->vlan.vid = tcf_vlan_push_vid(act);
3617                                entry->vlan.proto = tcf_vlan_push_proto(act);
3618                                entry->vlan.prio = tcf_vlan_push_prio(act);
3619                                break;
3620                        default:
3621                                err = -EOPNOTSUPP;
3622                                goto err_out_locked;
3623                        }
3624                } else if (is_tcf_tunnel_set(act)) {
3625                        entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3626                        err = tcf_tunnel_encap_get_tunnel(entry, act);
3627                        if (err)
3628                                goto err_out_locked;
3629                } else if (is_tcf_tunnel_release(act)) {
3630                        entry->id = FLOW_ACTION_TUNNEL_DECAP;
3631                } else if (is_tcf_pedit(act)) {
3632                        for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3633                                switch (tcf_pedit_cmd(act, k)) {
3634                                case TCA_PEDIT_KEY_EX_CMD_SET:
3635                                        entry->id = FLOW_ACTION_MANGLE;
3636                                        break;
3637                                case TCA_PEDIT_KEY_EX_CMD_ADD:
3638                                        entry->id = FLOW_ACTION_ADD;
3639                                        break;
3640                                default:
3641                                        err = -EOPNOTSUPP;
3642                                        goto err_out_locked;
3643                                }
3644                                entry->mangle.htype = tcf_pedit_htype(act, k);
3645                                entry->mangle.mask = tcf_pedit_mask(act, k);
3646                                entry->mangle.val = tcf_pedit_val(act, k);
3647                                entry->mangle.offset = tcf_pedit_offset(act, k);
3648                                entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3649                                entry = &flow_action->entries[++j];
3650                        }
3651                } else if (is_tcf_csum(act)) {
3652                        entry->id = FLOW_ACTION_CSUM;
3653                        entry->csum_flags = tcf_csum_update_flags(act);
3654                } else if (is_tcf_skbedit_mark(act)) {
3655                        entry->id = FLOW_ACTION_MARK;
3656                        entry->mark = tcf_skbedit_mark(act);
3657                } else if (is_tcf_sample(act)) {
3658                        entry->id = FLOW_ACTION_SAMPLE;
3659                        entry->sample.trunc_size = tcf_sample_trunc_size(act);
3660                        entry->sample.truncate = tcf_sample_truncate(act);
3661                        entry->sample.rate = tcf_sample_rate(act);
3662                        tcf_sample_get_group(entry, act);
3663                } else if (is_tcf_police(act)) {
3664                        entry->id = FLOW_ACTION_POLICE;
3665                        entry->police.burst = tcf_police_burst(act);
3666                        entry->police.rate_bytes_ps =
3667                                tcf_police_rate_bytes_ps(act);
3668                        entry->police.mtu = tcf_police_tcfp_mtu(act);
3669                        entry->police.index = act->tcfa_index;
3670                } else if (is_tcf_ct(act)) {
3671                        entry->id = FLOW_ACTION_CT;
3672                        entry->ct.action = tcf_ct_action(act);
3673                        entry->ct.zone = tcf_ct_zone(act);
3674                        entry->ct.flow_table = tcf_ct_ft(act);
3675                } else if (is_tcf_mpls(act)) {
3676                        switch (tcf_mpls_action(act)) {
3677                        case TCA_MPLS_ACT_PUSH:
3678                                entry->id = FLOW_ACTION_MPLS_PUSH;
3679                                entry->mpls_push.proto = tcf_mpls_proto(act);
3680                                entry->mpls_push.label = tcf_mpls_label(act);
3681                                entry->mpls_push.tc = tcf_mpls_tc(act);
3682                                entry->mpls_push.bos = tcf_mpls_bos(act);
3683                                entry->mpls_push.ttl = tcf_mpls_ttl(act);
3684                                break;
3685                        case TCA_MPLS_ACT_POP:
3686                                entry->id = FLOW_ACTION_MPLS_POP;
3687                                entry->mpls_pop.proto = tcf_mpls_proto(act);
3688                                break;
3689                        case TCA_MPLS_ACT_MODIFY:
3690                                entry->id = FLOW_ACTION_MPLS_MANGLE;
3691                                entry->mpls_mangle.label = tcf_mpls_label(act);
3692                                entry->mpls_mangle.tc = tcf_mpls_tc(act);
3693                                entry->mpls_mangle.bos = tcf_mpls_bos(act);
3694                                entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3695                                break;
3696                        default:
                                err = -EOPNOTSUPP;
3697                                goto err_out_locked;
3698                        }
3699                } else if (is_tcf_skbedit_ptype(act)) {
3700                        entry->id = FLOW_ACTION_PTYPE;
3701                        entry->ptype = tcf_skbedit_ptype(act);
3702                } else if (is_tcf_skbedit_priority(act)) {
3703                        entry->id = FLOW_ACTION_PRIORITY;
3704                        entry->priority = tcf_skbedit_priority(act);
3705                } else if (is_tcf_gate(act)) {
3706                        entry->id = FLOW_ACTION_GATE;
3707                        entry->gate.index = tcf_gate_index(act);
3708                        entry->gate.prio = tcf_gate_prio(act);
3709                        entry->gate.basetime = tcf_gate_basetime(act);
3710                        entry->gate.cycletime = tcf_gate_cycletime(act);
3711                        entry->gate.cycletimeext = tcf_gate_cycletimeext(act);
3712                        entry->gate.num_entries = tcf_gate_num_entries(act);
3713                        err = tcf_gate_get_entries(entry, act);
3714                        if (err)
3715                                goto err_out_locked;
3716                } else {
3717                        err = -EOPNOTSUPP;
3718                        goto err_out_locked;
3719                }
3720                spin_unlock_bh(&act->tcfa_lock);
3721
3722                if (!is_tcf_pedit(act))
3723                        j++;
3724        }
3725
3726err_out:
3727        if (err)
3728                tc_cleanup_flow_action(flow_action);
3729
3730        return err;
3731err_out_locked:
3732        spin_unlock_bh(&act->tcfa_lock);
3733        goto err_out;
3734}
3735EXPORT_SYMBOL(tc_setup_flow_action);
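
/* Consumer sketch (illustrative): a driver walks the table built above
 * with flow_action_for_each() and dispatches on entry->id (the foo_*
 * helpers are hypothetical):
 *
 *	flow_action_for_each(i, entry, &rule->action) {
 *		switch (entry->id) {
 *		case FLOW_ACTION_DROP:
 *			foo_add_drop_action(priv);
 *			break;
 *		case FLOW_ACTION_REDIRECT:
 *			foo_add_fwd_action(priv, entry->dev);
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */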
3736
3737unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3738{
3739        unsigned int num_acts = 0;
3740        struct tc_action *act;
3741        int i;
3742
3743        tcf_exts_for_each_action(i, act, exts) {
3744                if (is_tcf_pedit(act))
3745                        num_acts += tcf_pedit_nkeys(act);
3746                else
3747                        num_acts++;
3748        }
3749        return num_acts;
3750}
3751EXPORT_SYMBOL(tcf_exts_num_actions);
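
/* Sizing sketch (illustrative): the count above is what sizes the
 * flow_rule that tc_setup_flow_action() later fills - one entry per
 * action, plus one per pedit key - e.g.:
 *
 *	cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts));
 *	if (!cls_flower.rule)
 *		return -ENOMEM;
 *	err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
 */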
3752
3753#ifdef CONFIG_NET_CLS_ACT
3754static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
3755                                        u32 *p_block_index,
3756                                        struct netlink_ext_ack *extack)
3757{
3758        *p_block_index = nla_get_u32(block_index_attr);
3759        if (!*p_block_index) {
3760                NL_SET_ERR_MSG(extack, "Block number may not be zero");
3761                return -EINVAL;
3762        }
3763
3764        return 0;
3765}
3766
3767int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
3768                    enum flow_block_binder_type binder_type,
3769                    struct nlattr *block_index_attr,
3770                    struct netlink_ext_ack *extack)
3771{
3772        u32 block_index;
3773        int err;
3774
3775        if (!block_index_attr)
3776                return 0;
3777
3778        err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3779        if (err)
3780                return err;
3781
3785        qe->info.binder_type = binder_type;
3786        qe->info.chain_head_change = tcf_chain_head_change_dflt;
3787        qe->info.chain_head_change_priv = &qe->filter_chain;
3788        qe->info.block_index = block_index;
3789
3790        return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
3791}
3792EXPORT_SYMBOL(tcf_qevent_init);
3793
3794void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
3795{
3796        if (qe->info.block_index)
3797                tcf_block_put_ext(qe->block, sch, &qe->info);
3798}
3799EXPORT_SYMBOL(tcf_qevent_destroy);
3800
3801int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
3802                               struct netlink_ext_ack *extack)
3803{
3804        u32 block_index;
3805        int err;
3806
3807        if (!block_index_attr)
3808                return 0;
3809
3810        err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
3811        if (err)
3812                return err;
3813
3814        /* Bounce a newly configured block or a change of block. */
3815        if (block_index != qe->info.block_index) {
3816                NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
3817                return -EINVAL;
3818        }
3819
3820        return 0;
3821}
3822EXPORT_SYMBOL(tcf_qevent_validate_change);
3823
3824struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
3825                                  struct sk_buff **to_free, int *ret)
3826{
3827        struct tcf_result cl_res;
3828        struct tcf_proto *fl;
3829
3830        if (!qe->info.block_index)
3831                return skb;
3832
3833        fl = rcu_dereference_bh(qe->filter_chain);
3834
3835        switch (tcf_classify(skb, fl, &cl_res, false)) {
3836        case TC_ACT_SHOT:
3837                qdisc_qstats_drop(sch);
3838                __qdisc_drop(skb, to_free);
3839                *ret = __NET_XMIT_BYPASS;
3840                return NULL;
3841        case TC_ACT_STOLEN:
3842        case TC_ACT_QUEUED:
3843        case TC_ACT_TRAP:
3844                __qdisc_drop(skb, to_free);
3845                *ret = __NET_XMIT_STOLEN;
3846                return NULL;
3847        case TC_ACT_REDIRECT:
3848                skb_do_redirect(skb);
3849                *ret = __NET_XMIT_STOLEN;
3850                return NULL;
3851        }
3852
3853        return skb;
3854}
3855EXPORT_SYMBOL(tcf_qevent_handle);
3856
3857int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
3858{
3859        if (!qe->info.block_index)
3860                return 0;
3861        return nla_put_u32(skb, attr_name, qe->info.block_index);
3862}
3863EXPORT_SYMBOL(tcf_qevent_dump);
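
/* Lifetime sketch (illustrative, modelled on the early_drop qevent of
 * sch_red): a qdisc wires a qevent through four of its ops:
 *
 *	->init():    tcf_qevent_init(&q->qe, sch,
 *				     FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *				     tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *	->enqueue(): skb = tcf_qevent_handle(&q->qe, sch, skb, to_free, &ret);
 *		     if (!skb)
 *			     return ret;
 *	->dump():    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe);
 *	->destroy(): tcf_qevent_destroy(&q->qe, sch);
 */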
3864#endif
3865
3866static __net_init int tcf_net_init(struct net *net)
3867{
3868        struct tcf_net *tn = net_generic(net, tcf_net_id);
3869
3870        spin_lock_init(&tn->idr_lock);
3871        idr_init(&tn->idr);
3872        return 0;
3873}
3874
3875static void __net_exit tcf_net_exit(struct net *net)
3876{
3877        struct tcf_net *tn = net_generic(net, tcf_net_id);
3878
3879        idr_destroy(&tn->idr);
3880}
3881
3882static struct pernet_operations tcf_net_ops = {
3883        .init = tcf_net_init,
3884        .exit = tcf_net_exit,
3885        .id   = &tcf_net_id,
3886        .size = sizeof(struct tcf_net),
3887};
3888
3889static int __init tc_filter_init(void)
3890{
3891        int err;
3892
3893        tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3894        if (!tc_filter_wq)
3895                return -ENOMEM;
3896
3897        err = register_pernet_subsys(&tcf_net_ops);
3898        if (err)
3899                goto err_register_pernet_subsys;
3900
3901        rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3902                      RTNL_FLAG_DOIT_UNLOCKED);
3903        rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3904                      RTNL_FLAG_DOIT_UNLOCKED);
3905        rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3906                      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3907        rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3908        rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3909        rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3910                      tc_dump_chain, 0);
3911
3912        return 0;
3913
3914err_register_pernet_subsys:
3915        destroy_workqueue(tc_filter_wq);
3916        return err;
3917}
3918
3919subsys_initcall(tc_filter_init);
3920