linux/net/sched/cls_api.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_api.c  Packet classifier API.
 *
 * Authors:     Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/jhash.h>
#include <linux/rculist.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_police.h>
#include <net/tc_act/tc_sample.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_ct.h>
#include <net/tc_act/tc_mpls.h>
#include <net/flow_offload.h>

extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects the list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
{
	return jhash_3words(tp->chain->index, tp->prio,
			    (__force __u32)tp->protocol, 0);
}

static void tcf_proto_signal_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
		     destroy_obj_hashfn(tp));
	mutex_unlock(&block->proto_destroy_lock);
}

static bool tcf_proto_cmp(const struct tcf_proto *tp1,
			  const struct tcf_proto *tp2)
{
	return tp1->chain->index == tp2->chain->index &&
	       tp1->prio == tp2->prio &&
	       tp1->protocol == tp2->protocol;
}

static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
					struct tcf_proto *tp)
{
	u32 hash = destroy_obj_hashfn(tp);
	struct tcf_proto *iter;
	bool found = false;

	rcu_read_lock();
	hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
				   destroy_ht_node, hash) {
		if (tcf_proto_cmp(tp, iter)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}

static void
tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
{
	struct tcf_block *block = chain->block;

	mutex_lock(&block->proto_destroy_lock);
	if (hash_hashed(&tp->destroy_ht_node))
		hash_del_rcu(&tp->destroy_ht_node);
	mutex_unlock(&block->proto_destroy_lock);
}

/* Find classifier type by string name */

static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

static const struct tcf_proto_ops *
tcf_proto_lookup_ops(const char *kind, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	const struct tcf_proto_ops *ops;

	ops = __tcf_proto_lookup_ops(kind);
	if (ops)
		return ops;
#ifdef CONFIG_MODULES
	if (rtnl_held)
		rtnl_unlock();
	request_module("cls_%s", kind);
	if (rtnl_held)
		rtnl_lock();
	ops = __tcf_proto_lookup_ops(kind);
	/* We dropped the RTNL semaphore in order to perform
	 * the module load. So, even if we succeeded in loading
	 * the module we have to replay the request. We indicate
	 * this using -EAGAIN.
	 */
	if (ops) {
		module_put(ops->owner);
		return ERR_PTR(-EAGAIN);
	}
#endif
	NL_SET_ERR_MSG(extack, "TC classifier not found");
	return ERR_PTR(-ENOENT);
}

/* Register (unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
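
/*
 * Example (editor's illustrative sketch, not part of the original file):
 * a minimal classifier module built around the register/unregister pair
 * above. All "foo" names are hypothetical; real classifiers such as
 * cls_basic follow the same pattern. The module must be named cls_foo so
 * that tcf_proto_lookup_ops() can auto-load it via request_module("cls_%s").
 * Guarded by #if 0 so this file is unchanged for the compiler.
 */
#if 0
static int foo_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	return -1;	/* no match; continue with the next tp */
}

static int foo_init(struct tcf_proto *tp)
{
	return 0;
}

static void foo_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
}

static void *foo_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

static struct tcf_proto_ops cls_foo_ops __read_mostly = {
	.kind		= "foo",
	.classify	= foo_classify,
	.init		= foo_init,
	.destroy	= foo_destroy,
	.get		= foo_get,
	.owner		= THIS_MODULE,
};

static int __init cls_foo_init(void)
{
	return register_tcf_proto_ops(&cls_foo_ops);
}

static void __exit cls_foo_exit(void)
{
	unregister_tcf_proto_ops(&cls_foo_ops);
}

module_init(cls_foo_init);
module_exit(cls_foo_exit);
MODULE_LICENSE("GPL");
#endif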

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}
EXPORT_SYMBOL(tcf_queue_work);
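
/*
 * Example (editor's sketch, not part of the original file): classifiers
 * typically embed a struct rcu_work in their filter and hand the final
 * free to tcf_queue_work(), so it runs on tc_filter_wq after an RCU grace
 * period. The foo_* names are hypothetical; cls_basic uses this pattern.
 */
#if 0
struct foo_filter {
	struct tcf_result	res;
	struct rcu_work		rwork;
};

static void foo_delete_filter_work(struct work_struct *work)
{
	struct foo_filter *f = container_of(to_rcu_work(work),
					    struct foo_filter, rwork);

	rtnl_lock();
	/* release actions/extensions owned by the filter here */
	rtnl_unlock();
	kfree(f);
}

static void foo_delete_filter(struct foo_filter *f)
{
	/* queued instead of kfree_rcu(), because freeing may sleep */
	tcf_queue_work(&f->rwork, foo_delete_filter_work);
}
#endif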

/* Select a new prio value from the range managed by the kernel. */
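/* For example (editor's note): with no filters installed yet, tp is NULL
 * and this returns 0xC0000000, i.e. user-visible prio 0xC000. If the head
 * filter has tp->prio 0xC0000000, then TC_H_MAJ(0xC0000000 - 1) =
 * 0xBFFF0000, so the new filter is placed one prio step (0x10000) below
 * the current head.
 */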
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
{
	if (kind)
		return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
	memset(name, 0, IFNAMSIZ);
	return false;
}

static bool tcf_proto_is_unlocked(const char *kind)
{
	const struct tcf_proto_ops *ops;
	bool ret;

	if (strlen(kind) == 0)
		return false;

	ops = tcf_proto_lookup_ops(kind, false, NULL);
	/* On error return false to take rtnl lock. Proto lookup/create
	 * functions will perform lookup again and properly handle errors.
	 */
	if (IS_ERR(ops))
		return false;

	ret = !!(ops->flags & TCF_PROTO_OPS_DOIT_UNLOCKED);
	module_put(ops->owner);
	return ret;
}

static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  bool rtnl_held,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	tp->ops = tcf_proto_lookup_ops(kind, rtnl_held, extack);
	if (IS_ERR(tp->ops)) {
		err = PTR_ERR(tp->ops);
		goto errout;
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;
	spin_lock_init(&tp->lock);
	refcount_set(&tp->refcnt, 1);

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_get(struct tcf_proto *tp)
{
	refcount_inc(&tp->refcnt);
}

static void tcf_chain_put(struct tcf_chain *chain);

static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
			      bool sig_destroy, struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, rtnl_held, extack);
	if (sig_destroy)
		tcf_proto_signal_destroyed(tp->chain, tp);
	tcf_chain_put(tp->chain);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	if (refcount_dec_and_test(&tp->refcnt))
		tcf_proto_destroy(tp, rtnl_held, true, extack);
}

static bool tcf_proto_check_delete(struct tcf_proto *tp)
{
	if (tp->ops->delete_empty)
		return tp->ops->delete_empty(tp);

	tp->deleting = true;
	return tp->deleting;
}

static void tcf_proto_mark_delete(struct tcf_proto *tp)
{
	spin_lock(&tp->lock);
	tp->deleting = true;
	spin_unlock(&tp->lock);
}

static bool tcf_proto_is_deleting(struct tcf_proto *tp)
{
	bool deleting;

	spin_lock(&tp->lock);
	deleting = tp->deleting;
	spin_unlock(&tp->lock);

	return deleting;
}

#define ASSERT_BLOCK_LOCKED(block)					\
	lockdep_assert_held(&(block)->lock)

struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	list_add_tail_rcu(&chain->list, &block->chain_list);
	mutex_init(&chain->filter_chain_lock);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	if (!chain->index)
		block->chain0.chain = chain;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain0_head_change(struct tcf_chain *chain,
				   struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_block *block = chain->block;

	if (chain->index)
		return;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
	mutex_unlock(&block->lock);
}

/* Returns true if block can be safely freed. */

static bool tcf_chain_detach(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	ASSERT_BLOCK_LOCKED(block);

	list_del_rcu(&chain->list);
	if (!chain->index)
		block->chain0.chain = NULL;

	if (list_empty(&block->chain_list) &&
	    refcount_read(&block->refcnt) == 0)
		return true;

	return false;
}

static void tcf_block_destroy(struct tcf_block *block)
{
	mutex_destroy(&block->lock);
	mutex_destroy(&block->proto_destroy_lock);
	kfree_rcu(block, rcu);
}

static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
{
	struct tcf_block *block = chain->block;

	mutex_destroy(&chain->filter_chain_lock);
	kfree_rcu(chain, rcu);
	if (free_block)
		tcf_block_destroy(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	++chain->refcnt;
}

static bool tcf_chain_held_by_acts_only(struct tcf_chain *chain)
{
	ASSERT_BLOCK_LOCKED(chain->block);

	/* In case all the references are action references, this
	 * chain should not be shown to the user.
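	 * This is the case e.g. for a chain that exists only because
	 * TC_ACT_GOTO_CHAIN actions (added via tcf_chain_get_by_act())
	 * point at it.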
	 */
	return chain->refcnt == chain->action_refcnt;
}

static struct tcf_chain *tcf_chain_lookup(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	ASSERT_BLOCK_LOCKED(block);

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static struct tcf_chain *tcf_chain_lookup_rcu(const struct tcf_block *block,
					      u32 chain_index)
{
	struct tcf_chain *chain;

	list_for_each_entry_rcu(chain, &block->chain_list, list) {
		if (chain->index == chain_index)
			return chain;
	}
	return NULL;
}
#endif

static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
			   u32 seq, u16 flags, int event, bool unicast);

static struct tcf_chain *__tcf_chain_get(struct tcf_block *block,
					 u32 chain_index, bool create,
					 bool by_act)
{
	struct tcf_chain *chain = NULL;
	bool is_first_reference;

	mutex_lock(&block->lock);
	chain = tcf_chain_lookup(block, chain_index);
	if (chain) {
		tcf_chain_hold(chain);
	} else {
		if (!create)
			goto errout;
		chain = tcf_chain_create(block, chain_index);
		if (!chain)
			goto errout;
	}

	if (by_act)
		++chain->action_refcnt;
	is_first_reference = chain->refcnt - chain->action_refcnt == 1;
	mutex_unlock(&block->lock);

	/* Send notification only in case we got the first
	 * non-action reference. Until then, the chain acts only as
	 * a placeholder for actions pointing to it and the user ought
	 * not to know about them.
	 */
	if (is_first_reference && !by_act)
		tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
				RTM_NEWCHAIN, false);

	return chain;

errout:
	mutex_unlock(&block->lock);
	return chain;
}

static struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				       bool create)
{
	return __tcf_chain_get(block, chain_index, create, false);
}

struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block, u32 chain_index)
{
	return __tcf_chain_get(block, chain_index, true, true);
}
EXPORT_SYMBOL(tcf_chain_get_by_act);
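
/*
 * Usage note (editor's addition): the goto_chain action code in
 * net/sched/act_api.c is the consumer of this pair - it takes its chain
 * reference with tcf_chain_get_by_act() when an action is installed and
 * drops it with tcf_chain_put_by_act() (defined below) when the action
 * is released.
 */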

static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
			       void *tmplt_priv);
static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
				  void *tmplt_priv, u32 chain_index,
				  struct tcf_block *block, struct sk_buff *oskb,
				  u32 seq, u16 flags, bool unicast);

static void __tcf_chain_put(struct tcf_chain *chain, bool by_act,
			    bool explicitly_created)
{
	struct tcf_block *block = chain->block;
	const struct tcf_proto_ops *tmplt_ops;
	bool free_block = false;
	unsigned int refcnt;
	void *tmplt_priv;

	mutex_lock(&block->lock);
	if (explicitly_created) {
		if (!chain->explicitly_created) {
			mutex_unlock(&block->lock);
			return;
		}
		chain->explicitly_created = false;
	}

	if (by_act)
		chain->action_refcnt--;

	/* tc_chain_notify_delete can't be called while holding block lock.
	 * However, when block is unlocked chain can be changed concurrently, so
	 * save these to temporary variables.
	 */
	refcnt = --chain->refcnt;
	tmplt_ops = chain->tmplt_ops;
	tmplt_priv = chain->tmplt_priv;

	/* The last dropped non-action reference will trigger notification. */
	if (refcnt - chain->action_refcnt == 0 && !by_act) {
		tc_chain_notify_delete(tmplt_ops, tmplt_priv, chain->index,
				       block, NULL, 0, 0, false);
		/* Last reference to chain, no need to lock. */
		chain->flushing = false;
	}

	if (refcnt == 0)
		free_block = tcf_chain_detach(chain);
	mutex_unlock(&block->lock);

	if (refcnt == 0) {
		tc_chain_tmplt_del(tmplt_ops, tmplt_priv);
		tcf_chain_destroy(chain, free_block);
	}
}

static void tcf_chain_put(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, false);
}

void tcf_chain_put_by_act(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, true, false);
}
EXPORT_SYMBOL(tcf_chain_put_by_act);

static void tcf_chain_put_explicitly_created(struct tcf_chain *chain)
{
	__tcf_chain_put(chain, false, true);
}

static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
{
	struct tcf_proto *tp, *tp_next;

	mutex_lock(&chain->filter_chain_lock);
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_signal_destroying(chain, tp);
		tp = tp_next;
	}
	tp = tcf_chain_dereference(chain->filter_chain, chain);
	RCU_INIT_POINTER(chain->filter_chain, NULL);
	tcf_chain0_head_change(chain, NULL);
	chain->flushing = true;
	mutex_unlock(&chain->filter_chain_lock);

	while (tp) {
		tp_next = rcu_dereference_protected(tp->next, 1);
		tcf_proto_put(tp, rtnl_held, NULL);
		tp = tp_next;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo);

static void tc_indr_block_cmd(struct net_device *dev, struct tcf_block *block,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command command, bool ingress)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ingress ?
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS :
				  FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
		.net		= dev_net(dev),
		.block_shared	= tcf_block_non_null_shared(block),
	};
	INIT_LIST_HEAD(&bo.cb_list);

	if (!block)
		return;

	bo.block = &block->flow_block;

	down_write(&block->cb_lock);
	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	tcf_block_setup(block, &bo);
	up_write(&block->cb_lock);
}

static struct tcf_block *tc_dev_block(struct net_device *dev, bool ingress)
{
	const struct Qdisc_class_ops *cops;
	const struct Qdisc_ops *ops;
	struct Qdisc *qdisc;

	if (!dev_ingress_queue(dev))
		return NULL;

	qdisc = dev_ingress_queue(dev)->qdisc_sleeping;
	if (!qdisc)
		return NULL;

	ops = qdisc->ops;
	if (!ops)
		return NULL;

	if (!ingress && !strcmp("ingress", ops->id))
		return NULL;

	cops = ops->cl_ops;
	if (!cops)
		return NULL;

	if (!cops->tcf_block)
		return NULL;

	return cops->tcf_block(qdisc,
			       ingress ? TC_H_MIN_INGRESS : TC_H_MIN_EGRESS,
			       NULL);
}

static void tc_indr_block_get_and_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
{
	struct tcf_block *block;

	block = tc_dev_block(dev, true);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, true);

	block = tc_dev_block(dev, false);
	tc_indr_block_cmd(dev, block, cb, cb_priv, command, false);
}

static void tc_indr_block_call(struct tcf_block *block,
			       struct net_device *dev,
			       struct tcf_block_ext_info *ei,
			       enum flow_block_command command,
			       struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {
		.command	= command,
		.binder_type	= ei->binder_type,
		.net		= dev_net(dev),
		.block		= &block->flow_block,
		.block_shared	= tcf_block_shared(block),
		.extack		= extack,
	};
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, command, TC_SETUP_BLOCK);
	tcf_block_setup(block, &bo);
}

static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return atomic_read(&block->offloadcnt);
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum flow_block_command command,
				 struct netlink_ext_ack *extack)
{
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = &block->flow_block;
	bo.block_shared = tcf_block_shared(block);
	bo.extack = extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return tcf_block_setup(block, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei,
				  struct netlink_ext_ack *extack)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If the tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid the bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block)) {
		NL_SET_ERR_MSG(extack, "Bind to offloaded block failed as dev has offload disabled");
		err = -EOPNOTSUPP;
		goto err_unlock;
	}

	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	if (err)
		goto err_unlock;

	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
	up_write(&block->cb_lock);
	return 0;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block)) {
		err = -EOPNOTSUPP;
		goto err_unlock;
	}
	err = 0;
	block->nooffloaddevcnt++;
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_BIND, extack);
err_unlock:
	up_write(&block->cb_lock);
	return err;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	down_write(&block->cb_lock);
	tc_indr_block_call(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	up_write(&block->cb_lock);
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
	up_write(&block->cb_lock);
}

static int
tcf_chain0_head_change_cb_add(struct tcf_block *block,
			      struct tcf_block_ext_info *ei,
			      struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;
	struct tcf_chain *chain0;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;

	mutex_lock(&block->lock);
	chain0 = block->chain0.chain;
	if (chain0)
		tcf_chain_hold(chain0);
	else
		list_add(&item->list, &block->chain0.filter_chain_list);
	mutex_unlock(&block->lock);

	if (chain0) {
		struct tcf_proto *tp_head;

		mutex_lock(&chain0->filter_chain_lock);

		tp_head = tcf_chain_dereference(chain0->filter_chain, chain0);
		if (tp_head)
			tcf_chain_head_change_item(item, tp_head);

		mutex_lock(&block->lock);
		list_add(&item->list, &block->chain0.filter_chain_list);
		mutex_unlock(&block->lock);

		mutex_unlock(&chain0->filter_chain_lock);
		tcf_chain_put(chain0);
	}

	return 0;
}

static void
tcf_chain0_head_change_cb_del(struct tcf_block *block,
			      struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	mutex_lock(&block->lock);
	list_for_each_entry(item, &block->chain0.filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			if (block->chain0.chain)
				tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			mutex_unlock(&block->lock);

			kfree(item);
			return;
		}
	}
	mutex_unlock(&block->lock);
	WARN_ON(1);
}

struct tcf_net {
	spinlock_t idr_lock; /* Protects idr */
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&tn->idr_lock);
	err = idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			    GFP_NOWAIT);
	spin_unlock(&tn->idr_lock);
	idr_preload_end();

	return err;
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	spin_lock(&tn->idr_lock);
	idr_remove(&tn->idr, block->index);
	spin_unlock(&tn->idr_lock);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&block->lock);
	mutex_init(&block->proto_destroy_lock);
	init_rwsem(&block->cb_lock);
	flow_block_init(&block->flow_block);
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->owner_list);
	INIT_LIST_HEAD(&block->chain0.filter_chain_list);

	refcount_set(&block->refcnt, 1);
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_block *tcf_block_refcnt_get(struct net *net, u32 block_index)
{
	struct tcf_block *block;

	rcu_read_lock();
	block = tcf_block_lookup(net, block_index);
	if (block && !refcount_inc_not_zero(&block->refcnt))
		block = NULL;
	rcu_read_unlock();

	return block;
}

static struct tcf_chain *
__tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	mutex_lock(&block->lock);
	if (chain)
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);
	else
		chain = list_first_entry_or_null(&block->chain_list,
						 struct tcf_chain, list);

	/* skip all action-only chains */
	while (chain && tcf_chain_held_by_acts_only(chain))
		chain = list_is_last(&chain->list, &block->chain_list) ?
			NULL : list_next_entry(chain, list);

	if (chain)
		tcf_chain_hold(chain);
	mutex_unlock(&block->lock);

	return chain;
}

/* Function to be used by all clients that want to iterate over all chains on
 * block. It properly obtains block->lock and takes reference to chain before
 * returning it. Users of this function must be tolerant to concurrent chain
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump, because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_chain *
tcf_get_next_chain(struct tcf_block *block, struct tcf_chain *chain)
{
	struct tcf_chain *chain_next = __tcf_get_next_chain(block, chain);

	if (chain)
		tcf_chain_put(chain);

	return chain_next;
}
EXPORT_SYMBOL(tcf_get_next_chain);
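
/*
 * Example (editor's sketch, not part of the original file): the canonical
 * iteration pattern; tcf_block_flush_all_chains() below uses exactly this
 * loop. The helper itself drops the reference to the previously returned
 * chain on each step. Guarded by #if 0.
 */
#if 0
static void example_walk_chains(struct tcf_block *block)
{
	struct tcf_chain *chain;

	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		/* inspect chain here */
	}
}
#endif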

static struct tcf_proto *
__tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp)
{
	u32 prio = 0;

	ASSERT_RTNL();
	mutex_lock(&chain->filter_chain_lock);

	if (!tp) {
		tp = tcf_chain_dereference(chain->filter_chain, chain);
	} else if (tcf_proto_is_deleting(tp)) {
		/* 'deleting' flag is set and chain->filter_chain_lock was
		 * unlocked, which means next pointer could be invalid. Restart
		 * search.
		 */
		prio = tp->prio + 1;
		tp = tcf_chain_dereference(chain->filter_chain, chain);

		for (; tp; tp = tcf_chain_dereference(tp->next, chain))
			if (!tp->deleting && tp->prio >= prio)
				break;
	} else {
		tp = tcf_chain_dereference(tp->next, chain);
	}

	if (tp)
		tcf_proto_get(tp);

	mutex_unlock(&chain->filter_chain_lock);

	return tp;
}

/* Function to be used by all clients that want to iterate over all tp's on
 * chain. Users of this function must be tolerant to concurrent tp
 * insertion/deletion or ensure that no concurrent chain modification is
 * possible. Note that netlink dump callbacks cannot guarantee a consistent
 * dump, because the rtnl lock is released each time the skb is filled with
 * data and sent to user-space.
 */

struct tcf_proto *
tcf_get_next_proto(struct tcf_chain *chain, struct tcf_proto *tp,
		   bool rtnl_held)
{
	struct tcf_proto *tp_next = __tcf_get_next_proto(chain, tp);

	if (tp)
		tcf_proto_put(tp, rtnl_held, NULL);

	return tp_next;
}
EXPORT_SYMBOL(tcf_get_next_proto);
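
/*
 * Example (editor's sketch, not part of the original file): walking every
 * filter on a chain under rtnl, as the netlink dump path does. The helper
 * drops the reference to the previously returned tp on each step. Guarded
 * by #if 0.
 */
#if 0
static void example_walk_protos(struct tcf_chain *chain)
{
	struct tcf_proto *tp;

	for (tp = tcf_get_next_proto(chain, NULL, true);
	     tp;
	     tp = tcf_get_next_proto(chain, tp, true)) {
		/* inspect tp here */
	}
}
#endif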

static void tcf_block_flush_all_chains(struct tcf_block *block, bool rtnl_held)
{
	struct tcf_chain *chain;

	/* Last reference to block. At this point chains cannot be added or
	 * removed concurrently.
	 */
	for (chain = tcf_get_next_chain(block, NULL);
	     chain;
	     chain = tcf_get_next_chain(block, chain)) {
		tcf_chain_put_explicitly_created(chain);
		tcf_chain_flush(chain, rtnl_held);
	}
}

/* Look up the Qdisc and increment its reference counter.
 * Set parent, if necessary.
 */

static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
			    u32 *parent, int ifindex, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	const struct Qdisc_class_ops *cops;
	struct net_device *dev;
	int err = 0;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	rcu_read_lock();

	/* Find link */
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Find qdisc */
	if (!*parent) {
		*q = dev->qdisc;
		*parent = (*q)->handle;
	} else {
		*q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
		if (!*q) {
			NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
			err = -EINVAL;
			goto errout_rcu;
		}
	}

	*q = qdisc_refcount_inc_nz(*q);
	if (!*q) {
		NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exist");
		err = -EINVAL;
		goto errout_rcu;
	}

	/* Is it classful? */
	cops = (*q)->ops->cl_ops;
	if (!cops) {
		NL_SET_ERR_MSG(extack, "Qdisc not classful");
		err = -EINVAL;
		goto errout_qdisc;
	}

	if (!cops->tcf_block) {
		NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
		err = -EOPNOTSUPP;
		goto errout_qdisc;
	}

errout_rcu:
	/* At this point we know that qdisc is not noop_qdisc,
	 * which means that qdisc holds a reference to net_device
	 * and we hold a reference to qdisc, so it is safe to release
	 * rcu read lock.
	 */
	rcu_read_unlock();
	return err;

errout_qdisc:
	rcu_read_unlock();

	if (rtnl_held)
		qdisc_put(*q);
	else
		qdisc_put_unlocked(*q);
	*q = NULL;

	return err;
}

static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
			       int ifindex, struct netlink_ext_ack *extack)
{
	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK)
		return 0;

	/* Do we search for a filter attached to a class? */
	if (TC_H_MIN(parent)) {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		*cl = cops->find(q, parent);
		if (*cl == 0) {
			NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
			return -ENOENT;
		}
	}

	return 0;
}

static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
					  unsigned long cl, int ifindex,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;

	if (ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_refcnt_get(net, block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			return ERR_PTR(-EINVAL);
		}
	} else {
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		block = cops->tcf_block(q, cl, extack);
		if (!block)
			return ERR_PTR(-EINVAL);

		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			return ERR_PTR(-EOPNOTSUPP);
		}

		/* Always take a reference to the block in order to support
		 * execution of the rule update path of cls API without the
		 * rtnl lock. The caller must release the block when it is
		 * finished using it. The 'if' branch of this conditional
		 * obtains its reference by calling tcf_block_refcnt_get().
		 */
		refcount_inc(&block->refcnt);
	}

	return block;
}

static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
			    struct tcf_block_ext_info *ei, bool rtnl_held)
{
	if (refcount_dec_and_mutex_lock(&block->refcnt, &block->lock)) {
		/* Flushing/putting all chains will cause the block to be
		 * deallocated when last chain is freed. However, if chain_list
		 * is empty, block has to be manually deallocated. After block
		 * reference counter reached 0, it is no longer possible to
		 * increment it or add new chains to block.
		 */
		bool free_block = list_empty(&block->chain_list);

		mutex_unlock(&block->lock);
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		if (q)
			tcf_block_offload_unbind(block, q, ei);

		if (free_block)
			tcf_block_destroy(block);
		else
			tcf_block_flush_all_chains(block, rtnl_held);
	} else if (q) {
		tcf_block_offload_unbind(block, q, ei);
	}
}

static void tcf_block_refcnt_put(struct tcf_block *block, bool rtnl_held)
{
	__tcf_block_put(block, NULL, NULL, rtnl_held);
}

/* Find tcf block.
 * Set q, parent, cl when appropriate.
 */

static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
					u32 *parent, unsigned long *cl,
					int ifindex, u32 block_index,
					struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	int err = 0;

	ASSERT_RTNL();

	err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
	if (err)
		goto errout;

	err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
	if (err)
		goto errout_qdisc;

	block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
	if (IS_ERR(block)) {
		err = PTR_ERR(block);
		goto errout_qdisc;
	}

	return block;

errout_qdisc:
	if (*q)
		qdisc_put(*q);
errout:
	*q = NULL;
	return ERR_PTR(err);
}

static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
			      bool rtnl_held)
{
	if (!IS_ERR_OR_NULL(block))
		tcf_block_refcnt_put(block, rtnl_held);

	if (q) {
		if (rtnl_held)
			qdisc_put(q);
		else
			qdisc_put_unlocked(q);
	}
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum flow_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum flow_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	int err;

	if (ei->block_index)
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_refcnt_get(net, ei->block_index);

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain0_head_change_cb_add(block, ei, extack);
	if (err)
		goto err_chain0_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei, extack);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
err_block_insert:
	tcf_block_refcnt_put(block, true);
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);
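
/*
 * Example (editor's sketch, not part of the original file): a qdisc
 * typically acquires its block in ->init() and releases it in ->destroy().
 * The foo_* names are hypothetical; sch_fq_codel makes the same calls.
 * Guarded by #if 0.
 */
#if 0
struct foo_sched_data {
	struct tcf_block	*block;
	struct tcf_proto __rcu	*filter_list;
};

static int foo_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	return tcf_block_get(&q->block, &q->filter_list, sch, extack);
}

static void foo_qdisc_destroy(struct Qdisc *sch)
{
	struct foo_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
}
#endif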

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	if (!block)
		return;
	tcf_chain0_head_change_cb_del(block, ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	__tcf_block_put(block, q, ei, true);
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}
EXPORT_SYMBOL(tcf_block_put);

static int
tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
			    void *cb_priv, bool add, bool offload_in_use,
			    struct netlink_ext_ack *extack)
{
	struct tcf_chain *chain, *chain_prev;
	struct tcf_proto *tp, *tp_prev;
	int err;

	lockdep_assert_held(&block->cb_lock);

	for (chain = __tcf_get_next_chain(block, NULL);
	     chain;
	     chain_prev = chain,
		     chain = __tcf_get_next_chain(block, chain),
		     tcf_chain_put(chain_prev)) {
		for (tp = __tcf_get_next_proto(chain, NULL); tp;
		     tp_prev = tp,
			     tp = __tcf_get_next_proto(chain, tp),
			     tcf_proto_put(tp_prev, true, NULL)) {
			if (tp->ops->reoffload) {
				err = tp->ops->reoffload(tp, add, cb, cb_priv,
							 extack);
				if (err && add)
					goto err_playback_remove;
			} else if (add && offload_in_use) {
				err = -EOPNOTSUPP;
				NL_SET_ERR_MSG(extack, "Filter HW offload failed - classifier without re-offloading support");
				goto err_playback_remove;
			}
		}
	}

	return 0;

err_playback_remove:
	tcf_proto_put(tp, true, NULL);
	tcf_chain_put(chain);
	tcf_block_playback_offloads(block, cb, cb_priv, false, offload_in_use,
				    extack);
	return err;
}

static int tcf_block_bind(struct tcf_block *block,
			  struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;
	int err, i = 0;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry(block_cb, &bo->cb_list, list) {
		err = tcf_block_playback_offloads(block, block_cb->cb,
						  block_cb->cb_priv, true,
						  tcf_block_offload_in_use(block),
						  bo->extack);
		if (err)
			goto err_unroll;
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt++;

		i++;
	}
	list_splice(&bo->cb_list, &block->flow_block.cb_list);

	return 0;

err_unroll:
	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		if (i-- > 0) {
			list_del(&block_cb->list);
			tcf_block_playback_offloads(block, block_cb->cb,
						    block_cb->cb_priv, false,
						    tcf_block_offload_in_use(block),
						    NULL);
			if (!bo->unlocked_driver_cb)
				block->lockeddevcnt--;
		}
		flow_block_cb_free(block_cb);
	}

	return err;
}

static void tcf_block_unbind(struct tcf_block *block,
			     struct flow_block_offload *bo)
{
	struct flow_block_cb *block_cb, *next;

	lockdep_assert_held(&block->cb_lock);

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		tcf_block_playback_offloads(block, block_cb->cb,
					    block_cb->cb_priv, false,
					    tcf_block_offload_in_use(block),
					    NULL);
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
		if (!bo->unlocked_driver_cb)
			block->lockeddevcnt--;
	}
}

static int tcf_block_setup(struct tcf_block *block,
			   struct flow_block_offload *bo)
{
	int err;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		err = tcf_block_bind(block, bo);
		break;
	case FLOW_BLOCK_UNBIND:
		err = 0;
		tcf_block_unbind(block, bo);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

/* Main classifier routine: scans the classifier chain attached
 * to this qdisc, (optionally) tests for the protocol and asks
 * specific classifiers.
 */
1577static inline int __tcf_classify(struct sk_buff *skb,
1578                                 const struct tcf_proto *tp,
1579                                 const struct tcf_proto *orig_tp,
1580                                 struct tcf_result *res,
1581                                 bool compat_mode,
1582                                 u32 *last_executed_chain)
1583{
1584#ifdef CONFIG_NET_CLS_ACT
1585        const int max_reclassify_loop = 4;
1586        const struct tcf_proto *first_tp;
1587        int limit = 0;
1588
1589reclassify:
1590#endif
1591        for (; tp; tp = rcu_dereference_bh(tp->next)) {
1592                __be16 protocol = tc_skb_protocol(skb);
1593                int err;
1594
1595                if (tp->protocol != protocol &&
1596                    tp->protocol != htons(ETH_P_ALL))
1597                        continue;
1598
1599                err = tp->classify(skb, tp, res);
1600#ifdef CONFIG_NET_CLS_ACT
1601                if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
1602                        first_tp = orig_tp;
1603                        *last_executed_chain = first_tp->chain->index;
1604                        goto reset;
1605                } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
1606                        first_tp = res->goto_tp;
1607                        *last_executed_chain = err & TC_ACT_EXT_VAL_MASK;
1608                        goto reset;
1609                }
1610#endif
1611                if (err >= 0)
1612                        return err;
1613        }
1614
1615        return TC_ACT_UNSPEC; /* signal: continue lookup */
1616#ifdef CONFIG_NET_CLS_ACT
1617reset:
1618        if (unlikely(limit++ >= max_reclassify_loop)) {
1619                net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
1620                                       tp->chain->block->index,
1621                                       tp->prio & 0xffff,
1622                                       ntohs(tp->protocol));
1623                return TC_ACT_SHOT;
1624        }
1625
1626        tp = first_tp;
1627        goto reclassify;
1628#endif
1629}
1630
1631int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1632                 struct tcf_result *res, bool compat_mode)
1633{
1634        u32 last_executed_chain = 0;
1635
1636        return __tcf_classify(skb, tp, tp, res, compat_mode,
1637                              &last_executed_chain);
1638}
1639EXPORT_SYMBOL(tcf_classify);
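
/* Illustrative caller (a sketch, not part of this file): a classful
 * qdisc's enqueue path typically classifies like this, where
 * q->filter_list is the qdisc's chain 0 head (the convention used by
 * e.g. sch_prio):
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *	int err;
 *
 *	err = tcf_classify(skb, fl, &res, false);
 *	switch (err) {
 *	case TC_ACT_SHOT:
 *	case TC_ACT_STOLEN:
 *		return NULL;	// dropped or consumed by an action
 *	}
 *	// on a match, res.classid selects the target class
 */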
1640
1641int tcf_classify_ingress(struct sk_buff *skb,
1642                         const struct tcf_block *ingress_block,
1643                         const struct tcf_proto *tp,
1644                         struct tcf_result *res, bool compat_mode)
1645{
1646#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
1647        u32 last_executed_chain = 0;
1648
1649        return __tcf_classify(skb, tp, tp, res, compat_mode,
1650                              &last_executed_chain);
1651#else
1652        u32 last_executed_chain = tp ? tp->chain->index : 0;
1653        const struct tcf_proto *orig_tp = tp;
1654        struct tc_skb_ext *ext;
1655        int ret;
1656
1657        ext = skb_ext_find(skb, TC_SKB_EXT);
1658
1659        if (ext && ext->chain) {
1660                struct tcf_chain *fchain;
1661
1662                fchain = tcf_chain_lookup_rcu(ingress_block, ext->chain);
1663                if (!fchain)
1664                        return TC_ACT_SHOT;
1665
1666                /* Consume, so cloned/redirect skbs won't inherit ext */
1667                skb_ext_del(skb, TC_SKB_EXT);
1668
1669                tp = rcu_dereference_bh(fchain->filter_chain);
1670                last_executed_chain = fchain->index;
1671        }
1672
1673        ret = __tcf_classify(skb, tp, orig_tp, res, compat_mode,
1674                             &last_executed_chain);
1675
1676        /* If we missed on some chain, save it so classification can resume */
1677        if (ret == TC_ACT_UNSPEC && last_executed_chain) {
1678                ext = skb_ext_add(skb, TC_SKB_EXT);
1679                if (WARN_ON_ONCE(!ext))
1680                        return TC_ACT_SHOT;
1681                ext->chain = last_executed_chain;
1682        }
1683
1684        return ret;
1685#endif
1686}
1687EXPORT_SYMBOL(tcf_classify_ingress);
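
/* Illustrative example of the chain-resume mechanism above (software
 * path, CONFIG_NET_TC_SKB_EXT enabled; the device name is hypothetical):
 *
 *	tc filter add dev eth0 ingress protocol ip chain 0 \
 *		flower action goto chain 5
 *
 * A packet that takes "goto chain 5" and then matches nothing on chain 5
 * ends classification with TC_ACT_UNSPEC and last_executed_chain == 5,
 * so ext->chain is set to 5 and a subsequent pass resumes there rather
 * than restarting at chain 0.
 */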
1688
1689struct tcf_chain_info {
1690        struct tcf_proto __rcu **pprev;
1691        struct tcf_proto __rcu *next;
1692};
1693
1694static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain *chain,
1695                                           struct tcf_chain_info *chain_info)
1696{
1697        return tcf_chain_dereference(*chain_info->pprev, chain);
1698}
1699
1700static int tcf_chain_tp_insert(struct tcf_chain *chain,
1701                               struct tcf_chain_info *chain_info,
1702                               struct tcf_proto *tp)
1703{
1704        if (chain->flushing)
1705                return -EAGAIN;
1706
1707        if (*chain_info->pprev == chain->filter_chain)
1708                tcf_chain0_head_change(chain, tp);
1709        tcf_proto_get(tp);
1710        RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain, chain_info));
1711        rcu_assign_pointer(*chain_info->pprev, tp);
1712
1713        return 0;
1714}
1715
1716static void tcf_chain_tp_remove(struct tcf_chain *chain,
1717                                struct tcf_chain_info *chain_info,
1718                                struct tcf_proto *tp)
1719{
1720        struct tcf_proto *next = tcf_chain_dereference(chain_info->next, chain);
1721
1722        tcf_proto_mark_delete(tp);
1723        if (tp == chain->filter_chain)
1724                tcf_chain0_head_change(chain, next);
1725        RCU_INIT_POINTER(*chain_info->pprev, next);
1726}
1727
1728static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1729                                           struct tcf_chain_info *chain_info,
1730                                           u32 protocol, u32 prio,
1731                                           bool prio_allocate);
1732
1733/* Try to insert a new proto.
1734 * If a proto with the specified priority already exists, free the new
1735 * proto and return the existing one.
1736 */
1737
1738static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1739                                                    struct tcf_proto *tp_new,
1740                                                    u32 protocol, u32 prio,
1741                                                    bool rtnl_held)
1742{
1743        struct tcf_chain_info chain_info;
1744        struct tcf_proto *tp;
1745        int err = 0;
1746
1747        mutex_lock(&chain->filter_chain_lock);
1748
1749        if (tcf_proto_exists_destroying(chain, tp_new)) {
1750                mutex_unlock(&chain->filter_chain_lock);
1751                tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1752                return ERR_PTR(-EAGAIN);
1753        }
1754
1755        tp = tcf_chain_tp_find(chain, &chain_info,
1756                               protocol, prio, false);
1757        if (!tp)
1758                err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
1759        mutex_unlock(&chain->filter_chain_lock);
1760
1761        if (tp) {
1762                tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1763                tp_new = tp;
1764        } else if (err) {
1765                tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1766                tp_new = ERR_PTR(err);
1767        }
1768
1769        return tp_new;
1770}
1771
1772static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1773                                      struct tcf_proto *tp, bool rtnl_held,
1774                                      struct netlink_ext_ack *extack)
1775{
1776        struct tcf_chain_info chain_info;
1777        struct tcf_proto *tp_iter;
1778        struct tcf_proto **pprev;
1779        struct tcf_proto *next;
1780
1781        mutex_lock(&chain->filter_chain_lock);
1782
1783        /* Atomically find and remove tp from chain. */
1784        for (pprev = &chain->filter_chain;
1785             (tp_iter = tcf_chain_dereference(*pprev, chain));
1786             pprev = &tp_iter->next) {
1787                if (tp_iter == tp) {
1788                        chain_info.pprev = pprev;
1789                        chain_info.next = tp_iter->next;
1790                        WARN_ON(tp_iter->deleting);
1791                        break;
1792                }
1793        }
1794        /* Verify that tp still exists and no new filters were inserted
1795         * concurrently.
1796         * Mark tp for deletion if it is empty.
1797         */
1798        if (!tp_iter || !tcf_proto_check_delete(tp)) {
1799                mutex_unlock(&chain->filter_chain_lock);
1800                return;
1801        }
1802
1803        tcf_proto_signal_destroying(chain, tp);
1804        next = tcf_chain_dereference(chain_info.next, chain);
1805        if (tp == chain->filter_chain)
1806                tcf_chain0_head_change(chain, next);
1807        RCU_INIT_POINTER(*chain_info.pprev, next);
1808        mutex_unlock(&chain->filter_chain_lock);
1809
1810        tcf_proto_put(tp, rtnl_held, extack);
1811}
1812
1813static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
1814                                           struct tcf_chain_info *chain_info,
1815                                           u32 protocol, u32 prio,
1816                                           bool prio_allocate)
1817{
1818        struct tcf_proto **pprev;
1819        struct tcf_proto *tp;
1820
1821        /* Check the chain for an existing proto-tcf with this priority */
1822        for (pprev = &chain->filter_chain;
1823             (tp = tcf_chain_dereference(*pprev, chain));
1824             pprev = &tp->next) {
1825                if (tp->prio >= prio) {
1826                        if (tp->prio == prio) {
1827                                if (prio_allocate ||
1828                                    (tp->protocol != protocol && protocol))
1829                                        return ERR_PTR(-EINVAL);
1830                        } else {
1831                                tp = NULL;
1832                        }
1833                        break;
1834                }
1835        }
1836        chain_info->pprev = pprev;
1837        if (tp) {
1838                chain_info->next = tp->next;
1839                tcf_proto_get(tp);
1840        } else {
1841                chain_info->next = NULL;
1842        }
1843        return tp;
1844}
1845
1846static int tcf_fill_node(struct net *net, struct sk_buff *skb,
1847                         struct tcf_proto *tp, struct tcf_block *block,
1848                         struct Qdisc *q, u32 parent, void *fh,
1849                         u32 portid, u32 seq, u16 flags, int event,
1850                         bool rtnl_held)
1851{
1852        struct tcmsg *tcm;
1853        struct nlmsghdr *nlh;
1854        unsigned char *b = skb_tail_pointer(skb);
1855
1856        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
1857        if (!nlh)
1858                goto out_nlmsg_trim;
1859        tcm = nlmsg_data(nlh);
1860        tcm->tcm_family = AF_UNSPEC;
1861        tcm->tcm__pad1 = 0;
1862        tcm->tcm__pad2 = 0;
1863        if (q) {
1864                tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
1865                tcm->tcm_parent = parent;
1866        } else {
1867                tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
1868                tcm->tcm_block_index = block->index;
1869        }
1870        tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
1871        if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
1872                goto nla_put_failure;
1873        if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
1874                goto nla_put_failure;
1875        if (!fh) {
1876                tcm->tcm_handle = 0;
1877        } else {
1878                if (tp->ops->dump &&
1879                    tp->ops->dump(net, tp, fh, skb, tcm, rtnl_held) < 0)
1880                        goto nla_put_failure;
1881        }
1882        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
1883        return skb->len;
1884
1885out_nlmsg_trim:
1886nla_put_failure:
1887        nlmsg_trim(skb, b);
1888        return -1;
1889}
1890
1891static int tfilter_notify(struct net *net, struct sk_buff *oskb,
1892                          struct nlmsghdr *n, struct tcf_proto *tp,
1893                          struct tcf_block *block, struct Qdisc *q,
1894                          u32 parent, void *fh, int event, bool unicast,
1895                          bool rtnl_held)
1896{
1897        struct sk_buff *skb;
1898        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1899        int err = 0;
1900
1901        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1902        if (!skb)
1903                return -ENOBUFS;
1904
1905        if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1906                          n->nlmsg_seq, n->nlmsg_flags, event,
1907                          rtnl_held) <= 0) {
1908                kfree_skb(skb);
1909                return -EINVAL;
1910        }
1911
1912        if (unicast)
1913                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1914        else
1915                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1916                                     n->nlmsg_flags & NLM_F_ECHO);
1917
1918        if (err > 0)
1919                err = 0;
1920        return err;
1921}
1922
1923static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
1924                              struct nlmsghdr *n, struct tcf_proto *tp,
1925                              struct tcf_block *block, struct Qdisc *q,
1926                              u32 parent, void *fh, bool unicast, bool *last,
1927                              bool rtnl_held, struct netlink_ext_ack *extack)
1928{
1929        struct sk_buff *skb;
1930        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
1931        int err;
1932
1933        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
1934        if (!skb)
1935                return -ENOBUFS;
1936
1937        if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
1938                          n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
1939                          rtnl_held) <= 0) {
1940                NL_SET_ERR_MSG(extack, "Failed to build del event notification");
1941                kfree_skb(skb);
1942                return -EINVAL;
1943        }
1944
1945        err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
1946        if (err) {
1947                kfree_skb(skb);
1948                return err;
1949        }
1950
1951        if (unicast)
1952                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
1953        else
1954                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
1955                                     n->nlmsg_flags & NLM_F_ECHO);
1956        if (err < 0)
1957                NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
1958
1959        if (err > 0)
1960                err = 0;
1961        return err;
1962}
1963
1964static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
1965                                 struct tcf_block *block, struct Qdisc *q,
1966                                 u32 parent, struct nlmsghdr *n,
1967                                 struct tcf_chain *chain, int event,
1968                                 bool rtnl_held)
1969{
1970        struct tcf_proto *tp;
1971
1972        for (tp = tcf_get_next_proto(chain, NULL, rtnl_held);
1973             tp; tp = tcf_get_next_proto(chain, tp, rtnl_held))
1974                tfilter_notify(net, oskb, n, tp, block,
1975                               q, parent, NULL, event, false, rtnl_held);
1976}
1977
1978static void tfilter_put(struct tcf_proto *tp, void *fh)
1979{
1980        if (tp->ops->put && fh)
1981                tp->ops->put(tp, fh);
1982}
1983
1984static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
1985                          struct netlink_ext_ack *extack)
1986{
1987        struct net *net = sock_net(skb->sk);
1988        struct nlattr *tca[TCA_MAX + 1];
1989        char name[IFNAMSIZ];
1990        struct tcmsg *t;
1991        u32 protocol;
1992        u32 prio;
1993        bool prio_allocate;
1994        u32 parent;
1995        u32 chain_index;
1996        struct Qdisc *q = NULL;
1997        struct tcf_chain_info chain_info;
1998        struct tcf_chain *chain = NULL;
1999        struct tcf_block *block;
2000        struct tcf_proto *tp;
2001        unsigned long cl;
2002        void *fh;
2003        int err;
2004        int tp_created;
2005        bool rtnl_held = false;
2006
2007        if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2008                return -EPERM;
2009
2010replay:
2011        tp_created = 0;
2012
2013        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2014                                     rtm_tca_policy, extack);
2015        if (err < 0)
2016                return err;
2017
2018        t = nlmsg_data(n);
2019        protocol = TC_H_MIN(t->tcm_info);
2020        prio = TC_H_MAJ(t->tcm_info);
2021        prio_allocate = false;
2022        parent = t->tcm_parent;
2023        tp = NULL;
2024        cl = 0;
2025        block = NULL;
2026
2027        if (prio == 0) {
2028                /* If no priority is provided by the user,
2029                 * we allocate one.
2030                 */
2031                if (n->nlmsg_flags & NLM_F_CREATE) {
2032                        prio = TC_H_MAKE(0x80000000U, 0U);
2033                        prio_allocate = true;
2034                } else {
2035                        NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2036                        return -ENOENT;
2037                }
2038        }
2039
2040        /* Find head of filter chain. */
2041
2042        err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2043        if (err)
2044                return err;
2045
2046        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2047                NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2048                err = -EINVAL;
2049                goto errout;
2050        }
2051
2052        /* Take the rtnl mutex if rtnl_held was set on a previous replay,
2053         * the block is shared (no qdisc found), the qdisc is not unlocked,
2054         * the classifier type is unspecified, or the classifier is not unlocked.
2055         */
2056        if (rtnl_held ||
2057            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2058            !tcf_proto_is_unlocked(name)) {
2059                rtnl_held = true;
2060                rtnl_lock();
2061        }
2062
2063        err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2064        if (err)
2065                goto errout;
2066
2067        block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2068                                 extack);
2069        if (IS_ERR(block)) {
2070                err = PTR_ERR(block);
2071                goto errout;
2072        }
2073        block->classid = parent;
2074
2075        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2076        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2077                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2078                err = -EINVAL;
2079                goto errout;
2080        }
2081        chain = tcf_chain_get(block, chain_index, true);
2082        if (!chain) {
2083                NL_SET_ERR_MSG(extack, "Cannot create specified filter chain");
2084                err = -ENOMEM;
2085                goto errout;
2086        }
2087
2088        mutex_lock(&chain->filter_chain_lock);
2089        tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2090                               prio, prio_allocate);
2091        if (IS_ERR(tp)) {
2092                NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2093                err = PTR_ERR(tp);
2094                goto errout_locked;
2095        }
2096
2097        if (tp == NULL) {
2098                struct tcf_proto *tp_new = NULL;
2099
2100                if (chain->flushing) {
2101                        err = -EAGAIN;
2102                        goto errout_locked;
2103                }
2104
2105                /* Proto-tcf does not exist, create a new one */
2106
2107                if (tca[TCA_KIND] == NULL || !protocol) {
2108                        NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
2109                        err = -EINVAL;
2110                        goto errout_locked;
2111                }
2112
2113                if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2114                        NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2115                        err = -ENOENT;
2116                        goto errout_locked;
2117                }
2118
2119                if (prio_allocate)
2120                        prio = tcf_auto_prio(tcf_chain_tp_prev(chain,
2121                                                               &chain_info));
2122
2123                mutex_unlock(&chain->filter_chain_lock);
2124                tp_new = tcf_proto_create(name, protocol, prio, chain,
2125                                          rtnl_held, extack);
2126                if (IS_ERR(tp_new)) {
2127                        err = PTR_ERR(tp_new);
2128                        goto errout_tp;
2129                }
2130
2131                tp_created = 1;
2132                tp = tcf_chain_tp_insert_unique(chain, tp_new, protocol, prio,
2133                                                rtnl_held);
2134                if (IS_ERR(tp)) {
2135                        err = PTR_ERR(tp);
2136                        goto errout_tp;
2137                }
2138        } else {
2139                mutex_unlock(&chain->filter_chain_lock);
2140        }
2141
2142        if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2143                NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2144                err = -EINVAL;
2145                goto errout;
2146        }
2147
2148        fh = tp->ops->get(tp, t->tcm_handle);
2149
2150        if (!fh) {
2151                if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2152                        NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
2153                        err = -ENOENT;
2154                        goto errout;
2155                }
2156        } else if (n->nlmsg_flags & NLM_F_EXCL) {
2157                tfilter_put(tp, fh);
2158                NL_SET_ERR_MSG(extack, "Filter already exists");
2159                err = -EEXIST;
2160                goto errout;
2161        }
2162
2163        if (chain->tmplt_ops && chain->tmplt_ops != tp->ops) {
2164                NL_SET_ERR_MSG(extack, "Chain template is set to a different filter kind");
2165                err = -EINVAL;
2166                goto errout;
2167        }
2168
2169        err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
2170                              n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
2171                              rtnl_held, extack);
2172        if (err == 0) {
2173                tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2174                               RTM_NEWTFILTER, false, rtnl_held);
2175                tfilter_put(tp, fh);
2176                /* q pointer is NULL for shared blocks */
2177                if (q)
2178                        q->flags &= ~TCQ_F_CAN_BYPASS;
2179        }
2180
2181errout:
2182        if (err && tp_created)
2183                tcf_chain_tp_delete_empty(chain, tp, rtnl_held, NULL);
2184errout_tp:
2185        if (chain) {
2186                if (tp && !IS_ERR(tp))
2187                        tcf_proto_put(tp, rtnl_held, NULL);
2188                if (!tp_created)
2189                        tcf_chain_put(chain);
2190        }
2191        tcf_block_release(q, block, rtnl_held);
2192
2193        if (rtnl_held)
2194                rtnl_unlock();
2195
2196        if (err == -EAGAIN) {
2197                /* Take rtnl lock in case EAGAIN is caused by concurrent flush
2198                 * of target chain.
2199                 */
2200                rtnl_held = true;
2201                /* Replay the request. */
2202                goto replay;
2203        }
2204        return err;
2205
2206errout_locked:
2207        mutex_unlock(&chain->filter_chain_lock);
2208        goto errout;
2209}
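
/* Example of a request handled here (illustrative; device and addresses
 * are hypothetical):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *		u32 match ip dst 192.0.2.1/32 flowid 1:1
 *
 * "add" arrives as RTM_NEWTFILTER with NLM_F_CREATE | NLM_F_EXCL;
 * omitting "prio" takes the prio_allocate path above.
 */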
2210
2211static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2212                          struct netlink_ext_ack *extack)
2213{
2214        struct net *net = sock_net(skb->sk);
2215        struct nlattr *tca[TCA_MAX + 1];
2216        char name[IFNAMSIZ];
2217        struct tcmsg *t;
2218        u32 protocol;
2219        u32 prio;
2220        u32 parent;
2221        u32 chain_index;
2222        struct Qdisc *q = NULL;
2223        struct tcf_chain_info chain_info;
2224        struct tcf_chain *chain = NULL;
2225        struct tcf_block *block = NULL;
2226        struct tcf_proto *tp = NULL;
2227        unsigned long cl = 0;
2228        void *fh = NULL;
2229        int err;
2230        bool rtnl_held = false;
2231
2232        if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2233                return -EPERM;
2234
2235        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2236                                     rtm_tca_policy, extack);
2237        if (err < 0)
2238                return err;
2239
2240        t = nlmsg_data(n);
2241        protocol = TC_H_MIN(t->tcm_info);
2242        prio = TC_H_MAJ(t->tcm_info);
2243        parent = t->tcm_parent;
2244
2245        if (prio == 0 && (protocol || t->tcm_handle || tca[TCA_KIND])) {
2246                NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
2247                return -ENOENT;
2248        }
2249
2250        /* Find head of filter chain. */
2251
2252        err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2253        if (err)
2254                return err;
2255
2256        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2257                NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2258                err = -EINVAL;
2259                goto errout;
2260        }
2261        /* Take the rtnl mutex if flushing the whole chain, the block is
2262         * shared (no qdisc found), the qdisc is not unlocked, the classifier
2263         * type is unspecified, or the classifier is not unlocked.
2264         */
2265        if (!prio ||
2266            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2267            !tcf_proto_is_unlocked(name)) {
2268                rtnl_held = true;
2269                rtnl_lock();
2270        }
2271
2272        err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2273        if (err)
2274                goto errout;
2275
2276        block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2277                                 extack);
2278        if (IS_ERR(block)) {
2279                err = PTR_ERR(block);
2280                goto errout;
2281        }
2282
2283        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2284        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2285                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2286                err = -EINVAL;
2287                goto errout;
2288        }
2289        chain = tcf_chain_get(block, chain_index, false);
2290        if (!chain) {
2291                /* User requested flush on non-existent chain. Nothing to do,
2292                 * so just return success.
2293                 */
2294                if (prio == 0) {
2295                        err = 0;
2296                        goto errout;
2297                }
2298                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2299                err = -ENOENT;
2300                goto errout;
2301        }
2302
2303        if (prio == 0) {
2304                tfilter_notify_chain(net, skb, block, q, parent, n,
2305                                     chain, RTM_DELTFILTER, rtnl_held);
2306                tcf_chain_flush(chain, rtnl_held);
2307                err = 0;
2308                goto errout;
2309        }
2310
2311        mutex_lock(&chain->filter_chain_lock);
2312        tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2313                               prio, false);
2314        if (!tp || IS_ERR(tp)) {
2315                NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2316                err = tp ? PTR_ERR(tp) : -ENOENT;
2317                goto errout_locked;
2318        } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2319                NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2320                err = -EINVAL;
2321                goto errout_locked;
2322        } else if (t->tcm_handle == 0) {
2323                tcf_proto_signal_destroying(chain, tp);
2324                tcf_chain_tp_remove(chain, &chain_info, tp);
2325                mutex_unlock(&chain->filter_chain_lock);
2326
2327                tcf_proto_put(tp, rtnl_held, NULL);
2328                tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2329                               RTM_DELTFILTER, false, rtnl_held);
2330                err = 0;
2331                goto errout;
2332        }
2333        mutex_unlock(&chain->filter_chain_lock);
2334
2335        fh = tp->ops->get(tp, t->tcm_handle);
2336
2337        if (!fh) {
2338                NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2339                err = -ENOENT;
2340        } else {
2341                bool last;
2342
2343                err = tfilter_del_notify(net, skb, n, tp, block,
2344                                         q, parent, fh, false, &last,
2345                                         rtnl_held, extack);
2346
2347                if (err)
2348                        goto errout;
2349                if (last)
2350                        tcf_chain_tp_delete_empty(chain, tp, rtnl_held, extack);
2351        }
2352
2353errout:
2354        if (chain) {
2355                if (tp && !IS_ERR(tp))
2356                        tcf_proto_put(tp, rtnl_held, NULL);
2357                tcf_chain_put(chain);
2358        }
2359        tcf_block_release(q, block, rtnl_held);
2360
2361        if (rtnl_held)
2362                rtnl_unlock();
2363
2364        return err;
2365
2366errout_locked:
2367        mutex_unlock(&chain->filter_chain_lock);
2368        goto errout;
2369}
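
/* Examples of requests handled here (illustrative; names hypothetical):
 *
 *	tc filter del dev eth0 parent 1: protocol ip prio 10 \
 *		handle 800::800 u32			(delete one filter)
 *	tc filter del dev eth0 parent 1:		(prio == 0: flush)
 *
 * The second form takes the tfilter_notify_chain()/tcf_chain_flush()
 * path above.
 */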
2370
2371static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2372                          struct netlink_ext_ack *extack)
2373{
2374        struct net *net = sock_net(skb->sk);
2375        struct nlattr *tca[TCA_MAX + 1];
2376        char name[IFNAMSIZ];
2377        struct tcmsg *t;
2378        u32 protocol;
2379        u32 prio;
2380        u32 parent;
2381        u32 chain_index;
2382        struct Qdisc *q = NULL;
2383        struct tcf_chain_info chain_info;
2384        struct tcf_chain *chain = NULL;
2385        struct tcf_block *block = NULL;
2386        struct tcf_proto *tp = NULL;
2387        unsigned long cl = 0;
2388        void *fh = NULL;
2389        int err;
2390        bool rtnl_held = false;
2391
2392        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2393                                     rtm_tca_policy, extack);
2394        if (err < 0)
2395                return err;
2396
2397        t = nlmsg_data(n);
2398        protocol = TC_H_MIN(t->tcm_info);
2399        prio = TC_H_MAJ(t->tcm_info);
2400        parent = t->tcm_parent;
2401
2402        if (prio == 0) {
2403                NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
2404                return -ENOENT;
2405        }
2406
2407        /* Find head of filter chain. */
2408
2409        err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
2410        if (err)
2411                return err;
2412
2413        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2414                NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
2415                err = -EINVAL;
2416                goto errout;
2417        }
2418        /* Take the rtnl mutex if the block is shared (no qdisc found), the
2419         * qdisc is not unlocked, the classifier type is unspecified, or the
2420         * classifier is not unlocked.
2421         */
2422        if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
2423            !tcf_proto_is_unlocked(name)) {
2424                rtnl_held = true;
2425                rtnl_lock();
2426        }
2427
2428        err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
2429        if (err)
2430                goto errout;
2431
2432        block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
2433                                 extack);
2434        if (IS_ERR(block)) {
2435                err = PTR_ERR(block);
2436                goto errout;
2437        }
2438
2439        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2440        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2441                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2442                err = -EINVAL;
2443                goto errout;
2444        }
2445        chain = tcf_chain_get(block, chain_index, false);
2446        if (!chain) {
2447                NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2448                err = -EINVAL;
2449                goto errout;
2450        }
2451
2452        mutex_lock(&chain->filter_chain_lock);
2453        tp = tcf_chain_tp_find(chain, &chain_info, protocol,
2454                               prio, false);
2455        mutex_unlock(&chain->filter_chain_lock);
2456        if (!tp || IS_ERR(tp)) {
2457                NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
2458                err = tp ? PTR_ERR(tp) : -ENOENT;
2459                goto errout;
2460        } else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
2461                NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
2462                err = -EINVAL;
2463                goto errout;
2464        }
2465
2466        fh = tp->ops->get(tp, t->tcm_handle);
2467
2468        if (!fh) {
2469                NL_SET_ERR_MSG(extack, "Specified filter handle not found");
2470                err = -ENOENT;
2471        } else {
2472                err = tfilter_notify(net, skb, n, tp, block, q, parent,
2473                                     fh, RTM_NEWTFILTER, true, rtnl_held);
2474                if (err < 0)
2475                        NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
2476        }
2477
2478        tfilter_put(tp, fh);
2479errout:
2480        if (chain) {
2481                if (tp && !IS_ERR(tp))
2482                        tcf_proto_put(tp, rtnl_held, NULL);
2483                tcf_chain_put(chain);
2484        }
2485        tcf_block_release(q, block, rtnl_held);
2486
2487        if (rtnl_held)
2488                rtnl_unlock();
2489
2490        return err;
2491}
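
/* Example (illustrative): "tc filter get dev eth0 parent 1: protocol ip
 * prio 10 handle 800::800 u32" arrives as RTM_GETTFILTER and is answered
 * with a unicast RTM_NEWTFILTER message describing the single filter.
 */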
2492
2493struct tcf_dump_args {
2494        struct tcf_walker w;
2495        struct sk_buff *skb;
2496        struct netlink_callback *cb;
2497        struct tcf_block *block;
2498        struct Qdisc *q;
2499        u32 parent;
2500};
2501
2502static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
2503{
2504        struct tcf_dump_args *a = (void *)arg;
2505        struct net *net = sock_net(a->skb->sk);
2506
2507        return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
2508                             n, NETLINK_CB(a->cb->skb).portid,
2509                             a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
2510                             RTM_NEWTFILTER, true);
2511}
2512
2513static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
2514                           struct sk_buff *skb, struct netlink_callback *cb,
2515                           long index_start, long *p_index)
2516{
2517        struct net *net = sock_net(skb->sk);
2518        struct tcf_block *block = chain->block;
2519        struct tcmsg *tcm = nlmsg_data(cb->nlh);
2520        struct tcf_proto *tp, *tp_prev;
2521        struct tcf_dump_args arg;
2522
2523        for (tp = __tcf_get_next_proto(chain, NULL);
2524             tp;
2525             tp_prev = tp,
2526                     tp = __tcf_get_next_proto(chain, tp),
2527                     tcf_proto_put(tp_prev, true, NULL),
2528                     (*p_index)++) {
2529                if (*p_index < index_start)
2530                        continue;
2531                if (TC_H_MAJ(tcm->tcm_info) &&
2532                    TC_H_MAJ(tcm->tcm_info) != tp->prio)
2533                        continue;
2534                if (TC_H_MIN(tcm->tcm_info) &&
2535                    TC_H_MIN(tcm->tcm_info) != tp->protocol)
2536                        continue;
2537                if (*p_index > index_start)
2538                        memset(&cb->args[1], 0,
2539                               sizeof(cb->args) - sizeof(cb->args[0]));
2540                if (cb->args[1] == 0) {
2541                        if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
2542                                          NETLINK_CB(cb->skb).portid,
2543                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
2544                                          RTM_NEWTFILTER, true) <= 0)
2545                                goto errout;
2546                        cb->args[1] = 1;
2547                }
2548                if (!tp->ops->walk)
2549                        continue;
2550                arg.w.fn = tcf_node_dump;
2551                arg.skb = skb;
2552                arg.cb = cb;
2553                arg.block = block;
2554                arg.q = q;
2555                arg.parent = parent;
2556                arg.w.stop = 0;
2557                arg.w.skip = cb->args[1] - 1;
2558                arg.w.count = 0;
2559                arg.w.cookie = cb->args[2];
2560                tp->ops->walk(tp, &arg.w, true);
2561                cb->args[2] = arg.w.cookie;
2562                cb->args[1] = arg.w.count + 1;
2563                if (arg.w.stop)
2564                        goto errout;
2565        }
2566        return true;
2567
2568errout:
2569        tcf_proto_put(tp, true, NULL);
2570        return false;
2571}
2572
2573/* called with RTNL */
2574static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
2575{
2576        struct tcf_chain *chain, *chain_prev;
2577        struct net *net = sock_net(skb->sk);
2578        struct nlattr *tca[TCA_MAX + 1];
2579        struct Qdisc *q = NULL;
2580        struct tcf_block *block;
2581        struct tcmsg *tcm = nlmsg_data(cb->nlh);
2582        long index_start;
2583        long index;
2584        u32 parent;
2585        int err;
2586
2587        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2588                return skb->len;
2589
2590        err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2591                                     NULL, cb->extack);
2592        if (err)
2593                return err;
2594
2595        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2596                block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2597                if (!block)
2598                        goto out;
2599                /* If we work with block index, q is NULL and parent value
2600                 * will never be used in the following code. The check
2601                 * in tcf_fill_node prevents it. However, the compiler does not
2602                 * see that far, so set parent to zero to silence the warning
2603                 * about parent being uninitialized.
2604                 */
2605                parent = 0;
2606        } else {
2607                const struct Qdisc_class_ops *cops;
2608                struct net_device *dev;
2609                unsigned long cl = 0;
2610
2611                dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2612                if (!dev)
2613                        return skb->len;
2614
2615                parent = tcm->tcm_parent;
2616                if (!parent)
2617                        q = dev->qdisc;
2618                else
2619                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
2620                if (!q)
2621                        goto out;
2622                cops = q->ops->cl_ops;
2623                if (!cops)
2624                        goto out;
2625                if (!cops->tcf_block)
2626                        goto out;
2627                if (TC_H_MIN(tcm->tcm_parent)) {
2628                        cl = cops->find(q, tcm->tcm_parent);
2629                        if (cl == 0)
2630                                goto out;
2631                }
2632                block = cops->tcf_block(q, cl, NULL);
2633                if (!block)
2634                        goto out;
2635                parent = block->classid;
2636                if (tcf_block_shared(block))
2637                        q = NULL;
2638        }
2639
2640        index_start = cb->args[0];
2641        index = 0;
2642
2643        for (chain = __tcf_get_next_chain(block, NULL);
2644             chain;
2645             chain_prev = chain,
2646                     chain = __tcf_get_next_chain(block, chain),
2647                     tcf_chain_put(chain_prev)) {
2648                if (tca[TCA_CHAIN] &&
2649                    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
2650                        continue;
2651                if (!tcf_chain_dump(chain, q, parent, skb, cb,
2652                                    index_start, &index)) {
2653                        tcf_chain_put(chain);
2654                        err = -EMSGSIZE;
2655                        break;
2656                }
2657        }
2658
2659        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
2660                tcf_block_refcnt_put(block, true);
2661        cb->args[0] = index;
2662
2663out:
2664        /* If we made no progress, the error (EMSGSIZE) is real */
2665        if (skb->len == 0 && err)
2666                return err;
2667        return skb->len;
2668}
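
/* Examples (illustrative): "tc filter show dev eth0 parent 1:" and
 * "tc filter show block 22" both land here; the latter takes the
 * TCM_IFINDEX_MAGIC_BLOCK branch above.
 */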
2669
2670static int tc_chain_fill_node(const struct tcf_proto_ops *tmplt_ops,
2671                              void *tmplt_priv, u32 chain_index,
2672                              struct net *net, struct sk_buff *skb,
2673                              struct tcf_block *block,
2674                              u32 portid, u32 seq, u16 flags, int event)
2675{
2676        unsigned char *b = skb_tail_pointer(skb);
2677        const struct tcf_proto_ops *ops;
2678        struct nlmsghdr *nlh;
2679        struct tcmsg *tcm;
2680        void *priv;
2681
2682        ops = tmplt_ops;
2683        priv = tmplt_priv;
2684
2685        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
2686        if (!nlh)
2687                goto out_nlmsg_trim;
2688        tcm = nlmsg_data(nlh);
2689        tcm->tcm_family = AF_UNSPEC;
2690        tcm->tcm__pad1 = 0;
2691        tcm->tcm__pad2 = 0;
2692        tcm->tcm_handle = 0;
2693        if (block->q) {
2694                tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
2695                tcm->tcm_parent = block->q->handle;
2696        } else {
2697                tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
2698                tcm->tcm_block_index = block->index;
2699        }
2700
2701        if (nla_put_u32(skb, TCA_CHAIN, chain_index))
2702                goto nla_put_failure;
2703
2704        if (ops) {
2705                if (nla_put_string(skb, TCA_KIND, ops->kind))
2706                        goto nla_put_failure;
2707                if (ops->tmplt_dump(skb, net, priv) < 0)
2708                        goto nla_put_failure;
2709        }
2710
2711        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
2712        return skb->len;
2713
2714out_nlmsg_trim:
2715nla_put_failure:
2716        nlmsg_trim(skb, b);
2717        return -EMSGSIZE;
2718}
2719
2720static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb,
2721                           u32 seq, u16 flags, int event, bool unicast)
2722{
2723        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2724        struct tcf_block *block = chain->block;
2725        struct net *net = block->net;
2726        struct sk_buff *skb;
2727        int err = 0;
2728
2729        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2730        if (!skb)
2731                return -ENOBUFS;
2732
2733        if (tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
2734                               chain->index, net, skb, block, portid,
2735                               seq, flags, event) <= 0) {
2736                kfree_skb(skb);
2737                return -EINVAL;
2738        }
2739
2740        if (unicast)
2741                err = netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2742        else
2743                err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
2744                                     flags & NLM_F_ECHO);
2745
2746        if (err > 0)
2747                err = 0;
2748        return err;
2749}
2750
2751static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops,
2752                                  void *tmplt_priv, u32 chain_index,
2753                                  struct tcf_block *block, struct sk_buff *oskb,
2754                                  u32 seq, u16 flags, bool unicast)
2755{
2756        u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
2757        struct net *net = block->net;
2758        struct sk_buff *skb;
2759
2760        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2761        if (!skb)
2762                return -ENOBUFS;
2763
2764        if (tc_chain_fill_node(tmplt_ops, tmplt_priv, chain_index, net, skb,
2765                               block, portid, seq, flags, RTM_DELCHAIN) <= 0) {
2766                kfree_skb(skb);
2767                return -EINVAL;
2768        }
2769
2770        if (unicast)
2771                return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
2772
2773        return rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
2774}
2775
2776static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
2777                              struct nlattr **tca,
2778                              struct netlink_ext_ack *extack)
2779{
2780        const struct tcf_proto_ops *ops;
2781        char name[IFNAMSIZ];
2782        void *tmplt_priv;
2783
2784        /* If kind is not set, the user did not specify a template. */
2785        if (!tca[TCA_KIND])
2786                return 0;
2787
2788        if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
2789                NL_SET_ERR_MSG(extack, "Specified TC chain template name too long");
2790                return -EINVAL;
2791        }
2792
2793        ops = tcf_proto_lookup_ops(name, true, extack);
2794        if (IS_ERR(ops))
2795                return PTR_ERR(ops);
2796        if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
2797                NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
2798                return -EOPNOTSUPP;
2799        }
2800
2801        tmplt_priv = ops->tmplt_create(net, chain, tca, extack);
2802        if (IS_ERR(tmplt_priv)) {
2803                module_put(ops->owner);
2804                return PTR_ERR(tmplt_priv);
2805        }
2806        chain->tmplt_ops = ops;
2807        chain->tmplt_priv = tmplt_priv;
2808        return 0;
2809}
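
/* Example (illustrative; names hypothetical): a chain template is
 * created by giving a classifier kind and mask-only keys when the chain
 * itself is created:
 *
 *	tc chain add dev eth0 ingress chain 7 protocol ip \
 *		flower dst_mac 00:00:00:00:00:00/ff:ff:ff:ff:ff:ff
 *
 * Filters later added to chain 7 are then validated by the classifier
 * against this template.
 */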
2810
2811static void tc_chain_tmplt_del(const struct tcf_proto_ops *tmplt_ops,
2812                               void *tmplt_priv)
2813{
2814        /* If template ops are not set, there is no work to do for us. */
2815        if (!tmplt_ops)
2816                return;
2817
2818        tmplt_ops->tmplt_destroy(tmplt_priv);
2819        module_put(tmplt_ops->owner);
2820}
2821
2822/* Add/delete/get a chain */
2823
2824static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
2825                        struct netlink_ext_ack *extack)
2826{
2827        struct net *net = sock_net(skb->sk);
2828        struct nlattr *tca[TCA_MAX + 1];
2829        struct tcmsg *t;
2830        u32 parent;
2831        u32 chain_index;
2832        struct Qdisc *q = NULL;
2833        struct tcf_chain *chain = NULL;
2834        struct tcf_block *block;
2835        unsigned long cl;
2836        int err;
2837
2838        if (n->nlmsg_type != RTM_GETCHAIN &&
2839            !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
2840                return -EPERM;
2841
2842replay:
2843        err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX,
2844                                     rtm_tca_policy, extack);
2845        if (err < 0)
2846                return err;
2847
2848        t = nlmsg_data(n);
2849        parent = t->tcm_parent;
2850        cl = 0;
2851
2852        block = tcf_block_find(net, &q, &parent, &cl,
2853                               t->tcm_ifindex, t->tcm_block_index, extack);
2854        if (IS_ERR(block))
2855                return PTR_ERR(block);
2856
2857        chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
2858        if (chain_index > TC_ACT_EXT_VAL_MASK) {
2859                NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
2860                err = -EINVAL;
2861                goto errout_block;
2862        }
2863
2864        mutex_lock(&block->lock);
2865        chain = tcf_chain_lookup(block, chain_index);
2866        if (n->nlmsg_type == RTM_NEWCHAIN) {
2867                if (chain) {
2868                        if (tcf_chain_held_by_acts_only(chain)) {
2869                                /* The chain exists only because there is
2870                                 * some action referencing it.
2871                                 */
2872                                tcf_chain_hold(chain);
2873                        } else {
2874                                NL_SET_ERR_MSG(extack, "Filter chain already exists");
2875                                err = -EEXIST;
2876                                goto errout_block_locked;
2877                        }
2878                } else {
2879                        if (!(n->nlmsg_flags & NLM_F_CREATE)) {
2880                                NL_SET_ERR_MSG(extack, "Need both RTM_NEWCHAIN and NLM_F_CREATE to create a new chain");
2881                                err = -ENOENT;
2882                                goto errout_block_locked;
2883                        }
2884                        chain = tcf_chain_create(block, chain_index);
2885                        if (!chain) {
2886                                NL_SET_ERR_MSG(extack, "Failed to create filter chain");
2887                                err = -ENOMEM;
2888                                goto errout_block_locked;
2889                        }
2890                }
2891        } else {
2892                if (!chain || tcf_chain_held_by_acts_only(chain)) {
2893                        NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
2894                        err = -EINVAL;
2895                        goto errout_block_locked;
2896                }
2897                tcf_chain_hold(chain);
2898        }
2899
2900        if (n->nlmsg_type == RTM_NEWCHAIN) {
2901                /* Modifying chain requires holding parent block lock. In case
2902                 * the chain was successfully added, take a reference to the
2903                 * chain. This ensures that an empty chain does not disappear at
2904                 * the end of this function.
2905                 */
2906                tcf_chain_hold(chain);
2907                chain->explicitly_created = true;
2908        }
2909        mutex_unlock(&block->lock);
2910
2911        switch (n->nlmsg_type) {
2912        case RTM_NEWCHAIN:
2913                err = tc_chain_tmplt_add(chain, net, tca, extack);
2914                if (err) {
2915                        tcf_chain_put_explicitly_created(chain);
2916                        goto errout;
2917                }
2918
2919                tc_chain_notify(chain, NULL, 0, NLM_F_CREATE | NLM_F_EXCL,
2920                                RTM_NEWCHAIN, false);
2921                break;
2922        case RTM_DELCHAIN:
2923                tfilter_notify_chain(net, skb, block, q, parent, n,
2924                                     chain, RTM_DELTFILTER, true);
2925                /* Flush the chain first as the user requested chain removal. */
2926                tcf_chain_flush(chain, true);
2927                /* In case the chain was successfully deleted, put a reference
2928                 * to the chain previously taken during addition.
2929                 */
2930                tcf_chain_put_explicitly_created(chain);
2931                break;
2932        case RTM_GETCHAIN:
2933                err = tc_chain_notify(chain, skb, n->nlmsg_seq,
2934                                      n->nlmsg_seq, n->nlmsg_type, true);
2935                if (err < 0)
2936                        NL_SET_ERR_MSG(extack, "Failed to send chain notify message");
2937                break;
2938        default:
2939                err = -EOPNOTSUPP;
2940                NL_SET_ERR_MSG(extack, "Unsupported message type");
2941                goto errout;
2942        }
2943
2944errout:
2945        tcf_chain_put(chain);
2946errout_block:
2947        tcf_block_release(q, block, true);
2948        if (err == -EAGAIN)
2949                /* Replay the request. */
2950                goto replay;
2951        return err;
2952
2953errout_block_locked:
2954        mutex_unlock(&block->lock);
2955        goto errout_block;
2956}
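
/* Examples of requests dispatched above (illustrative):
 *
 *	tc chain add dev eth0 ingress chain 7	(RTM_NEWCHAIN)
 *	tc chain del dev eth0 ingress chain 7	(RTM_DELCHAIN, flushed first)
 *	tc chain get dev eth0 ingress chain 7	(RTM_GETCHAIN)
 */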
2957
2958/* called with RTNL */
2959static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
2960{
2961        struct net *net = sock_net(skb->sk);
2962        struct nlattr *tca[TCA_MAX + 1];
2963        struct Qdisc *q = NULL;
2964        struct tcf_block *block;
2965        struct tcmsg *tcm = nlmsg_data(cb->nlh);
2966        struct tcf_chain *chain;
2967        long index_start;
2968        long index;
2969        u32 parent;
2970        int err;
2971
2972        if (nlmsg_len(cb->nlh) < sizeof(*tcm))
2973                return skb->len;
2974
2975        err = nlmsg_parse_deprecated(cb->nlh, sizeof(*tcm), tca, TCA_MAX,
2976                                     rtm_tca_policy, cb->extack);
2977        if (err)
2978                return err;
2979
2980        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
2981                block = tcf_block_refcnt_get(net, tcm->tcm_block_index);
2982                if (!block)
2983                        goto out;
2984                /* If we work with block index, q is NULL and parent value
2985                 * will never be used in the following code. The check
2986                 * in tcf_fill_node prevents it. However, the compiler does not
2987                 * see that far, so set parent to zero to silence the warning
2988                 * about parent being uninitialized.
2989                 */
2990                parent = 0;
2991        } else {
2992                const struct Qdisc_class_ops *cops;
2993                struct net_device *dev;
2994                unsigned long cl = 0;
2995
2996                dev = __dev_get_by_index(net, tcm->tcm_ifindex);
2997                if (!dev)
2998                        return skb->len;
2999
3000                parent = tcm->tcm_parent;
3001                if (!parent) {
3002                        q = dev->qdisc;
3003                        parent = q->handle;
3004                } else {
3005                        q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
3006                }
3007                if (!q)
3008                        goto out;
3009                cops = q->ops->cl_ops;
3010                if (!cops)
3011                        goto out;
3012                if (!cops->tcf_block)
3013                        goto out;
3014                if (TC_H_MIN(tcm->tcm_parent)) {
3015                        cl = cops->find(q, tcm->tcm_parent);
3016                        if (cl == 0)
3017                                goto out;
3018                }
3019                block = cops->tcf_block(q, cl, NULL);
3020                if (!block)
3021                        goto out;
3022                if (tcf_block_shared(block))
3023                        q = NULL;
3024        }
3025
3026        index_start = cb->args[0];
3027        index = 0;
3028
3029        mutex_lock(&block->lock);
3030        list_for_each_entry(chain, &block->chain_list, list) {
3031                if ((tca[TCA_CHAIN] &&
3032                     nla_get_u32(tca[TCA_CHAIN]) != chain->index))
3033                        continue;
3034                if (index < index_start) {
3035                        index++;
3036                        continue;
3037                }
3038                if (tcf_chain_held_by_acts_only(chain))
3039                        continue;
3040                err = tc_chain_fill_node(chain->tmplt_ops, chain->tmplt_priv,
3041                                         chain->index, net, skb, block,
3042                                         NETLINK_CB(cb->skb).portid,
3043                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
3044                                         RTM_NEWCHAIN);
3045                if (err <= 0)
3046                        break;
3047                index++;
3048        }
3049        mutex_unlock(&block->lock);
3050
3051        if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK)
3052                tcf_block_refcnt_put(block, true);
3053        cb->args[0] = index;
3054
3055out:
3056        /* If we made no progress, the error (EMSGSIZE) is real */
3057        if (skb->len == 0 && err)
3058                return err;
3059        return skb->len;
3060}
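
/* Illustrative sketch (not part of the original file): the resume idiom used
 * by tc_dump_chain() above. A netlink dump callback may run several times for
 * a single dump; cb->args[0] persists between runs, so the walk first skips
 * everything that was already emitted and records how far it got this time.
 * All example_* names are hypothetical.
 */
struct example_item {
        struct list_head list;
};

static int example_fill_one(struct sk_buff *skb, struct example_item *it);

static int example_chain_dump(struct sk_buff *skb, struct netlink_callback *cb,
                              struct list_head *items)
{
        long index_start = cb->args[0]; /* entries emitted by earlier runs */
        long index = 0;
        struct example_item *it;

        list_for_each_entry(it, items, list) {
                if (index < index_start) {
                        index++;        /* already emitted, skip */
                        continue;
                }
                if (example_fill_one(skb, it) <= 0)
                        break;  /* skb full, resume from this entry next run */
                index++;
        }
        cb->args[0] = index;    /* remember progress for the next run */
        return skb->len;
}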
3061
3062void tcf_exts_destroy(struct tcf_exts *exts)
3063{
3064#ifdef CONFIG_NET_CLS_ACT
3065        if (exts->actions) {
3066                tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
3067                kfree(exts->actions);
3068        }
3069        exts->nr_actions = 0;
3070#endif
3071}
3072EXPORT_SYMBOL(tcf_exts_destroy);
3073
3074int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
3075                      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
3076                      bool rtnl_held, struct netlink_ext_ack *extack)
3077{
3078#ifdef CONFIG_NET_CLS_ACT
3079        {
3080                struct tc_action *act;
3081                size_t attr_size = 0;
3082
3083                if (exts->police && tb[exts->police]) {
3084                        act = tcf_action_init_1(net, tp, tb[exts->police],
3085                                                rate_tlv, "police", ovr,
3086                                                TCA_ACT_BIND, rtnl_held,
3087                                                extack);
3088                        if (IS_ERR(act))
3089                                return PTR_ERR(act);
3090
3091                        act->type = exts->type = TCA_OLD_COMPAT;
3092                        exts->actions[0] = act;
3093                        exts->nr_actions = 1;
3094                } else if (exts->action && tb[exts->action]) {
3095                        int err;
3096
3097                        err = tcf_action_init(net, tp, tb[exts->action],
3098                                              rate_tlv, NULL, ovr, TCA_ACT_BIND,
3099                                              exts->actions, &attr_size,
3100                                              rtnl_held, extack);
3101                        if (err < 0)
3102                                return err;
3103                        exts->nr_actions = err;
3104                }
3105        }
3106#else
3107        if ((exts->action && tb[exts->action]) ||
3108            (exts->police && tb[exts->police])) {
3109                NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
3110                return -EOPNOTSUPP;
3111        }
3112#endif
3113
3114        return 0;
3115}
3116EXPORT_SYMBOL(tcf_exts_validate);
3117
3118void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
3119{
3120#ifdef CONFIG_NET_CLS_ACT
3121        struct tcf_exts old = *dst;
3122
3123        *dst = *src;
3124        tcf_exts_destroy(&old);
3125#endif
3126}
3127EXPORT_SYMBOL(tcf_exts_change);
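
/* Illustrative sketch (not part of the original file): the usual life cycle
 * of the helpers above in a classifier's ->change() path. Actions are
 * validated into a temporary tcf_exts and swapped into the live filter only
 * after validation succeeds; tcf_exts_change() then releases the old
 * actions. The example_filter type and TCA_EXAMPLE_* attributes are
 * hypothetical; later sketches in this file reuse them.
 */
enum {                                  /* hypothetical netlink attributes */
        TCA_EXAMPLE_UNSPEC,
        TCA_EXAMPLE_ACT,
        TCA_EXAMPLE_POLICE,
        __TCA_EXAMPLE_MAX,
};

struct example_filter {                 /* hypothetical filter type */
        struct tcf_exts exts;
        u32 flags;
        unsigned int in_hw_count;
};

static int example_set_parms(struct net *net, struct tcf_proto *tp,
                             struct example_filter *f, struct nlattr **tb,
                             struct nlattr *est, bool ovr,
                             struct netlink_ext_ack *extack)
{
        struct tcf_exts e;
        int err;

        err = tcf_exts_init(&e, net, TCA_EXAMPLE_ACT, TCA_EXAMPLE_POLICE);
        if (err < 0)
                return err;

        err = tcf_exts_validate(net, tp, tb, est, &e, ovr, true, extack);
        if (err < 0) {
                tcf_exts_destroy(&e);
                return err;
        }

        /* Swap in the validated actions; the old ones are freed here. */
        tcf_exts_change(&f->exts, &e);
        return 0;
}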
3128
3129#ifdef CONFIG_NET_CLS_ACT
3130static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
3131{
3132        if (exts->nr_actions == 0)
3133                return NULL;
3134
3135        return exts->actions[0];
3136}
3137#endif
3138
3139int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
3140{
3141#ifdef CONFIG_NET_CLS_ACT
3142        struct nlattr *nest;
3143
3144        if (exts->action && tcf_exts_has_actions(exts)) {
3145                /*
3146                 * Again, for backward-compatible mode: we want
3147                 * to work with both old and new modes of entering
3148                 * tc data even if iproute2 is newer - jhs
3149                 */
3150                if (exts->type != TCA_OLD_COMPAT) {
3151                        nest = nla_nest_start_noflag(skb, exts->action);
3152                        if (nest == NULL)
3153                                goto nla_put_failure;
3154
3155                        if (tcf_action_dump(skb, exts->actions, 0, 0) < 0)
3156                                goto nla_put_failure;
3157                        nla_nest_end(skb, nest);
3158                } else if (exts->police) {
3159                        struct tc_action *act = tcf_exts_first_act(exts);
3160                        nest = nla_nest_start_noflag(skb, exts->police);
3161                        if (nest == NULL || !act)
3162                                goto nla_put_failure;
3163                        if (tcf_action_dump_old(skb, act, 0, 0) < 0)
3164                                goto nla_put_failure;
3165                        nla_nest_end(skb, nest);
3166                }
3167        }
3168        return 0;
3169
3170nla_put_failure:
3171        nla_nest_cancel(skb, nest);
3172        return -1;
3173#else
3174        return 0;
3175#endif
3176}
3177EXPORT_SYMBOL(tcf_exts_dump);
3178
3180int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
3181{
3182#ifdef CONFIG_NET_CLS_ACT
3183        struct tc_action *a = tcf_exts_first_act(exts);
3184        if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
3185                return -1;
3186#endif
3187        return 0;
3188}
3189EXPORT_SYMBOL(tcf_exts_dump_stats);
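
/* Illustrative sketch (not part of the original file): a classifier's
 * ->dump() callback typically emits its own attributes, then delegates the
 * action attributes and the action statistics to the two helpers above.
 * Loosely modeled on cls_basic; example_filter is the hypothetical type
 * sketched earlier.
 */
static int example_filter_dump(struct net *net, struct tcf_proto *tp,
                               void *fh, struct sk_buff *skb, struct tcmsg *t,
                               bool rtnl_held)
{
        struct example_filter *f = fh;
        struct nlattr *nest;

        if (!f)
                return skb->len;

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (!nest)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &f->exts) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        /* Action stats live outside the options nest. */
        if (tcf_exts_dump_stats(skb, &f->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}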
3190
3191static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
3192{
3193        if (*flags & TCA_CLS_FLAGS_IN_HW)
3194                return;
3195        *flags |= TCA_CLS_FLAGS_IN_HW;
3196        atomic_inc(&block->offloadcnt);
3197}
3198
3199static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
3200{
3201        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
3202                return;
3203        *flags &= ~TCA_CLS_FLAGS_IN_HW;
3204        atomic_dec(&block->offloadcnt);
3205}
3206
3207static void tc_cls_offload_cnt_update(struct tcf_block *block,
3208                                      struct tcf_proto *tp, u32 *cnt,
3209                                      u32 *flags, u32 diff, bool add)
3210{
3211        lockdep_assert_held(&block->cb_lock);
3212
3213        spin_lock(&tp->lock);
3214        if (add) {
3215                if (!*cnt)
3216                        tcf_block_offload_inc(block, flags);
3217                *cnt += diff;
3218        } else {
3219                *cnt -= diff;
3220                if (!*cnt)
3221                        tcf_block_offload_dec(block, flags);
3222        }
3223        spin_unlock(&tp->lock);
3224}
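
/* Worked example (not part of the original file): a filter is added to a
 * block shared by three offload-capable devices and all three callbacks
 * succeed, so tc_setup_cb_add() below calls tc_cls_offload_cnt_update()
 * with diff == 3 and add == true. Since *cnt was previously zero, the
 * filter's TCA_CLS_FLAGS_IN_HW flag is set and block->offloadcnt is bumped
 * exactly once, after which *cnt == 3. When the last of those offloads is
 * undone and *cnt drops back to zero, the flag is cleared and
 * block->offloadcnt decremented, again exactly once.
 */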
3225
3226static void
3227tc_cls_offload_cnt_reset(struct tcf_block *block, struct tcf_proto *tp,
3228                         u32 *cnt, u32 *flags)
3229{
3230        lockdep_assert_held(&block->cb_lock);
3231
3232        spin_lock(&tp->lock);
3233        tcf_block_offload_dec(block, flags);
3234        *cnt = 0;
3235        spin_unlock(&tp->lock);
3236}
3237
3238static int
3239__tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3240                   void *type_data, bool err_stop)
3241{
3242        struct flow_block_cb *block_cb;
3243        int ok_count = 0;
3244        int err;
3245
3246        list_for_each_entry(block_cb, &block->flow_block.cb_list, list) {
3247                err = block_cb->cb(type, type_data, block_cb->cb_priv);
3248                if (err) {
3249                        if (err_stop)
3250                                return err;
3251                } else {
3252                        ok_count++;
3253                }
3254        }
3255        return ok_count;
3256}
3257
3258int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
3259                     void *type_data, bool err_stop, bool rtnl_held)
3260{
3261        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3262        int ok_count;
3263
3264retry:
3265        if (take_rtnl)
3266                rtnl_lock();
3267        down_read(&block->cb_lock);
3268        /* Need to obtain rtnl lock if block is bound to devs that require it.
3269         * In the block bind code, cb_lock is taken while holding rtnl, so we must
3270         * take the locks in the same order here.
3271         */
3272        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3273                up_read(&block->cb_lock);
3274                take_rtnl = true;
3275                goto retry;
3276        }
3277
3278        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3279
3280        up_read(&block->cb_lock);
3281        if (take_rtnl)
3282                rtnl_unlock();
3283        return ok_count;
3284}
3285EXPORT_SYMBOL(tc_setup_cb_call);
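
/* Illustrative sketch (not part of the original file): a hardware stats
 * query is a typical tc_setup_cb_call() user. It changes no offload state,
 * so no counters are touched, and err_stop is false so that every registered
 * callback can contribute statistics. Loosely modeled on cls_flower's stats
 * update; example_filter is the hypothetical type sketched earlier.
 */
static void example_hw_update_stats(struct tcf_block *block,
                                    struct example_filter *f, bool rtnl_held)
{
        struct flow_cls_offload cls_flower = {};

        cls_flower.command = FLOW_CLS_STATS;
        cls_flower.cookie = (unsigned long)f;

        tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false,
                         rtnl_held);

        /* Fold what the drivers reported back into the software actions. */
        tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
                              cls_flower.stats.pkts,
                              cls_flower.stats.lastused);
}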
3286
3287/* Non-destructive filter add. If a filter that wasn't already in hardware is
3288 * successfully offloaded, increment the block's offload counter. On failure,
3289 * the previously offloaded filter is considered intact and the offload counter
3290 * is not decremented.
3291 */
3292
3293int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
3294                    enum tc_setup_type type, void *type_data, bool err_stop,
3295                    u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3296{
3297        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3298        int ok_count;
3299
3300retry:
3301        if (take_rtnl)
3302                rtnl_lock();
3303        down_read(&block->cb_lock);
3304        /* Need to obtain rtnl lock if block is bound to devs that require it.
3305         * In the block bind code, cb_lock is taken while holding rtnl, so we must
3306         * take the locks in the same order here.
3307         */
3308        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3309                up_read(&block->cb_lock);
3310                take_rtnl = true;
3311                goto retry;
3312        }
3313
3314        /* Make sure all netdevs sharing this block are offload-capable. */
3315        if (block->nooffloaddevcnt && err_stop) {
3316                ok_count = -EOPNOTSUPP;
3317                goto err_unlock;
3318        }
3319
3320        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3321        if (ok_count < 0)
3322                goto err_unlock;
3323
3324        if (tp->ops->hw_add)
3325                tp->ops->hw_add(tp, type_data);
3326        if (ok_count > 0)
3327                tc_cls_offload_cnt_update(block, tp, in_hw_count, flags,
3328                                          ok_count, true);
3329err_unlock:
3330        up_read(&block->cb_lock);
3331        if (take_rtnl)
3332                rtnl_unlock();
3333        return ok_count < 0 ? ok_count : 0;
3334}
3335EXPORT_SYMBOL(tc_setup_cb_add);
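
/* Illustrative sketch (not part of the original file): the driver-facing add
 * path of a classifier, loosely modeled on cls_flower. For skip_sw filters
 * the offload must succeed, so err_stop is true; on success the helper has
 * already updated f->flags and f->in_hw_count. Real callers also tear the
 * hardware state back down on the error paths below.
 */
static int example_hw_add(struct tcf_proto *tp, struct tcf_block *block,
                          struct example_filter *f, bool rtnl_held)
{
        struct flow_cls_offload cls_flower = {};
        bool skip_sw = tc_skip_sw(f->flags);
        int err;

        cls_flower.command = FLOW_CLS_REPLACE;
        cls_flower.cookie = (unsigned long)f;

        err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
                              skip_sw, &f->flags, &f->in_hw_count, rtnl_held);
        if (err)
                return err;

        /* A skip_sw filter that landed in no hardware at all is an error. */
        if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}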
3336
3337/* Destructive filter replace. If a filter that wasn't already in hardware is
3338 * successfully offloaded, increment the block's offload counter. On failure,
3339 * the previously offloaded filter is considered destroyed and the offload
3340 * counter is decremented.
3341 */
3342
3343int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
3344                        enum tc_setup_type type, void *type_data, bool err_stop,
3345                        u32 *old_flags, unsigned int *old_in_hw_count,
3346                        u32 *new_flags, unsigned int *new_in_hw_count,
3347                        bool rtnl_held)
3348{
3349        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3350        int ok_count;
3351
3352retry:
3353        if (take_rtnl)
3354                rtnl_lock();
3355        down_read(&block->cb_lock);
3356        /* Need to obtain rtnl lock if block is bound to devs that require it.
3357         * In the block bind code, cb_lock is taken while holding rtnl, so we must
3358         * take the locks in the same order here.
3359         */
3360        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3361                up_read(&block->cb_lock);
3362                take_rtnl = true;
3363                goto retry;
3364        }
3365
3366        /* Make sure all netdevs sharing this block are offload-capable. */
3367        if (block->nooffloaddevcnt && err_stop) {
3368                ok_count = -EOPNOTSUPP;
3369                goto err_unlock;
3370        }
3371
3372        tc_cls_offload_cnt_reset(block, tp, old_in_hw_count, old_flags);
3373        if (tp->ops->hw_del)
3374                tp->ops->hw_del(tp, type_data);
3375
3376        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3377        if (ok_count < 0)
3378                goto err_unlock;
3379
3380        if (tp->ops->hw_add)
3381                tp->ops->hw_add(tp, type_data);
3382        if (ok_count > 0)
3383                tc_cls_offload_cnt_update(block, tp, new_in_hw_count,
3384                                          new_flags, ok_count, true);
3385err_unlock:
3386        up_read(&block->cb_lock);
3387        if (take_rtnl)
3388                rtnl_unlock();
3389        return ok_count < 0 ? ok_count : 0;
3390}
3391EXPORT_SYMBOL(tc_setup_cb_replace);
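
/* Illustrative sketch (not part of the original file): unlike
 * tc_setup_cb_add(), the replace variant first forgets the old filter's
 * offload state (counter reset, ->hw_del()) before installing the new one,
 * so a callback failure leaves the old filter destroyed rather than intact,
 * as the comment above warns. The call shape below is hypothetical; in the
 * tree, cls_u32 is the main tc_setup_cb_replace() user.
 */
static int example_hw_replace(struct tcf_proto *tp, struct tcf_block *block,
                              struct example_filter *old_f,
                              struct example_filter *new_f, bool rtnl_held)
{
        struct flow_cls_offload cls_flower = {};

        cls_flower.command = FLOW_CLS_REPLACE;
        cls_flower.cookie = (unsigned long)new_f;

        return tc_setup_cb_replace(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
                                   tc_skip_sw(new_f->flags),
                                   &old_f->flags, &old_f->in_hw_count,
                                   &new_f->flags, &new_f->in_hw_count,
                                   rtnl_held);
}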
3392
3393/* Destroy the filter and decrement the block's offload counter if the filter
3394 * was previously offloaded.
3395 */
3396
3397int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
3398                        enum tc_setup_type type, void *type_data, bool err_stop,
3399                        u32 *flags, unsigned int *in_hw_count, bool rtnl_held)
3400{
3401        bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
3402        int ok_count;
3403
3404retry:
3405        if (take_rtnl)
3406                rtnl_lock();
3407        down_read(&block->cb_lock);
3408        /* Need to obtain rtnl lock if block is bound to devs that require it.
3409         * In the block bind code, cb_lock is taken while holding rtnl, so we must
3410         * take the locks in the same order here.
3411         */
3412        if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
3413                up_read(&block->cb_lock);
3414                take_rtnl = true;
3415                goto retry;
3416        }
3417
3418        ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
3419
3420        tc_cls_offload_cnt_reset(block, tp, in_hw_count, flags);
3421        if (tp->ops->hw_del)
3422                tp->ops->hw_del(tp, type_data);
3423
3424        up_read(&block->cb_lock);
3425        if (take_rtnl)
3426                rtnl_unlock();
3427        return ok_count < 0 ? ok_count : 0;
3428}
3429EXPORT_SYMBOL(tc_setup_cb_destroy);
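
/* Illustrative sketch (not part of the original file): the matching destroy
 * path. err_stop is false because a destroy must reach every callback
 * regardless of individual failures; the helper then resets f->in_hw_count
 * and clears TCA_CLS_FLAGS_IN_HW unconditionally.
 */
static void example_hw_destroy(struct tcf_proto *tp, struct tcf_block *block,
                               struct example_filter *f, bool rtnl_held)
{
        struct flow_cls_offload cls_flower = {};

        cls_flower.command = FLOW_CLS_DESTROY;
        cls_flower.cookie = (unsigned long)f;

        tc_setup_cb_destroy(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
                            false, &f->flags, &f->in_hw_count, rtnl_held);
}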
3430
3431int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
3432                          bool add, flow_setup_cb_t *cb,
3433                          enum tc_setup_type type, void *type_data,
3434                          void *cb_priv, u32 *flags, unsigned int *in_hw_count)
3435{
3436        int err = cb(type, type_data, cb_priv);
3437
3438        if (err) {
3439                if (add && tc_skip_sw(*flags))
3440                        return err;
3441        } else {
3442                tc_cls_offload_cnt_update(block, tp, in_hw_count, flags, 1,
3443                                          add);
3444        }
3445
3446        return 0;
3447}
3448EXPORT_SYMBOL(tc_setup_cb_reoffload);
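
/* Illustrative sketch (not part of the original file): ->reoffload() runs
 * when a hardware callback binds to (add == true) or unbinds from an
 * already populated block, replaying each filter to that one callback so
 * its rule set and the per-filter in_hw_count stay consistent. Loosely
 * modeled on cls_flower's ->reoffload(); building the actual offload rule
 * is elided.
 */
static int example_hw_reoffload(struct tcf_proto *tp, struct tcf_block *block,
                                struct example_filter *f, bool add,
                                flow_setup_cb_t *cb, void *cb_priv)
{
        struct flow_cls_offload cls_flower = {};

        cls_flower.command = add ? FLOW_CLS_REPLACE : FLOW_CLS_DESTROY;
        cls_flower.cookie = (unsigned long)f;

        return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSFLOWER,
                                     &cls_flower, cb_priv, &f->flags,
                                     &f->in_hw_count);
}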
3449
3450static int tcf_act_get_cookie(struct flow_action_entry *entry,
3451                              const struct tc_action *act)
3452{
3453        struct tc_cookie *cookie;
3454        int err = 0;
3455
3456        rcu_read_lock();
3457        cookie = rcu_dereference(act->act_cookie);
3458        if (cookie) {
3459                entry->cookie = flow_action_cookie_create(cookie->data,
3460                                                          cookie->len,
3461                                                          GFP_ATOMIC);
3462                if (!entry->cookie)
3463                        err = -ENOMEM;
3464        }
3465        rcu_read_unlock();
3466        return err;
3467}
3468
3469static void tcf_act_put_cookie(struct flow_action_entry *entry)
3470{
3471        flow_action_cookie_destroy(entry->cookie);
3472}
3473
3474void tc_cleanup_flow_action(struct flow_action *flow_action)
3475{
3476        struct flow_action_entry *entry;
3477        int i;
3478
3479        flow_action_for_each(i, entry, flow_action) {
3480                tcf_act_put_cookie(entry);
3481                if (entry->destructor)
3482                        entry->destructor(entry->destructor_priv);
3483        }
3484}
3485EXPORT_SYMBOL(tc_cleanup_flow_action);
3486
3487static void tcf_mirred_get_dev(struct flow_action_entry *entry,
3488                               const struct tc_action *act)
3489{
3490#ifdef CONFIG_NET_CLS_ACT
3491        entry->dev = act->ops->get_dev(act, &entry->destructor);
3492        if (!entry->dev)
3493                return;
3494        entry->destructor_priv = entry->dev;
3495#endif
3496}
3497
3498static void tcf_tunnel_encap_put_tunnel(void *priv)
3499{
3500        struct ip_tunnel_info *tunnel = priv;
3501
3502        kfree(tunnel);
3503}
3504
3505static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
3506                                       const struct tc_action *act)
3507{
3508        entry->tunnel = tcf_tunnel_info_copy(act);
3509        if (!entry->tunnel)
3510                return -ENOMEM;
3511        entry->destructor = tcf_tunnel_encap_put_tunnel;
3512        entry->destructor_priv = entry->tunnel;
3513        return 0;
3514}
3515
3516static void tcf_sample_get_group(struct flow_action_entry *entry,
3517                                 const struct tc_action *act)
3518{
3519#ifdef CONFIG_NET_CLS_ACT
3520        entry->sample.psample_group =
3521                act->ops->get_psample_group(act, &entry->destructor);
3522        entry->destructor_priv = entry->sample.psample_group;
3523#endif
3524}
3525
3526static enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
3527{
3528        if (WARN_ON_ONCE(hw_stats > TCA_ACT_HW_STATS_ANY))
3529                return FLOW_ACTION_HW_STATS_DONT_CARE;
3530        else if (!hw_stats)
3531                return FLOW_ACTION_HW_STATS_DISABLED;
3532
3533        return hw_stats;
3534}
3535
3536int tc_setup_flow_action(struct flow_action *flow_action,
3537                         const struct tcf_exts *exts)
3538{
3539        struct tc_action *act;
3540        int i, j, k, err = 0;
3541
3542        BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
3543        BUILD_BUG_ON(TCA_ACT_HW_STATS_IMMEDIATE != FLOW_ACTION_HW_STATS_IMMEDIATE);
3544        BUILD_BUG_ON(TCA_ACT_HW_STATS_DELAYED != FLOW_ACTION_HW_STATS_DELAYED);
3545
3546        if (!exts)
3547                return 0;
3548
3549        j = 0;
3550        tcf_exts_for_each_action(i, act, exts) {
3551                struct flow_action_entry *entry;
3552
3553                entry = &flow_action->entries[j];
3554                spin_lock_bh(&act->tcfa_lock);
3555                err = tcf_act_get_cookie(entry, act);
3556                if (err)
3557                        goto err_out_locked;
3558
3559                entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3560
3561                if (is_tcf_gact_ok(act)) {
3562                        entry->id = FLOW_ACTION_ACCEPT;
3563                } else if (is_tcf_gact_shot(act)) {
3564                        entry->id = FLOW_ACTION_DROP;
3565                } else if (is_tcf_gact_trap(act)) {
3566                        entry->id = FLOW_ACTION_TRAP;
3567                } else if (is_tcf_gact_goto_chain(act)) {
3568                        entry->id = FLOW_ACTION_GOTO;
3569                        entry->chain_index = tcf_gact_goto_chain_index(act);
3570                } else if (is_tcf_mirred_egress_redirect(act)) {
3571                        entry->id = FLOW_ACTION_REDIRECT;
3572                        tcf_mirred_get_dev(entry, act);
3573                } else if (is_tcf_mirred_egress_mirror(act)) {
3574                        entry->id = FLOW_ACTION_MIRRED;
3575                        tcf_mirred_get_dev(entry, act);
3576                } else if (is_tcf_mirred_ingress_redirect(act)) {
3577                        entry->id = FLOW_ACTION_REDIRECT_INGRESS;
3578                        tcf_mirred_get_dev(entry, act);
3579                } else if (is_tcf_mirred_ingress_mirror(act)) {
3580                        entry->id = FLOW_ACTION_MIRRED_INGRESS;
3581                        tcf_mirred_get_dev(entry, act);
3582                } else if (is_tcf_vlan(act)) {
3583                        switch (tcf_vlan_action(act)) {
3584                        case TCA_VLAN_ACT_PUSH:
3585                                entry->id = FLOW_ACTION_VLAN_PUSH;
3586                                entry->vlan.vid = tcf_vlan_push_vid(act);
3587                                entry->vlan.proto = tcf_vlan_push_proto(act);
3588                                entry->vlan.prio = tcf_vlan_push_prio(act);
3589                                break;
3590                        case TCA_VLAN_ACT_POP:
3591                                entry->id = FLOW_ACTION_VLAN_POP;
3592                                break;
3593                        case TCA_VLAN_ACT_MODIFY:
3594                                entry->id = FLOW_ACTION_VLAN_MANGLE;
3595                                entry->vlan.vid = tcf_vlan_push_vid(act);
3596                                entry->vlan.proto = tcf_vlan_push_proto(act);
3597                                entry->vlan.prio = tcf_vlan_push_prio(act);
3598                                break;
3599                        default:
3600                                err = -EOPNOTSUPP;
3601                                goto err_out_locked;
3602                        }
3603                } else if (is_tcf_tunnel_set(act)) {
3604                        entry->id = FLOW_ACTION_TUNNEL_ENCAP;
3605                        err = tcf_tunnel_encap_get_tunnel(entry, act);
3606                        if (err)
3607                                goto err_out_locked;
3608                } else if (is_tcf_tunnel_release(act)) {
3609                        entry->id = FLOW_ACTION_TUNNEL_DECAP;
3610                } else if (is_tcf_pedit(act)) {
3611                        for (k = 0; k < tcf_pedit_nkeys(act); k++) {
3612                                switch (tcf_pedit_cmd(act, k)) {
3613                                case TCA_PEDIT_KEY_EX_CMD_SET:
3614                                        entry->id = FLOW_ACTION_MANGLE;
3615                                        break;
3616                                case TCA_PEDIT_KEY_EX_CMD_ADD:
3617                                        entry->id = FLOW_ACTION_ADD;
3618                                        break;
3619                                default:
3620                                        err = -EOPNOTSUPP;
3621                                        goto err_out_locked;
3622                                }
3623                                entry->mangle.htype = tcf_pedit_htype(act, k);
3624                                entry->mangle.mask = tcf_pedit_mask(act, k);
3625                                entry->mangle.val = tcf_pedit_val(act, k);
3626                                entry->mangle.offset = tcf_pedit_offset(act, k);
3627                                entry->hw_stats = tc_act_hw_stats(act->hw_stats);
3628                                entry = &flow_action->entries[++j];
3629                        }
3630                } else if (is_tcf_csum(act)) {
3631                        entry->id = FLOW_ACTION_CSUM;
3632                        entry->csum_flags = tcf_csum_update_flags(act);
3633                } else if (is_tcf_skbedit_mark(act)) {
3634                        entry->id = FLOW_ACTION_MARK;
3635                        entry->mark = tcf_skbedit_mark(act);
3636                } else if (is_tcf_sample(act)) {
3637                        entry->id = FLOW_ACTION_SAMPLE;
3638                        entry->sample.trunc_size = tcf_sample_trunc_size(act);
3639                        entry->sample.truncate = tcf_sample_truncate(act);
3640                        entry->sample.rate = tcf_sample_rate(act);
3641                        tcf_sample_get_group(entry, act);
3642                } else if (is_tcf_police(act)) {
3643                        entry->id = FLOW_ACTION_POLICE;
3644                        entry->police.burst = tcf_police_tcfp_burst(act);
3645                        entry->police.rate_bytes_ps =
3646                                tcf_police_rate_bytes_ps(act);
3647                } else if (is_tcf_ct(act)) {
3648                        entry->id = FLOW_ACTION_CT;
3649                        entry->ct.action = tcf_ct_action(act);
3650                        entry->ct.zone = tcf_ct_zone(act);
3651                        entry->ct.flow_table = tcf_ct_ft(act);
3652                } else if (is_tcf_mpls(act)) {
3653                        switch (tcf_mpls_action(act)) {
3654                        case TCA_MPLS_ACT_PUSH:
3655                                entry->id = FLOW_ACTION_MPLS_PUSH;
3656                                entry->mpls_push.proto = tcf_mpls_proto(act);
3657                                entry->mpls_push.label = tcf_mpls_label(act);
3658                                entry->mpls_push.tc = tcf_mpls_tc(act);
3659                                entry->mpls_push.bos = tcf_mpls_bos(act);
3660                                entry->mpls_push.ttl = tcf_mpls_ttl(act);
3661                                break;
3662                        case TCA_MPLS_ACT_POP:
3663                                entry->id = FLOW_ACTION_MPLS_POP;
3664                                entry->mpls_pop.proto = tcf_mpls_proto(act);
3665                                break;
3666                        case TCA_MPLS_ACT_MODIFY:
3667                                entry->id = FLOW_ACTION_MPLS_MANGLE;
3668                                entry->mpls_mangle.label = tcf_mpls_label(act);
3669                                entry->mpls_mangle.tc = tcf_mpls_tc(act);
3670                                entry->mpls_mangle.bos = tcf_mpls_bos(act);
3671                                entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
3672                                break;
3673                        default:
                                        err = -EOPNOTSUPP;
3674                                goto err_out_locked;
3675                        }
3676                } else if (is_tcf_skbedit_ptype(act)) {
3677                        entry->id = FLOW_ACTION_PTYPE;
3678                        entry->ptype = tcf_skbedit_ptype(act);
3679                } else if (is_tcf_skbedit_priority(act)) {
3680                        entry->id = FLOW_ACTION_PRIORITY;
3681                        entry->priority = tcf_skbedit_priority(act);
3682                } else {
3683                        err = -EOPNOTSUPP;
3684                        goto err_out_locked;
3685                }
3686                spin_unlock_bh(&act->tcfa_lock);
3687
3688                if (!is_tcf_pedit(act))
3689                        j++;
3690        }
3691
3692err_out:
3693        if (err)
3694                tc_cleanup_flow_action(flow_action);
3695
3696        return err;
3697err_out_locked:
3698        spin_unlock_bh(&act->tcfa_lock);
3699        goto err_out;
3700}
3701EXPORT_SYMBOL(tc_setup_flow_action);
3702
3703unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
3704{
3705        unsigned int num_acts = 0;
3706        struct tc_action *act;
3707        int i;
3708
3709        tcf_exts_for_each_action(i, act, exts) {
3710                if (is_tcf_pedit(act))
3711                        num_acts += tcf_pedit_nkeys(act);
3712                else
3713                        num_acts++;
3714        }
3715        return num_acts;
3716}
3717EXPORT_SYMBOL(tcf_exts_num_actions);
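
/* Illustrative sketch (not part of the original file): how a classifier
 * sizes and fills a flow_rule for offload. tcf_exts_num_actions() counts
 * one entry per action except pedit, which contributes one entry per key,
 * mirroring the way tc_setup_flow_action() expands pedit above. Assumes
 * flow_rule_alloc() from net/core/flow_offload.c; on failure,
 * tc_setup_flow_action() has already cleaned up the partial flow_action.
 */
static struct flow_rule *example_build_rule(struct tcf_exts *exts)
{
        struct flow_rule *rule;
        int err;

        rule = flow_rule_alloc(tcf_exts_num_actions(exts));
        if (!rule)
                return ERR_PTR(-ENOMEM);

        err = tc_setup_flow_action(&rule->action, exts);
        if (err) {
                kfree(rule);
                return ERR_PTR(err);
        }

        return rule;
}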
3718
3719static __net_init int tcf_net_init(struct net *net)
3720{
3721        struct tcf_net *tn = net_generic(net, tcf_net_id);
3722
3723        spin_lock_init(&tn->idr_lock);
3724        idr_init(&tn->idr);
3725        return 0;
3726}
3727
3728static void __net_exit tcf_net_exit(struct net *net)
3729{
3730        struct tcf_net *tn = net_generic(net, tcf_net_id);
3731
3732        idr_destroy(&tn->idr);
3733}
3734
3735static struct pernet_operations tcf_net_ops = {
3736        .init = tcf_net_init,
3737        .exit = tcf_net_exit,
3738        .id   = &tcf_net_id,
3739        .size = sizeof(struct tcf_net),
3740};
3741
3742static struct flow_indr_block_entry block_entry = {
3743        .cb = tc_indr_block_get_and_cmd,
3744        .list = LIST_HEAD_INIT(block_entry.list),
3745};
3746
3747static int __init tc_filter_init(void)
3748{
3749        int err;
3750
3751        tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
3752        if (!tc_filter_wq)
3753                return -ENOMEM;
3754
3755        err = register_pernet_subsys(&tcf_net_ops);
3756        if (err)
3757                goto err_register_pernet_subsys;
3758
3759        flow_indr_add_block_cb(&block_entry);
3760
3761        rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
3762                      RTNL_FLAG_DOIT_UNLOCKED);
3763        rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
3764                      RTNL_FLAG_DOIT_UNLOCKED);
3765        rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
3766                      tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
3767        rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
3768        rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
3769        rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
3770                      tc_dump_chain, 0);
3771
3772        return 0;
3773
3774err_register_pernet_subsys:
3775        destroy_workqueue(tc_filter_wq);
3776        return err;
3777}
3778
3779subsys_initcall(tc_filter_init);
3780