linux/include/net/pkt_cls.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
        int     stop;
        int     skip;
        int     count;
        int     (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
        TCF_BLOCK_BINDER_TYPE_UNSPEC,
        TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
        TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
        enum tcf_block_binder_type binder_type;
        tcf_chain_head_change_t *chain_head_change;
        void *chain_head_change_priv;
        u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct work_struct *work);

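/* Illustrative usage sketch (not from this header; the filter struct and
 * function names are hypothetical): classifiers typically defer filter
 * teardown to process context with tcf_queue_work(), because
 * tcf_exts_destroy() may sleep and therefore cannot run from an RCU
 * callback.
 *
 *      static void my_filter_destroy_work(struct work_struct *work)
 *      {
 *              struct my_filter *f = container_of(work, struct my_filter, work);
 *
 *              tcf_exts_destroy(&f->exts);
 *              kfree(f);
 *      }
 *
 *      static void my_filter_destroy_rcu(struct rcu_head *rcu)
 *      {
 *              struct my_filter *f = container_of(rcu, struct my_filter, rcu);
 *
 *              INIT_WORK(&f->work, my_filter_destroy_work);
 *              tcf_queue_work(&f->work);
 *      }
 */
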
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
                                bool create);
void tcf_chain_put(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
        return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
        WARN_ON(tcf_block_shared(block));
        return block->q;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
        return tcf_block_q(block)->dev_queue->dev;
}

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
                                         tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                             tc_setup_cb_t *cb, void *cb_ident,
                                             void *cb_priv);
int tcf_block_cb_register(struct tcf_block *block,
                          tc_setup_cb_t *cb, void *cb_ident,
                          void *cb_priv);
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
                             tc_setup_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                 struct tcf_result *res, bool compat_mode);

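/* Illustrative usage sketch (the qdisc private struct "q" and its members
 * are hypothetical): a qdisc obtains its filter block in ->init(), runs the
 * attached filters on the enqueue path and releases the block in
 * ->destroy().
 *
 *      // ->init()
 *      err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *      if (err)
 *              return err;
 *
 *      // enqueue path
 *      struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *      struct tcf_result res;
 *
 *      switch (tcf_classify(skb, fl, &res, false)) {
 *      case TC_ACT_SHOT:
 *      case TC_ACT_STOLEN:
 *              return NULL;            // packet dropped or consumed by an action
 *      case TC_ACT_OK:
 *              band = res.classid;     // a filter matched; use its result
 *      }
 *
 *      // ->destroy()
 *      tcf_block_put(q->block);
 */
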
#else
static inline
int tcf_block_get(struct tcf_block **p_block,
                  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
                  struct netlink_ext_ack *extack)
{
        return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
                      struct tcf_block_ext_info *ei,
                      struct netlink_ext_ack *extack)
{
        return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
                       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
        return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
        return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
                               void *cb_priv)
{
        return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
                                  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
        return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
                                         tc_setup_cb_t *cb, void *cb_ident)
{
        return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
        return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                             tc_setup_cb_t *cb, void *cb_ident,
                                             void *cb_priv)
{
        return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
                          tc_setup_cb_t *cb, void *cb_ident,
                          void *cb_priv)
{
        return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
                             tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                               struct tcf_result *res, bool compat_mode)
{
        return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
        return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
        unsigned long old_cl;

        sch_tree_lock(q);
        old_cl = __cls_set_class(clp, cl);
        sch_tree_unlock(q);
        return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
        struct Qdisc *q = tp->chain->block->q;
        unsigned long cl;

        /* Check q as it is not set for shared blocks. In that case,
         * setting class is not supported.
         */
        if (!q)
                return;
        cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
        cl = cls_set_class(q, &r->class, cl);
        if (cl)
                q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
        struct Qdisc *q = tp->chain->block->q;
        unsigned long cl;

        if (!q)
                return;
        if ((cl = __cls_set_class(&r->class, 0)) != 0)
                q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
        __u32   type; /* for backward compat(TCA_OLD_COMPAT) */
        int nr_actions;
        struct tc_action **actions;
        struct net *net;
#endif
        /* Map to export classifier specific extension TLV types to the
         * generic extensions API. Unsupported extensions must be set to 0.
         */
        int action;
        int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
        exts->type = 0;
        exts->nr_actions = 0;
        exts->net = NULL;
        exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
                                GFP_KERNEL);
        if (!exts->actions)
                return -ENOMEM;
#endif
        exts->action = action;
        exts->police = police;
        return 0;
}

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Returns true in all other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        exts->net = maybe_get_net(exts->net);
        return exts->net != NULL;
#else
        return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        if (exts->net)
                put_net(exts->net);
#endif
}

static inline void tcf_exts_to_list(const struct tcf_exts *exts,
                                    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
        int i;

        for (i = 0; i < exts->nr_actions; i++) {
                struct tc_action *a = exts->actions[i];

                list_add_tail(&a->list, actions);
        }
#endif
}

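/* Illustrative usage sketch (assumes <net/tc_act/tc_gact.h> for
 * is_tcf_gact_shot()): offloading drivers commonly walk a filter's actions
 * by converting them to a list first.
 *
 *      LIST_HEAD(actions);
 *      struct tc_action *a;
 *
 *      tcf_exts_to_list(exts, &actions);
 *      list_for_each_entry(a, &actions, list) {
 *              if (is_tcf_gact_shot(a)) {
 *                      // translate the drop action into hardware state
 *              }
 *      }
 */
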
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
                      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
        int i;

        preempt_disable();

        for (i = 0; i < exts->nr_actions; i++) {
                struct tc_action *a = exts->actions[i];

                tcf_action_stats_update(a, bytes, packets, lastuse);
        }

        preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        return exts->nr_actions;
#else
        return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
        return exts->nr_actions == 1;
#else
        return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched, or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
              struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
        return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
        return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
                      struct nlattr **tb, struct nlattr *rate_tlv,
                      struct tcf_exts *exts, bool ovr,
                      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

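/* Illustrative usage sketch (TCA_FOO_ACT/TCA_FOO_POLICE, "est" and the
 * filter struct "f" are hypothetical): a classifier's ->change() typically
 * initializes the extensions, validates the netlink attributes into them,
 * and destroys them on error or when the filter is deleted.
 *
 *      err = tcf_exts_init(&f->exts, TCA_FOO_ACT, TCA_FOO_POLICE);
 *      if (err < 0)
 *              return err;
 *
 *      err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, extack);
 *      if (err < 0) {
 *              tcf_exts_destroy(&f->exts);
 *              return err;
 *      }
 */
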
/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
        unsigned char *         ptr;
        int                     nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
        struct tcf_ematch_ops * ops;
        unsigned long           data;
        unsigned int            datalen;
        u16                     matchid;
        u16                     flags;
        struct net              *net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
        return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
        return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
        return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
        return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
        if (tcf_em_last_match(em))
                return 1;

        if (result == 0 && em->flags & TCF_EM_REL_AND)
                return 1;

        if (result != 0 && em->flags & TCF_EM_REL_OR)
                return 1;

        return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
        struct tcf_ematch_tree_hdr hdr;
        struct tcf_ematch *     matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
        int                     kind;
        int                     datalen;
        int                     (*change)(struct net *net, void *,
                                          int, struct tcf_ematch *);
        int                     (*match)(struct sk_buff *, struct tcf_ematch *,
                                         struct tcf_pkt_info *);
        void                    (*destroy)(struct tcf_ematch *);
        int                     (*dump)(struct sk_buff *, struct tcf_ematch *);
        struct module           *owner;
        struct list_head        link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
                         struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
                        struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches while respecting their logic relations, returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel; otherwise
 * 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
                                    struct tcf_ematch_tree *tree,
                                    struct tcf_pkt_info *info)
{
        if (tree->hdr.nmatches)
                return __tcf_em_tree_match(skb, tree, info);
        else
                return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)   MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

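/* Illustrative usage sketch (TCA_FOO_EMATCHES and the filter struct "f"
 * are hypothetical): a classifier validates an ematch tree from its
 * netlink attributes at configuration time and evaluates it per packet.
 *
 *      // ->change()
 *      err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematches);
 *      if (err < 0)
 *              return err;
 *
 *      // ->classify(), while walking the filter list
 *      if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *              continue;       // this filter does not match, try the next one
 *
 *      // ->destroy()
 *      tcf_em_tree_destroy(&f->ematches);
 */
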
static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
        switch (layer) {
                case TCF_LAYER_LINK:
                        return skb_mac_header(skb);
                case TCF_LAYER_NETWORK:
                        return skb_network_header(skb);
                case TCF_LAYER_TRANSPORT:
                        return skb_transport_header(skb);
        }

        return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
                                   const unsigned char *ptr, const int len)
{
        return likely((ptr + len) <= skb_tail_pointer(skb) &&
                      ptr >= skb->head &&
                      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
                 struct netlink_ext_ack *extack)
{
        char indev[IFNAMSIZ];
        struct net_device *dev;

        if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
                NL_SET_ERR_MSG(extack, "Interface name too long");
                return -EINVAL;
        }
        dev = __dev_get_by_name(net, indev);
        if (!dev)
                return -ENODEV;
        return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
        if (!ifindex)
                return true;
        if (!skb->skb_iif)
                return false;
        return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
                     enum tc_setup_type type, void *type_data, bool err_stop);

enum tc_block_command {
        TC_BLOCK_BIND,
        TC_BLOCK_UNBIND,
};

struct tc_block_offload {
        enum tc_block_command command;
        enum tcf_block_binder_type binder_type;
        struct tcf_block *block;
};

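/* Illustrative usage sketch (the driver callback foo_block_cb, a
 * tc_setup_cb_t, and the priv struct are hypothetical): a driver binds to
 * a filter block from its ndo_setup_tc() TC_SETUP_BLOCK handler by
 * registering a block callback.
 *
 *      static int foo_setup_tc_block(struct net_device *dev,
 *                                    struct tc_block_offload *f)
 *      {
 *              struct foo_priv *priv = netdev_priv(dev);
 *
 *              if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 *                      return -EOPNOTSUPP;
 *
 *              switch (f->command) {
 *              case TC_BLOCK_BIND:
 *                      return tcf_block_cb_register(f->block, foo_block_cb,
 *                                                   priv, priv);
 *              case TC_BLOCK_UNBIND:
 *                      tcf_block_cb_unregister(f->block, foo_block_cb, priv);
 *                      return 0;
 *              default:
 *                      return -EOPNOTSUPP;
 *              }
 *      }
 */
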
struct tc_cls_common_offload {
        u32 chain_index;
        __be16 protocol;
        u32 prio;
        struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
        struct tcf_exts *exts;
        struct tc_u32_sel *sel;
        u32 handle;
        u32 val;
        u32 mask;
        u32 link_handle;
        u8 fshift;
};

struct tc_cls_u32_hnode {
        u32 handle;
        u32 prio;
        unsigned int divisor;
};

enum tc_clsu32_command {
        TC_CLSU32_NEW_KNODE,
        TC_CLSU32_REPLACE_KNODE,
        TC_CLSU32_DELETE_KNODE,
        TC_CLSU32_NEW_HNODE,
        TC_CLSU32_REPLACE_HNODE,
        TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
        struct tc_cls_common_offload common;
        /* knode values */
        enum tc_clsu32_command command;
        union {
                struct tc_cls_u32_knode knode;
                struct tc_cls_u32_hnode hnode;
        };
};

static inline bool tc_can_offload(const struct net_device *dev)
{
        return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
                                         struct netlink_ext_ack *extack)
{
        bool can = tc_can_offload(dev);

        if (!can)
                NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

        return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
                              struct tc_cls_common_offload *common)
{
        if (!tc_can_offload_extack(dev, common->extack))
                return false;
        if (common->chain_index) {
                NL_SET_ERR_MSG(common->extack,
                               "Driver supports only offload of chain 0");
                return false;
        }
        return true;
}

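/* Illustrative usage sketch (foo_setup_tc_cls_flower is hypothetical): a
 * driver's classifier offload handler typically rejects requests it cannot
 * serve before parsing them, e.g. for a TC_SETUP_CLSFLOWER request:
 *
 *      static int foo_setup_tc_cls_flower(struct net_device *dev,
 *                                         struct tc_cls_flower_offload *f)
 *      {
 *              if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *                      return -EOPNOTSUPP;
 *              ...
 *      }
 */
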
static inline bool tc_skip_hw(u32 flags)
{
        return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
        return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
        if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
                return false;

        if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
                return false;

        return true;
}

static inline bool tc_in_hw(u32 flags)
{
        return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
                           const struct tcf_proto *tp, u32 flags,
                           struct netlink_ext_ack *extack)
{
        cls_common->chain_index = tp->chain->index;
        cls_common->protocol = tp->protocol;
        cls_common->prio = tp->prio;
        if (tc_skip_sw(flags))
                cls_common->extack = extack;
}

enum tc_fl_command {
        TC_CLSFLOWER_REPLACE,
        TC_CLSFLOWER_DESTROY,
        TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
        struct tc_cls_common_offload common;
        enum tc_fl_command command;
        unsigned long cookie;
        struct flow_dissector *dissector;
        struct fl_flow_key *mask;
        struct fl_flow_key *key;
        struct tcf_exts *exts;
        u32 classid;
};

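/* Illustrative usage sketch (cls_flower-style; the filter "f", dissector,
 * mask and key variables are hypothetical): a classifier offloads a filter
 * by filling the command structure and calling back into all drivers bound
 * to the block.
 *
 *      struct tc_cls_flower_offload cls_flower = {};
 *      bool skip_sw = tc_skip_sw(f->flags);
 *      int err;
 *
 *      tc_cls_common_offload_init(&cls_flower.common, tp, f->flags, extack);
 *      cls_flower.command = TC_CLSFLOWER_REPLACE;
 *      cls_flower.cookie = (unsigned long) f;
 *      cls_flower.dissector = dissector;
 *      cls_flower.mask = mask;
 *      cls_flower.key = key;
 *      cls_flower.exts = &f->exts;
 *      cls_flower.classid = f->res.classid;
 *
 *      err = tc_setup_cb_call(block, &f->exts, TC_SETUP_CLSFLOWER,
 *                             &cls_flower, skip_sw);
 *      if (err < 0)
 *              return err;
 *      else if (err > 0)
 *              f->flags |= TCA_CLS_FLAGS_IN_HW;
 *
 *      if (skip_sw && !tc_in_hw(f->flags))
 *              return -EINVAL;         // skip_sw requested but nothing offloaded
 */
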
enum tc_matchall_command {
        TC_CLSMATCHALL_REPLACE,
        TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
        struct tc_cls_common_offload common;
        enum tc_matchall_command command;
        struct tcf_exts *exts;
        unsigned long cookie;
};

enum tc_clsbpf_command {
        TC_CLSBPF_OFFLOAD,
        TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
        struct tc_cls_common_offload common;
        enum tc_clsbpf_command command;
        struct tcf_exts *exts;
        struct bpf_prog *prog;
        struct bpf_prog *oldprog;
        const char *name;
        bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
        /* struct tc_mqprio_qopt must always be the first element */
        struct tc_mqprio_qopt qopt;
        u16 mode;
        u16 shaper;
        u32 flags;
        u64 min_rate[TC_QOPT_MAX_QUEUE];
        u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from userspace
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
        u8  *data;
        u32 len;
};

struct tc_qopt_offload_stats {
        struct gnet_stats_basic_packed *bstats;
        struct gnet_stats_queue *qstats;
};

enum tc_red_command {
        TC_RED_REPLACE,
        TC_RED_DESTROY,
        TC_RED_STATS,
        TC_RED_XSTATS,
};

struct tc_red_qopt_offload_params {
        u32 min;
        u32 max;
        u32 probability;
        bool is_ecn;
        struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
        enum tc_red_command command;
        u32 handle;
        u32 parent;
        union {
                struct tc_red_qopt_offload_params set;
                struct tc_qopt_offload_stats stats;
                struct red_stats *xstats;
        };
};

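/* Illustrative usage sketch (qdisc-private names are hypothetical, and the
 * TC_SETUP_QDISC_RED setup type is assumed here rather than defined in this
 * header): a RED qdisc can offer its parameters to the device via
 * ndo_setup_tc().
 *
 *      struct tc_red_qopt_offload opt = {
 *              .command        = TC_RED_REPLACE,
 *              .handle         = sch->handle,
 *              .parent         = sch->parent,
 *              .set.min        = q->parms.qth_min >> q->parms.Wlog,
 *              .set.max        = q->parms.qth_max >> q->parms.Wlog,
 *              .set.probability = q->parms.max_P,
 *              .set.is_ecn     = red_use_ecn(q),
 *              .set.qstats     = &sch->qstats,
 *      };
 *
 *      if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc)
 *              dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
 */
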
enum tc_prio_command {
        TC_PRIO_REPLACE,
        TC_PRIO_DESTROY,
        TC_PRIO_STATS,
        TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
        int bands;
        u8 priomap[TC_PRIO_MAX + 1];
        /* If an offloaded prio qdisc is changed to a non-offloadable
         * config, it needs to update the backlog & qlen values to negate
         * the HW backlog & qlen values (and only them).
         */
        struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
        u8 band;
        u32 child_handle;
};

struct tc_prio_qopt_offload {
        enum tc_prio_command command;
        u32 handle;
        u32 parent;
        union {
                struct tc_prio_qopt_offload_params replace_params;
                struct tc_qopt_offload_stats stats;
                struct tc_prio_qopt_offload_graft_params graft_params;
        };
};

#endif