linux/include/net/flow_offload.h
#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/flow_dissector.h>

struct flow_match {
        struct flow_dissector   *dissector;
        void                    *mask;
        void                    *key;
};

struct flow_match_meta {
        struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
        struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
        struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
        struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
        struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
        struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
        struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
        struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
        struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
        struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
        struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
        struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
        struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
        struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
        struct flow_dissector_key_ct *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
                          struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
                           struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
                             struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
                               struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
                          struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
                           struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
                        struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
                           struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
                         struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
                          struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
                          struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
                                 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
                            struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
                               struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
                               struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
                              struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
                        struct flow_match_ct *out);

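/* Illustrative sketch, not part of the kernel API: how a driver's rule parser
 * might use the flow_rule_match_*() helpers above together with
 * flow_rule_match_key() (defined further down in this header). The function
 * and structure names foo_parse_rule and foo_flow are hypothetical.
 *
 *	static int foo_parse_rule(const struct flow_rule *rule,
 *				  struct foo_flow *flow)
 *	{
 *		struct flow_match_basic basic;
 *		struct flow_match_ipv4_addrs ipv4;
 *
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *			flow_rule_match_basic(rule, &basic);
 *			flow->l3_proto = basic.key->n_proto & basic.mask->n_proto;
 *		}
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
 *			flow_rule_match_ipv4_addrs(rule, &ipv4);
 *			flow->src_ip = ipv4.key->src & ipv4.mask->src;
 *		}
 *		return 0;
 *	}
 */
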
enum flow_action_id {
        FLOW_ACTION_ACCEPT              = 0,
        FLOW_ACTION_DROP,
        FLOW_ACTION_TRAP,
        FLOW_ACTION_GOTO,
        FLOW_ACTION_REDIRECT,
        FLOW_ACTION_MIRRED,
        FLOW_ACTION_REDIRECT_INGRESS,
        FLOW_ACTION_MIRRED_INGRESS,
        FLOW_ACTION_VLAN_PUSH,
        FLOW_ACTION_VLAN_POP,
        FLOW_ACTION_VLAN_MANGLE,
        FLOW_ACTION_TUNNEL_ENCAP,
        FLOW_ACTION_TUNNEL_DECAP,
        FLOW_ACTION_MANGLE,
        FLOW_ACTION_ADD,
        FLOW_ACTION_CSUM,
        FLOW_ACTION_MARK,
        FLOW_ACTION_PTYPE,
        FLOW_ACTION_PRIORITY,
        FLOW_ACTION_WAKE,
        FLOW_ACTION_QUEUE,
        FLOW_ACTION_SAMPLE,
        FLOW_ACTION_POLICE,
        FLOW_ACTION_CT,
        FLOW_ACTION_CT_METADATA,
        FLOW_ACTION_MPLS_PUSH,
        FLOW_ACTION_MPLS_POP,
        FLOW_ACTION_MPLS_MANGLE,
        FLOW_ACTION_GATE,
        NUM_FLOW_ACTIONS,
};

/* This mirrors the enum pedit_header_type definition, for easy mapping to and
 * from the tc pedit action. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK maps
 * to FLOW_ACT_MANGLE_UNSPEC, which no driver supports.
 */
enum flow_action_mangle_base {
        FLOW_ACT_MANGLE_UNSPEC          = 0,
        FLOW_ACT_MANGLE_HDR_TYPE_ETH,
        FLOW_ACT_MANGLE_HDR_TYPE_IP4,
        FLOW_ACT_MANGLE_HDR_TYPE_IP6,
        FLOW_ACT_MANGLE_HDR_TYPE_TCP,
        FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

enum flow_action_hw_stats_bit {
        FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
        FLOW_ACTION_HW_STATS_DELAYED_BIT,
        FLOW_ACTION_HW_STATS_DISABLED_BIT,

        FLOW_ACTION_HW_STATS_NUM_BITS
};

enum flow_action_hw_stats {
        FLOW_ACTION_HW_STATS_IMMEDIATE =
                BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
        FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
        FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
                                   FLOW_ACTION_HW_STATS_DELAYED,
        FLOW_ACTION_HW_STATS_DISABLED =
                BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
        FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};

typedef void (*action_destr)(void *priv);

struct flow_action_cookie {
        u32 cookie_len;
        u8 cookie[];
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
                                                     unsigned int len,
                                                     gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

struct flow_action_entry {
        enum flow_action_id             id;
        enum flow_action_hw_stats       hw_stats;
        action_destr                    destructor;
        void                            *destructor_priv;
        union {
                u32                     chain_index;    /* FLOW_ACTION_GOTO */
                struct net_device       *dev;           /* FLOW_ACTION_REDIRECT */
                struct {                                /* FLOW_ACTION_VLAN */
                        u16             vid;
                        __be16          proto;
                        u8              prio;
                } vlan;
                struct {                                /* FLOW_ACTION_MANGLE */
                                                        /* FLOW_ACTION_ADD */
                        enum flow_action_mangle_base htype;
                        u32             offset;
                        u32             mask;
                        u32             val;
                } mangle;
                struct ip_tunnel_info   *tunnel;        /* FLOW_ACTION_TUNNEL_ENCAP */
                u32                     csum_flags;     /* FLOW_ACTION_CSUM */
                u32                     mark;           /* FLOW_ACTION_MARK */
                u16                     ptype;          /* FLOW_ACTION_PTYPE */
                u32                     priority;       /* FLOW_ACTION_PRIORITY */
                struct {                                /* FLOW_ACTION_QUEUE */
                        u32             ctx;
                        u32             index;
                        u8              vf;
                } queue;
                struct {                                /* FLOW_ACTION_SAMPLE */
                        struct psample_group    *psample_group;
                        u32                     rate;
                        u32                     trunc_size;
                        bool                    truncate;
                } sample;
                struct {                                /* FLOW_ACTION_POLICE */
                        u32                     index;
                        u32                     burst;
                        u64                     rate_bytes_ps;
                        u32                     mtu;
                } police;
                struct {                                /* FLOW_ACTION_CT */
                        int action;
                        u16 zone;
                        struct nf_flowtable *flow_table;
                } ct;
                struct {                                /* FLOW_ACTION_CT_METADATA */
                        unsigned long cookie;
                        u32 mark;
                        u32 labels[4];
                } ct_metadata;
                struct {                                /* FLOW_ACTION_MPLS_PUSH */
                        u32             label;
                        __be16          proto;
                        u8              tc;
                        u8              bos;
                        u8              ttl;
                } mpls_push;
                struct {                                /* FLOW_ACTION_MPLS_POP */
                        __be16          proto;
                } mpls_pop;
                struct {                                /* FLOW_ACTION_MPLS_MANGLE */
                        u32             label;
                        u8              tc;
                        u8              bos;
                        u8              ttl;
                } mpls_mangle;
                struct {                                /* FLOW_ACTION_GATE */
                        u32             index;
                        s32             prio;
                        u64             basetime;
                        u64             cycletime;
                        u64             cycletimeext;
                        u32             num_entries;
                        struct action_gate_entry *entries;
                } gate;
        };
        struct flow_action_cookie *cookie; /* user defined action cookie */
};

struct flow_action {
        unsigned int                    num_entries;
        struct flow_action_entry        entries[];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
        return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
        return action->num_entries == 1;
}

#define flow_action_for_each(__i, __act, __actions)                     \
        for (__i = 0, __act = &(__actions)->entries[0];                 \
             __i < (__actions)->num_entries;                            \
             __act = &(__actions)->entries[++__i])

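/* Illustrative sketch, not part of the kernel API: iterating the actions of an
 * offloaded rule in a driver. The flow variable and the foo_flow_set_*()
 * helpers are hypothetical.
 *
 *	const struct flow_action_entry *act;
 *	int i;
 *
 *	flow_action_for_each(i, act, &rule->action) {
 *		switch (act->id) {
 *		case FLOW_ACTION_DROP:
 *			foo_flow_set_drop(flow);
 *			break;
 *		case FLOW_ACTION_REDIRECT:
 *			foo_flow_set_redirect(flow, act->dev);
 *			break;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
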
static inline bool
flow_action_mixed_hw_stats_check(const struct flow_action *action,
                                 struct netlink_ext_ack *extack)
{
        const struct flow_action_entry *action_entry;
        u8 last_hw_stats;
        int i;

        if (flow_offload_has_one_action(action))
                return true;

        flow_action_for_each(i, action_entry, action) {
                if (i && action_entry->hw_stats != last_hw_stats) {
                        NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
                        return false;
                }
                last_hw_stats = action_entry->hw_stats;
        }
        return true;
}

static inline const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
        WARN_ON(!flow_action_has_entries(action));
        return &action->entries[0];
}

static inline bool
__flow_action_hw_stats_check(const struct flow_action *action,
                             struct netlink_ext_ack *extack,
                             bool check_allow_bit,
                             enum flow_action_hw_stats_bit allow_bit)
{
        const struct flow_action_entry *action_entry;

        if (!flow_action_has_entries(action))
                return true;
        if (!flow_action_mixed_hw_stats_check(action, extack))
                return false;

        action_entry = flow_action_first_entry_get(action);

        /* Zero is not a legal value for hw_stats, catch anyone passing it */
        WARN_ON_ONCE(!action_entry->hw_stats);

        if (!check_allow_bit &&
            ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
                NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
                return false;
        } else if (check_allow_bit &&
                   !(action_entry->hw_stats & BIT(allow_bit))) {
                NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
                return false;
        }
        return true;
}

static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
                           struct netlink_ext_ack *extack,
                           enum flow_action_hw_stats_bit allow_bit)
{
        return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}

static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
                                 struct netlink_ext_ack *extack)
{
        return __flow_action_hw_stats_check(action, extack, false, 0);
}

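/* Illustrative sketch, not part of the kernel API: a driver that only
 * implements the default "any" stats type would typically gate rule
 * installation on the basic check before parsing the actions. The
 * foo_parse_actions() helper and the surrounding variables are hypothetical.
 *
 *	if (!flow_action_basic_hw_stats_check(&rule->action, extack))
 *		return -EOPNOTSUPP;
 *
 *	err = foo_parse_actions(priv, &rule->action, extack);
 */
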
struct flow_rule {
        struct flow_match       match;
        struct flow_action      action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
                                       enum flow_dissector_key_id key)
{
        return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
        u64     pkts;
        u64     bytes;
        u64     drops;
        u64     lastused;
        enum flow_action_hw_stats used_hw_stats;
        bool used_hw_stats_valid;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
                                     u64 bytes, u64 pkts,
                                     u64 drops, u64 lastused,
                                     enum flow_action_hw_stats used_hw_stats)
{
        flow_stats->pkts        += pkts;
        flow_stats->bytes       += bytes;
        flow_stats->drops       += drops;
        flow_stats->lastused    = max_t(u64, flow_stats->lastused, lastused);

        /* The driver should pass a value with at most one bit set.
         * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
         */
        WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
        flow_stats->used_hw_stats |= used_hw_stats;
        flow_stats->used_hw_stats_valid = true;
}

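/* Illustrative sketch, not part of the kernel API: how a driver might report
 * counters when servicing a FLOW_CLS_STATS request. The foo_flow_get_stats()
 * helper and the cls_flower/flow variables are hypothetical.
 *
 *	u64 bytes, pkts, drops, lastused;
 *
 *	foo_flow_get_stats(flow, &bytes, &pkts, &drops, &lastused);
 *	flow_stats_update(&cls_flower->stats, bytes, pkts, drops, lastused,
 *			  FLOW_ACTION_HW_STATS_DELAYED);
 */
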
enum flow_block_command {
        FLOW_BLOCK_BIND,
        FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
        FLOW_BLOCK_BINDER_TYPE_UNSPEC,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
        FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
        FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

struct flow_block {
        struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
        enum flow_block_command command;
        enum flow_block_binder_type binder_type;
        bool block_shared;
        bool unlocked_driver_cb;
        struct net *net;
        struct flow_block *block;
        struct list_head cb_list;
        struct list_head *driver_block_list;
        struct netlink_ext_ack *extack;
        struct Qdisc *sch;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
                            void *cb_priv);

struct flow_block_cb;

struct flow_block_indr {
        struct list_head                list;
        struct net_device               *dev;
        struct Qdisc                    *sch;
        enum flow_block_binder_type     binder_type;
        void                            *data;
        void                            *cb_priv;
        void                            (*cleanup)(struct flow_block_cb *block_cb);
};

struct flow_block_cb {
        struct list_head        driver_list;
        struct list_head        list;
        flow_setup_cb_t         *cb;
        void                    *cb_ident;
        void                    *cb_priv;
        void                    (*release)(void *cb_priv);
        struct flow_block_indr  indr;
        unsigned int            refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv));
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
                                               void *cb_ident, void *cb_priv,
                                               void (*release)(void *cb_priv),
                                               struct flow_block_offload *bo,
                                               struct net_device *dev,
                                               struct Qdisc *sch, void *data,
                                               void *indr_cb_priv,
                                               void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
                                           flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
                                     struct flow_block_offload *offload)
{
        list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
                                        struct flow_block_offload *offload)
{
        list_move(&block_cb->list, &offload->cb_list);
}

static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
                                             struct flow_block_offload *offload)
{
        list_del(&block_cb->indr.list);
        list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
                               struct list_head *driver_list,
                               flow_setup_cb_t *cb,
                               void *cb_ident, void *cb_priv, bool ingress_only);

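/* Illustrative sketch, not part of the kernel API: minimal handling of
 * FLOW_BLOCK_BIND/UNBIND in a driver block setup routine. foo_setup_block(),
 * foo_block_cb (a flow_setup_cb_t) and foo_block_cb_list are hypothetical.
 *
 *	static int foo_setup_block(struct foo_priv *priv,
 *				   struct flow_block_offload *f)
 *	{
 *		struct flow_block_cb *block_cb;
 *
 *		switch (f->command) {
 *		case FLOW_BLOCK_BIND:
 *			if (flow_block_cb_is_busy(foo_block_cb, priv,
 *						  &foo_block_cb_list))
 *				return -EBUSY;
 *			block_cb = flow_block_cb_alloc(foo_block_cb, priv,
 *						       priv, NULL);
 *			if (IS_ERR(block_cb))
 *				return PTR_ERR(block_cb);
 *			flow_block_cb_add(block_cb, f);
 *			list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
 *			return 0;
 *		case FLOW_BLOCK_UNBIND:
 *			block_cb = flow_block_cb_lookup(f->block, foo_block_cb,
 *							priv);
 *			if (!block_cb)
 *				return -ENOENT;
 *			flow_block_cb_remove(block_cb, f);
 *			list_del(&block_cb->driver_list);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
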
enum flow_cls_command {
        FLOW_CLS_REPLACE,
        FLOW_CLS_DESTROY,
        FLOW_CLS_STATS,
        FLOW_CLS_TMPLT_CREATE,
        FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
        u32 chain_index;
        __be16 protocol;
        u32 prio;
        struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
        struct flow_cls_common_offload common;
        enum flow_cls_command command;
        unsigned long cookie;
        struct flow_rule *rule;
        struct flow_stats stats;
        u32 classid;
};

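/* Illustrative sketch, not part of the kernel API: dispatching a
 * flow_cls_offload request inside a driver callback. The foo_* helpers are
 * hypothetical.
 *
 *	static int foo_setup_tc_flower(struct foo_priv *priv,
 *				       struct flow_cls_offload *f)
 *	{
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return foo_flow_replace(priv, f);
 *		case FLOW_CLS_DESTROY:
 *			return foo_flow_destroy(priv, f);
 *		case FLOW_CLS_STATS:
 *			return foo_flow_stats(priv, f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
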
static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
        return flow_cmd->rule;
}

static inline void flow_block_init(struct flow_block *flow_block)
{
        INIT_LIST_HEAD(&flow_block->cb_list);
}

typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
                                      enum tc_setup_type type, void *type_data,
                                      void *data,
                                      void (*cleanup)(struct flow_block_cb *block_cb));

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
                              void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
                                void (*cleanup)(struct flow_block_cb *block_cb));

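/* Illustrative sketch, not part of the kernel API: registering and
 * unregistering an indirect block callback, typically from module init/exit.
 * foo_indr_setup_cb (a flow_indr_block_bind_cb_t) and foo_release are
 * hypothetical.
 *
 *	err = flow_indr_dev_register(foo_indr_setup_cb, priv);
 *	...
 *	flow_indr_dev_unregister(foo_indr_setup_cb, priv, foo_release);
 */
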
#endif /* _NET_FLOW_OFFLOAD_H */