linux/include/net/flow_offload.h
#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netlink.h>
#include <net/flow_dissector.h>

struct flow_match {
        struct flow_dissector   *dissector;
        void                    *mask;
        void                    *key;
};

struct flow_match_meta {
        struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
        struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
        struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
        struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
        struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
        struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
        struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
        struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
        struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
        struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
        struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
        struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
        struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
        struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
        struct flow_dissector_key_ct *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
                          struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
                           struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
                             struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
                               struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
                          struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
                           struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
                        struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
                           struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
                         struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
                          struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
                          struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
                                 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
                            struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
                               struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
                               struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
                              struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
                        struct flow_match_ct *out);

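/* Usage sketch (hypothetical driver code, not part of this header): rule
 * parsing typically guards each flow_rule_match_*() call with
 * flow_rule_match_key() (defined further below) and honours the mask:
 *
 *      if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *              struct flow_match_basic match;
 *
 *              flow_rule_match_basic(rule, &match);
 *              if (match.mask->ip_proto && match.key->ip_proto != IPPROTO_TCP)
 *                      return -EOPNOTSUPP;
 *              // only fields with a non-zero mask are meaningful
 *      }
 *
 * The surrounding function and its TCP-only policy are made up for
 * illustration.
 */
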
enum flow_action_id {
        FLOW_ACTION_ACCEPT              = 0,
        FLOW_ACTION_DROP,
        FLOW_ACTION_TRAP,
        FLOW_ACTION_GOTO,
        FLOW_ACTION_REDIRECT,
        FLOW_ACTION_MIRRED,
        FLOW_ACTION_REDIRECT_INGRESS,
        FLOW_ACTION_MIRRED_INGRESS,
        FLOW_ACTION_VLAN_PUSH,
        FLOW_ACTION_VLAN_POP,
        FLOW_ACTION_VLAN_MANGLE,
        FLOW_ACTION_TUNNEL_ENCAP,
        FLOW_ACTION_TUNNEL_DECAP,
        FLOW_ACTION_MANGLE,
        FLOW_ACTION_ADD,
        FLOW_ACTION_CSUM,
        FLOW_ACTION_MARK,
        FLOW_ACTION_PTYPE,
        FLOW_ACTION_PRIORITY,
        FLOW_ACTION_WAKE,
        FLOW_ACTION_QUEUE,
        FLOW_ACTION_SAMPLE,
        FLOW_ACTION_POLICE,
        FLOW_ACTION_CT,
        FLOW_ACTION_CT_METADATA,
        FLOW_ACTION_MPLS_PUSH,
        FLOW_ACTION_MPLS_POP,
        FLOW_ACTION_MPLS_MANGLE,
        FLOW_ACTION_GATE,
        FLOW_ACTION_PPPOE_PUSH,
        NUM_FLOW_ACTIONS,
};

/* This mirrors the enum pedit_header_type definition for easy mapping to the
 * tc pedit action. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which no driver supports. See the illustrative
 * mangle sketch below the enum.
 */
enum flow_action_mangle_base {
        FLOW_ACT_MANGLE_UNSPEC          = 0,
        FLOW_ACT_MANGLE_HDR_TYPE_ETH,
        FLOW_ACT_MANGLE_HDR_TYPE_IP4,
        FLOW_ACT_MANGLE_HDR_TYPE_IP6,
        FLOW_ACT_MANGLE_HDR_TYPE_TCP,
        FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};
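
/* Illustrative sketch only (assumptions, not taken from this header): the
 * core emits one FLOW_ACTION_MANGLE entry per 32-bit pedit key, so an IPv4
 * TTL rewrite might reach a driver roughly as:
 *
 *      act->id = FLOW_ACTION_MANGLE;
 *      act->mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_IP4;
 *      act->mangle.offset = 8;         // 32-bit word holding ttl/protocol/check
 *
 * with mangle.mask/mangle.val assumed to follow act_pedit's convention of
 * new_word = (old_word & mask) ^ val.
 */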

enum flow_action_hw_stats_bit {
        FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
        FLOW_ACTION_HW_STATS_DELAYED_BIT,
        FLOW_ACTION_HW_STATS_DISABLED_BIT,

        FLOW_ACTION_HW_STATS_NUM_BITS
};

enum flow_action_hw_stats {
        FLOW_ACTION_HW_STATS_IMMEDIATE =
                BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
        FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
        FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
                                   FLOW_ACTION_HW_STATS_DELAYED,
        FLOW_ACTION_HW_STATS_DISABLED =
                BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
        FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};

typedef void (*action_destr)(void *priv);

struct flow_action_cookie {
        u32 cookie_len;
        u8 cookie[];
};

struct flow_action_cookie *flow_action_cookie_create(void *data,
                                                     unsigned int len,
                                                     gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);

struct flow_action_entry {
        enum flow_action_id             id;
        enum flow_action_hw_stats       hw_stats;
        action_destr                    destructor;
        void                            *destructor_priv;
        union {
                u32                     chain_index;    /* FLOW_ACTION_GOTO */
                struct net_device       *dev;           /* FLOW_ACTION_REDIRECT */
                struct {                                /* FLOW_ACTION_VLAN */
                        u16             vid;
                        __be16          proto;
                        u8              prio;
                } vlan;
                struct {                                /* FLOW_ACTION_MANGLE */
                                                        /* FLOW_ACTION_ADD */
                        enum flow_action_mangle_base htype;
                        u32             offset;
                        u32             mask;
                        u32             val;
                } mangle;
                struct ip_tunnel_info   *tunnel;        /* FLOW_ACTION_TUNNEL_ENCAP */
                u32                     csum_flags;     /* FLOW_ACTION_CSUM */
                u32                     mark;           /* FLOW_ACTION_MARK */
                u16                     ptype;          /* FLOW_ACTION_PTYPE */
                u32                     priority;       /* FLOW_ACTION_PRIORITY */
                struct {                                /* FLOW_ACTION_QUEUE */
                        u32             ctx;
                        u32             index;
                        u8              vf;
                } queue;
                struct {                                /* FLOW_ACTION_SAMPLE */
                        struct psample_group    *psample_group;
                        u32                     rate;
                        u32                     trunc_size;
                        bool                    truncate;
                } sample;
                struct {                                /* FLOW_ACTION_POLICE */
                        u32                     index;
                        u32                     burst;
                        u64                     rate_bytes_ps;
                        u64                     burst_pkt;
                        u64                     rate_pkt_ps;
                        u32                     mtu;
                } police;
                struct {                                /* FLOW_ACTION_CT */
                        int action;
                        u16 zone;
                        struct nf_flowtable *flow_table;
                } ct;
                struct {                                /* FLOW_ACTION_CT_METADATA */
                        unsigned long cookie;
                        u32 mark;
                        u32 labels[4];
                        bool orig_dir;
                } ct_metadata;
                struct {                                /* FLOW_ACTION_MPLS_PUSH */
                        u32             label;
                        __be16          proto;
                        u8              tc;
                        u8              bos;
                        u8              ttl;
                } mpls_push;
                struct {                                /* FLOW_ACTION_MPLS_POP */
                        __be16          proto;
                } mpls_pop;
                struct {                                /* FLOW_ACTION_MPLS_MANGLE */
                        u32             label;
                        u8              tc;
                        u8              bos;
                        u8              ttl;
                } mpls_mangle;
                struct {                                /* FLOW_ACTION_GATE */
                        u32             index;
                        s32             prio;
                        u64             basetime;
                        u64             cycletime;
                        u64             cycletimeext;
                        u32             num_entries;
                        struct action_gate_entry *entries;
                } gate;
                struct {                                /* FLOW_ACTION_PPPOE_PUSH */
                        u16             sid;
                } pppoe;
        };
        struct flow_action_cookie *cookie; /* user defined action cookie */
};

struct flow_action {
        unsigned int                    num_entries;
        struct flow_action_entry        entries[];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
        return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
        return action->num_entries == 1;
}

#define flow_action_for_each(__i, __act, __actions)                     \
        for (__i = 0, __act = &(__actions)->entries[0];                 \
             __i < (__actions)->num_entries;                            \
             __act = &(__actions)->entries[++__i])

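/* Usage sketch (hypothetical driver code, not part of this header):
 *
 *      const struct flow_action_entry *act;
 *      int i;
 *
 *      flow_action_for_each(i, act, &rule->action) {
 *              switch (act->id) {
 *              case FLOW_ACTION_ACCEPT:
 *              case FLOW_ACTION_DROP:
 *                      // program the hardware accordingly
 *                      break;
 *              default:
 *                      return -EOPNOTSUPP;
 *              }
 *      }
 */
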
static inline bool
flow_action_mixed_hw_stats_check(const struct flow_action *action,
                                 struct netlink_ext_ack *extack)
{
        const struct flow_action_entry *action_entry;
        u8 last_hw_stats;
        int i;

        if (flow_offload_has_one_action(action))
                return true;

        flow_action_for_each(i, action_entry, action) {
                if (i && action_entry->hw_stats != last_hw_stats) {
                        NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
                        return false;
                }
                last_hw_stats = action_entry->hw_stats;
        }
        return true;
}

static inline const struct flow_action_entry *
flow_action_first_entry_get(const struct flow_action *action)
{
        WARN_ON(!flow_action_has_entries(action));
        return &action->entries[0];
}

static inline bool
__flow_action_hw_stats_check(const struct flow_action *action,
                             struct netlink_ext_ack *extack,
                             bool check_allow_bit,
                             enum flow_action_hw_stats_bit allow_bit)
{
        const struct flow_action_entry *action_entry;

        if (!flow_action_has_entries(action))
                return true;
        if (!flow_action_mixed_hw_stats_check(action, extack))
                return false;

        action_entry = flow_action_first_entry_get(action);

        /* Zero is not a legal value for hw_stats, catch anyone passing it */
        WARN_ON_ONCE(!action_entry->hw_stats);

        if (!check_allow_bit &&
            ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
                NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
                return false;
        } else if (check_allow_bit &&
                   !(action_entry->hw_stats & BIT(allow_bit))) {
                NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
                return false;
        }
        return true;
}

static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
                           struct netlink_ext_ack *extack,
                           enum flow_action_hw_stats_bit allow_bit)
{
        return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}

static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
                                 struct netlink_ext_ack *extack)
{
        return __flow_action_hw_stats_check(action, extack, false, 0);
}
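
/* Usage sketch (hypothetical, not part of this header): a driver that only
 * implements delayed counters would typically reject other requests early
 * in its FLOW_CLS_REPLACE path:
 *
 *      if (!flow_action_hw_stats_check(&rule->action, extack,
 *                                      FLOW_ACTION_HW_STATS_DELAYED_BIT))
 *              return -EOPNOTSUPP;
 *
 * Drivers with no preference use flow_action_basic_hw_stats_check() instead.
 */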

struct flow_rule {
        struct flow_match       match;
        struct flow_action      action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
                                       enum flow_dissector_key_id key)
{
        return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
        u64     pkts;
        u64     bytes;
        u64     drops;
        u64     lastused;
        enum flow_action_hw_stats used_hw_stats;
        bool used_hw_stats_valid;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
                                     u64 bytes, u64 pkts,
                                     u64 drops, u64 lastused,
                                     enum flow_action_hw_stats used_hw_stats)
{
        flow_stats->pkts        += pkts;
        flow_stats->bytes       += bytes;
        flow_stats->drops       += drops;
        flow_stats->lastused    = max_t(u64, flow_stats->lastused, lastused);

        /* The driver should pass a value with at most one bit set.
         * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
         */
        WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
        flow_stats->used_hw_stats |= used_hw_stats;
        flow_stats->used_hw_stats_valid = true;
}
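
/* Usage sketch (hypothetical, not part of this header): when servicing
 * FLOW_CLS_STATS, a driver folds its hardware counters into the request:
 *
 *      flow_stats_update(&f->stats, bytes, pkts, drops, lastused,
 *                        FLOW_ACTION_HW_STATS_DELAYED);
 *
 * where f is the struct flow_cls_offload (defined below) being serviced and
 * bytes/pkts/drops/lastused come from the device.
 */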

enum flow_block_command {
        FLOW_BLOCK_BIND,
        FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
        FLOW_BLOCK_BINDER_TYPE_UNSPEC,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
        FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
        FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};

struct flow_block {
        struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
        enum flow_block_command command;
        enum flow_block_binder_type binder_type;
        bool block_shared;
        bool unlocked_driver_cb;
        struct net *net;
        struct flow_block *block;
        struct list_head cb_list;
        struct list_head *driver_block_list;
        struct netlink_ext_ack *extack;
        struct Qdisc *sch;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
                            void *cb_priv);

struct flow_block_cb;

struct flow_block_indr {
        struct list_head                list;
        struct net_device               *dev;
        struct Qdisc                    *sch;
        enum flow_block_binder_type     binder_type;
        void                            *data;
        void                            *cb_priv;
        void                            (*cleanup)(struct flow_block_cb *block_cb);
};

struct flow_block_cb {
        struct list_head        driver_list;
        struct list_head        list;
        flow_setup_cb_t         *cb;
        void                    *cb_ident;
        void                    *cb_priv;
        void                    (*release)(void *cb_priv);
        struct flow_block_indr  indr;
        unsigned int            refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv));
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
                                               void *cb_ident, void *cb_priv,
                                               void (*release)(void *cb_priv),
                                               struct flow_block_offload *bo,
                                               struct net_device *dev,
                                               struct Qdisc *sch, void *data,
                                               void *indr_cb_priv,
                                               void (*cleanup)(struct flow_block_cb *block_cb));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
                                           flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
                                     struct flow_block_offload *offload)
{
        list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
                                        struct flow_block_offload *offload)
{
        list_move(&block_cb->list, &offload->cb_list);
}

static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
                                             struct flow_block_offload *offload)
{
        list_del(&block_cb->indr.list);
        list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
                               struct list_head *driver_list,
                               flow_setup_cb_t *cb,
                               void *cb_ident, void *cb_priv, bool ingress_only);

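/* Usage sketch (hypothetical driver code, not part of this header): simple
 * ingress-only drivers usually answer TC_SETUP_BLOCK with
 * flow_block_cb_setup_simple(). The manual BIND/UNBIND pattern it wraps
 * looks roughly like:
 *
 *      switch (f->command) {
 *      case FLOW_BLOCK_BIND:
 *              if (flow_block_cb_is_busy(cb, cb_ident, &foo_block_cb_list))
 *                      return -EBUSY;
 *              block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
 *              if (IS_ERR(block_cb))
 *                      return PTR_ERR(block_cb);
 *              flow_block_cb_add(block_cb, f);
 *              list_add_tail(&block_cb->driver_list, &foo_block_cb_list);
 *              return 0;
 *      case FLOW_BLOCK_UNBIND:
 *              block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
 *              if (!block_cb)
 *                      return -ENOENT;
 *              flow_block_cb_remove(block_cb, f);
 *              list_del(&block_cb->driver_list);
 *              return 0;
 *      default:
 *              return -EOPNOTSUPP;
 *      }
 *
 * foo_block_cb_list is a hypothetical per-driver list.
 */
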
enum flow_cls_command {
        FLOW_CLS_REPLACE,
        FLOW_CLS_DESTROY,
        FLOW_CLS_STATS,
        FLOW_CLS_TMPLT_CREATE,
        FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
        u32 chain_index;
        __be16 protocol;
        u32 prio;
        struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
        struct flow_cls_common_offload common;
        enum flow_cls_command command;
        unsigned long cookie;
        struct flow_rule *rule;
        struct flow_stats stats;
        u32 classid;
};

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
        return flow_cmd->rule;
}
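
/* Usage sketch (hypothetical driver code, not part of this header): the
 * flow_setup_cb_t registered for a block typically dispatches on the
 * classifier command, e.g. for flower:
 *
 *      static int foo_setup_tc_block_cb(enum tc_setup_type type,
 *                                       void *type_data, void *cb_priv)
 *      {
 *              struct flow_cls_offload *f = type_data;
 *
 *              if (type != TC_SETUP_CLSFLOWER)
 *                      return -EOPNOTSUPP;
 *
 *              switch (f->command) {
 *              case FLOW_CLS_REPLACE:
 *                      return foo_flower_replace(cb_priv, f);
 *              case FLOW_CLS_DESTROY:
 *                      return foo_flower_destroy(cb_priv, f);
 *              case FLOW_CLS_STATS:
 *                      return foo_flower_stats(cb_priv, f);
 *              default:
 *                      return -EOPNOTSUPP;
 *              }
 *      }
 *
 * foo_flower_*() are hypothetical helpers; the REPLACE path usually starts
 * from flow_cls_offload_flow_rule(f) to get at the matches and actions.
 */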

static inline void flow_block_init(struct flow_block *flow_block)
{
        INIT_LIST_HEAD(&flow_block->cb_list);
}

typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
                                      enum tc_setup_type type, void *type_data,
                                      void *data,
                                      void (*cleanup)(struct flow_block_cb *block_cb));

int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
                              void (*release)(void *cb_priv));
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
                                void (*cleanup)(struct flow_block_cb *block_cb));
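
/* Usage sketch (assumptions, not part of this header): drivers that offload
 * rules installed on foreign devices (e.g. tunnel netdevs) register an
 * indirect callback at probe time and drop it on removal:
 *
 *      err = flow_indr_dev_register(foo_indr_setup_tc_cb, priv);
 *      ...
 *      flow_indr_dev_unregister(foo_indr_setup_tc_cb, priv,
 *                               foo_indr_block_release);
 *
 * foo_indr_setup_tc_cb() and foo_indr_block_release() are hypothetical; the
 * registered callback is exercised via flow_indr_dev_setup_offload() when
 * blocks on such devices are bound or unbound.
 */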

#endif /* _NET_FLOW_OFFLOAD_H */