linux/net/core/flow_offload.c
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>

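/*
 * Allocate a struct flow_rule with room for @num_actions action entries.
 * The entries are a flexible array sized via struct_size() and zeroed by
 * kzalloc(); returns NULL on allocation failure.
 */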
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
        struct flow_rule *rule;

        rule = kzalloc(struct_size(rule, action.entries, num_actions),
                       GFP_KERNEL);
        if (!rule)
                return NULL;

        rule->action.num_entries = num_actions;

        return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

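/*
 * Resolve the key/mask pair for one dissector key id: both pointers are
 * looked up in the rule's flow_dissector via skb_flow_dissector_target()
 * and stored in @__out. Expanded by each flow_rule_match_*() helper below.
 */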
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)                             \
        const struct flow_match *__m = &(__rule)->match;                        \
        struct flow_dissector *__d = (__m)->dissector;                          \
                                                                                \
        (__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);      \
        (__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);    \

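/*
 * Typical driver usage (sketch): check that the rule carries a key with
 * flow_rule_match_key() from <net/flow_offload.h> before extracting it,
 * e.g.
 *
 *      struct flow_match_basic match;
 *
 *      if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *              flow_rule_match_basic(rule, &match);
 *              ... inspect match.key->n_proto under match.mask->n_proto ...
 *      }
 */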
void flow_rule_match_meta(const struct flow_rule *rule,
                          struct flow_match_meta *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
                           struct flow_match_basic *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
                             struct flow_match_control *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
                               struct flow_match_eth_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
                          struct flow_match_vlan *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
                           struct flow_match_vlan *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv4_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
                                struct flow_match_ipv6_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
                        struct flow_match_ip *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
                           struct flow_match_ports *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_tcp(const struct flow_rule *rule,
                         struct flow_match_tcp *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
                          struct flow_match_icmp *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
                          struct flow_match_mpls *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
                                 struct flow_match_control *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv4_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
                                    struct flow_match_ipv6_addrs *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
                            struct flow_match_ip *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
                               struct flow_match_ports *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
                               struct flow_match_enc_keyid *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
                              struct flow_match_enc_opts *out)
{
        FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

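/*
 * A flow_block_cb ties a driver callback (@cb, @cb_ident, @cb_priv) to a
 * flow block. flow_block_cb_alloc() returns ERR_PTR(-ENOMEM) on failure;
 * flow_block_cb_free() invokes the optional @release hook on @cb_priv
 * before freeing the entry.
 */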
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv))
{
        struct flow_block_cb *block_cb;

        block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
        if (!block_cb)
                return ERR_PTR(-ENOMEM);

        block_cb->cb = cb;
        block_cb->cb_ident = cb_ident;
        block_cb->cb_priv = cb_priv;
        block_cb->release = release;

        return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
        if (block_cb->release)
                block_cb->release(block_cb->cb_priv);

        kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
                                           flow_setup_cb_t *cb, void *cb_ident)
{
        struct flow_block_cb *block_cb;

        list_for_each_entry(block_cb, &block->cb_list, list) {
                if (block_cb->cb == cb &&
                    block_cb->cb_ident == cb_ident)
                        return block_cb;
        }

        return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
        return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

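/*
 * The refcnt below is a plain counter, not an atomic: callers are expected
 * to serialize block callback setup externally (typically under RTNL), so
 * no locking is needed here.
 */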
void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
        block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
        return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list)
{
        struct flow_block_cb *block_cb;

        list_for_each_entry(block_cb, driver_block_list, driver_list) {
                if (block_cb->cb == cb &&
                    block_cb->cb_ident == cb_ident)
                        return true;
        }

        return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

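/*
 * flow_block_cb_setup_simple() is the boilerplate most drivers need in
 * their ndo_setup_tc(TC_SETUP_BLOCK) handler: it binds @cb to the block
 * on FLOW_BLOCK_BIND (refusing duplicates with -EBUSY) and unbinds it on
 * FLOW_BLOCK_UNBIND. With @ingress_only set, non-ingress binds are
 * rejected with -EOPNOTSUPP. A typical call looks like this sketch
 * (driver names are illustrative only):
 *
 *      return flow_block_cb_setup_simple(f, &driver_block_cb_list,
 *                                        driver_setup_tc_block_cb,
 *                                        priv, priv, true);
 */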
int flow_block_cb_setup_simple(struct flow_block_offload *f,
                               struct list_head *driver_block_list,
                               flow_setup_cb_t *cb,
                               void *cb_ident, void *cb_priv,
                               bool ingress_only)
{
        struct flow_block_cb *block_cb;

        if (ingress_only &&
            f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        f->driver_block_list = driver_block_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
                        return -EBUSY;

                block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, driver_block_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
                if (!block_cb)
                        return -ENOENT;

                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

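/*
 * Indirect block offload: some netdevices (e.g. tunnel devices such as
 * vxlan) cannot offload rules themselves, so a capable driver registers a
 * callback against that other device instead. Registrations live in a
 * global rhashtable keyed by the net_device pointer, one refcounted
 * flow_indr_block_dev per device with its list of driver callbacks.
 */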
static LIST_HEAD(block_ing_cb_list);

static struct rhashtable indr_setup_block_ht;

struct flow_indr_block_cb {
        struct list_head list;
        void *cb_priv;
        flow_indr_block_bind_cb_t *cb;
        void *cb_ident;
};

struct flow_indr_block_dev {
        struct rhash_head ht_node;
        struct net_device *dev;
        unsigned int refcnt;
        struct list_head cb_list;
};

static const struct rhashtable_params flow_indr_setup_block_ht_params = {
        .key_offset     = offsetof(struct flow_indr_block_dev, dev),
        .head_offset    = offsetof(struct flow_indr_block_dev, ht_node),
        .key_len        = sizeof(struct net_device *),
};

static struct flow_indr_block_dev *
flow_indr_block_dev_lookup(struct net_device *dev)
{
        return rhashtable_lookup_fast(&indr_setup_block_ht, &dev,
                                      flow_indr_setup_block_ht_params);
}

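/*
 * Look up the flow_indr_block_dev for @dev, creating and inserting it on
 * first use; each successful call takes a reference, which is dropped by
 * flow_indr_block_dev_put() below, freeing the entry at refcount zero.
 */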
static struct flow_indr_block_dev *
flow_indr_block_dev_get(struct net_device *dev)
{
        struct flow_indr_block_dev *indr_dev;

        indr_dev = flow_indr_block_dev_lookup(dev);
        if (indr_dev)
                goto inc_ref;

        indr_dev = kzalloc(sizeof(*indr_dev), GFP_KERNEL);
        if (!indr_dev)
                return NULL;

        INIT_LIST_HEAD(&indr_dev->cb_list);
        indr_dev->dev = dev;
        if (rhashtable_insert_fast(&indr_setup_block_ht, &indr_dev->ht_node,
                                   flow_indr_setup_block_ht_params)) {
                kfree(indr_dev);
                return NULL;
        }

inc_ref:
        indr_dev->refcnt++;
        return indr_dev;
}

static void flow_indr_block_dev_put(struct flow_indr_block_dev *indr_dev)
{
        if (--indr_dev->refcnt)
                return;

        rhashtable_remove_fast(&indr_setup_block_ht, &indr_dev->ht_node,
                               flow_indr_setup_block_ht_params);
        kfree(indr_dev);
}

static struct flow_indr_block_cb *
flow_indr_block_cb_lookup(struct flow_indr_block_dev *indr_dev,
                          flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct flow_indr_block_cb *indr_block_cb;

        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                if (indr_block_cb->cb == cb &&
                    indr_block_cb->cb_ident == cb_ident)
                        return indr_block_cb;
        return NULL;
}

static struct flow_indr_block_cb *
flow_indr_block_cb_add(struct flow_indr_block_dev *indr_dev, void *cb_priv,
                       flow_indr_block_bind_cb_t *cb, void *cb_ident)
{
        struct flow_indr_block_cb *indr_block_cb;

        indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
        if (indr_block_cb)
                return ERR_PTR(-EEXIST);

        indr_block_cb = kzalloc(sizeof(*indr_block_cb), GFP_KERNEL);
        if (!indr_block_cb)
                return ERR_PTR(-ENOMEM);

        indr_block_cb->cb_priv = cb_priv;
        indr_block_cb->cb = cb;
        indr_block_cb->cb_ident = cb_ident;
        list_add(&indr_block_cb->list, &indr_dev->cb_list);

        return indr_block_cb;
}

static void flow_indr_block_cb_del(struct flow_indr_block_cb *indr_block_cb)
{
        list_del(&indr_block_cb->list);
        kfree(indr_block_cb);
}

static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);

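/*
 * Replay a bind/unbind @command to every subsystem that registered a
 * flow_indr_block_ing_entry (e.g. tc ingress), under the entry-list mutex.
 */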
static void flow_block_ing_cmd(struct net_device *dev,
                               flow_indr_block_bind_cb_t *cb,
                               void *cb_priv,
                               enum flow_block_command command)
{
        struct flow_indr_block_ing_entry *entry;

        mutex_lock(&flow_indr_block_ing_cb_lock);
        list_for_each_entry(entry, &block_ing_cb_list, list) {
                entry->cb(dev, cb, cb_priv, command);
        }
        mutex_unlock(&flow_indr_block_ing_cb_lock);
}

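/*
 * The __flow_indr_block_cb_{register,unregister}() variants assume the
 * caller already holds RTNL; the plain variants below take it themselves.
 * Registration fails with -EEXIST for a duplicate (cb, cb_ident) pair and
 * immediately replays FLOW_BLOCK_BIND to the registered subsystems.
 */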
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                                  flow_indr_block_bind_cb_t *cb,
                                  void *cb_ident)
{
        struct flow_indr_block_cb *indr_block_cb;
        struct flow_indr_block_dev *indr_dev;
        int err;

        indr_dev = flow_indr_block_dev_get(dev);
        if (!indr_dev)
                return -ENOMEM;

        indr_block_cb = flow_indr_block_cb_add(indr_dev, cb_priv, cb, cb_ident);
        err = PTR_ERR_OR_ZERO(indr_block_cb);
        if (err)
                goto err_dev_put;

        flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
                           FLOW_BLOCK_BIND);

        return 0;

err_dev_put:
        flow_indr_block_dev_put(indr_dev);
        return err;
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_register);

int flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
                                flow_indr_block_bind_cb_t *cb,
                                void *cb_ident)
{
        int err;

        rtnl_lock();
        err = __flow_indr_block_cb_register(dev, cb_priv, cb, cb_ident);
        rtnl_unlock();

        return err;
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_register);

void __flow_indr_block_cb_unregister(struct net_device *dev,
                                     flow_indr_block_bind_cb_t *cb,
                                     void *cb_ident)
{
        struct flow_indr_block_cb *indr_block_cb;
        struct flow_indr_block_dev *indr_dev;

        indr_dev = flow_indr_block_dev_lookup(dev);
        if (!indr_dev)
                return;

        indr_block_cb = flow_indr_block_cb_lookup(indr_dev, cb, cb_ident);
        if (!indr_block_cb)
                return;

        flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
                           FLOW_BLOCK_UNBIND);

        flow_indr_block_cb_del(indr_block_cb);
        flow_indr_block_dev_put(indr_dev);
}
EXPORT_SYMBOL_GPL(__flow_indr_block_cb_unregister);

void flow_indr_block_cb_unregister(struct net_device *dev,
                                   flow_indr_block_bind_cb_t *cb,
                                   void *cb_ident)
{
        rtnl_lock();
        __flow_indr_block_cb_unregister(dev, cb, cb_ident);
        rtnl_unlock();
}
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);

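/*
 * Called by subsystems when a block is being bound or unbound on @dev:
 * propagates the flow_block_offload request to every driver callback
 * registered indirectly against this device. A no-op if nobody has
 * registered for @dev.
 */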
void flow_indr_block_call(struct net_device *dev,
                          struct flow_block_offload *bo,
                          enum flow_block_command command)
{
        struct flow_indr_block_cb *indr_block_cb;
        struct flow_indr_block_dev *indr_dev;

        indr_dev = flow_indr_block_dev_lookup(dev);
        if (!indr_dev)
                return;

        list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
                indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
                                  bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);

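/*
 * Subsystems add/remove their flow_indr_block_ing_entry here so that
 * flow_block_ing_cmd() can replay binds to them; the shared mutex orders
 * these updates against the replay loop above.
 */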
void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
        mutex_lock(&flow_indr_block_ing_cb_lock);
        list_add_tail(&entry->list, &block_ing_cb_list);
        mutex_unlock(&flow_indr_block_ing_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);

void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
{
        mutex_lock(&flow_indr_block_ing_cb_lock);
        list_del(&entry->list);
        mutex_unlock(&flow_indr_block_ing_cb_lock);
}
EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);

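/*
 * The device table must exist before any driver can register an indirect
 * block callback, hence the early subsys_initcall().
 */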
static int __init init_flow_indr_rhashtable(void)
{
        return rhashtable_init(&indr_setup_block_ht,
                               &flow_indr_setup_block_ht_params);
}
subsys_initcall(init_flow_indr_rhashtable);