linux/net/netfilter/nf_tables_offload.c
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/flow_offload.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/pkt_cls.h>

static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
{
	struct nft_flow_rule *flow;

	flow = kzalloc(sizeof(struct nft_flow_rule), GFP_KERNEL);
	if (!flow)
		return NULL;

	flow->rule = flow_rule_alloc(num_actions);
	if (!flow->rule) {
		kfree(flow);
		return NULL;
	}

	flow->rule->match.dissector	= &flow->match.dissector;
	flow->rule->match.mask		= &flow->match.mask;
	flow->rule->match.key		= &flow->match.key;

	return flow;
}

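/*
 * Translate an nftables rule into a flow_rule. The first pass over the
 * expressions counts those flagged as actions to size the action array;
 * the second pass calls each expression's ->offload() callback to fill
 * in the match and actions. Any expression without offload support makes
 * the whole rule non-offloadable (-EOPNOTSUPP).
 */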
struct nft_flow_rule *nft_flow_rule_create(struct net *net,
					   const struct nft_rule *rule)
{
	struct nft_offload_ctx *ctx;
	struct nft_flow_rule *flow;
	int num_actions = 0, err;
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (expr->ops && expr != nft_expr_last(rule)) {
		if (expr->ops->offload_flags & NFT_OFFLOAD_F_ACTION)
			num_actions++;

		expr = nft_expr_next(expr);
	}

	flow = nft_flow_rule_alloc(num_actions);
	if (!flow)
		return ERR_PTR(-ENOMEM);

	expr = nft_expr_first(rule);

	ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto err_out;
	}
	ctx->net = net;
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;

	while (expr->ops && expr != nft_expr_last(rule)) {
		if (!expr->ops->offload) {
			err = -EOPNOTSUPP;
			goto err_out;
		}
		err = expr->ops->offload(ctx, flow, expr);
		if (err < 0)
			goto err_out;

		expr = nft_expr_next(expr);
	}
	flow->proto = ctx->dep.l3num;
	kfree(ctx);

	return flow;
err_out:
	kfree(ctx);
	nft_flow_rule_destroy(flow);

	return ERR_PTR(err);
}

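/*
 * Actions that take a reference on a net_device (redirect/mirred) hold
 * it for the lifetime of the flow rule; drop those references before
 * freeing the rule and its container.
 */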
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
	struct flow_action_entry *entry;
	int i;

	flow_action_for_each(i, entry, &flow->rule->action) {
		switch (entry->id) {
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED:
			dev_put(entry->dev);
			break;
		default:
			break;
		}
	}
	kfree(flow->rule);
	kfree(flow);
}

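/*
 * Expression offload callbacks use these helpers to track protocol
 * dependencies between matches: set_dependency() records which header
 * layer the next match constrains, update_dependency() stores the
 * protocol number seen for that layer (l3num or protonum) and clears
 * the pending dependency.
 */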
void nft_offload_set_dependency(struct nft_offload_ctx *ctx,
				enum nft_offload_dep_type type)
{
	ctx->dep.type = type;
}

void nft_offload_update_dependency(struct nft_offload_ctx *ctx,
				   const void *data, u32 len)
{
	switch (ctx->dep.type) {
	case NFT_OFFLOAD_DEP_NETWORK:
		WARN_ON(len != sizeof(__u16));
		memcpy(&ctx->dep.l3num, data, sizeof(__u16));
		break;
	case NFT_OFFLOAD_DEP_TRANSPORT:
		WARN_ON(len != sizeof(__u8));
		memcpy(&ctx->dep.protonum, data, sizeof(__u8));
		break;
	default:
		break;
	}
	ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
}

static void nft_flow_offload_common_init(struct flow_cls_common_offload *common,
					 __be16 proto, int priority,
					 struct netlink_ext_ack *extack)
{
	common->protocol = proto;
	common->prio = priority;
	common->extack = extack;
}

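/* Run an offload request through every callback bound to the basechain's
 * flow block, stopping at the first error.
 */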
static int nft_setup_cb_call(struct nft_base_chain *basechain,
			     enum tc_setup_type type, void *type_data)
{
	struct flow_block_cb *block_cb;
	int err;

	list_for_each_entry(block_cb, &basechain->flow_block.cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err < 0)
			return err;
	}
	return 0;
}

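/* The chain priority is handed to the driver as the classifier priority,
 * which must be strictly positive and fit in 16 bits.
 */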
int nft_chain_offload_priority(struct nft_base_chain *basechain)
{
	if (basechain->ops.priority <= 0 ||
	    basechain->ops.priority > USHRT_MAX)
		return -1;

	return 0;
}

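/*
 * Build a flow_cls_offload request for a single rule and propagate it to
 * the block callbacks. The nft_rule pointer doubles as the cookie that
 * identifies the rule on the driver side; flow may be NULL for
 * FLOW_CLS_DESTROY, since no match/action payload is needed to remove it.
 */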
static int nft_flow_offload_rule(struct nft_chain *chain,
				 struct nft_rule *rule,
				 struct nft_flow_rule *flow,
				 enum flow_cls_command command)
{
	struct flow_cls_offload cls_flow = {};
	struct nft_base_chain *basechain;
	struct netlink_ext_ack extack = {};
	__be16 proto = ETH_P_ALL;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);

	if (flow)
		proto = flow->proto;

	nft_flow_offload_common_init(&cls_flow.common, proto,
				     basechain->ops.priority, &extack);
	cls_flow.command = command;
	cls_flow.cookie = (unsigned long) rule;
	if (flow)
		cls_flow.rule = flow->rule;

	return nft_setup_cb_call(basechain, TC_SETUP_CLSFLOWER, &cls_flow);
}

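/* Binding splices the callbacks collected by the driver onto the
 * basechain's flow block; unbinding releases them again.
 */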
static int nft_flow_offload_bind(struct flow_block_offload *bo,
				 struct nft_base_chain *basechain)
{
	list_splice(&bo->cb_list, &basechain->flow_block.cb_list);
	return 0;
}

static int nft_flow_offload_unbind(struct flow_block_offload *bo,
				   struct nft_base_chain *basechain)
{
	struct flow_block_cb *block_cb, *next;

	list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}

	return 0;
}

static int nft_block_setup(struct nft_base_chain *basechain,
			   struct flow_block_offload *bo,
			   enum flow_block_command cmd)
{
	int err;

	switch (cmd) {
	case FLOW_BLOCK_BIND:
		err = nft_flow_offload_bind(bo, basechain);
		break;
	case FLOW_BLOCK_UNBIND:
		err = nft_flow_offload_unbind(bo, basechain);
		break;
	default:
		WARN_ON_ONCE(1);
		err = -EOPNOTSUPP;
	}

	return err;
}

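/* Direct offload path: the device implements ndo_setup_tc, so ask it to
 * set up a flow block and then bind/unbind the callbacks it returned.
 */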
static int nft_block_offload_cmd(struct nft_base_chain *chain,
				 struct net_device *dev,
				 enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};
	int err;

	bo.net = dev_net(dev);
	bo.block = &chain->flow_block;
	bo.command = cmd;
	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack = &extack;
	INIT_LIST_HEAD(&bo.cb_list);

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
	if (err < 0)
		return err;

	return nft_block_setup(chain, &bo, cmd);
}

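/* Indirect offload path: used for devices without ndo_setup_tc, where a
 * driver has registered an indirect block callback that fills in
 * bo.cb_list on the device's behalf.
 */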
static void nft_indr_block_ing_cmd(struct net_device *dev,
				   struct nft_base_chain *chain,
				   flow_indr_block_bind_cb_t *cb,
				   void *cb_priv,
				   enum flow_block_command cmd)
{
	struct netlink_ext_ack extack = {};
	struct flow_block_offload bo = {};

	if (!chain)
		return;

	bo.net = dev_net(dev);
	bo.block = &chain->flow_block;
	bo.command = cmd;
	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack = &extack;
	INIT_LIST_HEAD(&bo.cb_list);

	cb(dev, cb_priv, TC_SETUP_BLOCK, &bo);

	nft_block_setup(chain, &bo, cmd);
}

static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
				      struct net_device *dev,
				      enum flow_block_command cmd)
{
	struct flow_block_offload bo = {};
	struct netlink_ext_ack extack = {};

	bo.net = dev_net(dev);
	bo.block = &chain->flow_block;
	bo.command = cmd;
	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	bo.extack = &extack;
	INIT_LIST_HEAD(&bo.cb_list);

	flow_indr_block_call(dev, &bo, cmd);

	if (list_empty(&bo.cb_list))
		return -EOPNOTSUPP;

	return nft_block_setup(chain, &bo, cmd);
}

#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK

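/*
 * Bind or unbind the flow block of a base chain. Only base chains bound
 * to a device can be offloaded, and a default drop policy is rejected;
 * the direct path is used when the device implements ndo_setup_tc,
 * otherwise the indirect path is tried.
 */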
static int nft_flow_offload_chain(struct nft_chain *chain,
				  u8 *ppolicy,
				  enum flow_block_command cmd)
{
	struct nft_base_chain *basechain;
	struct net_device *dev;
	u8 policy;

	if (!nft_is_base_chain(chain))
		return -EOPNOTSUPP;

	basechain = nft_base_chain(chain);
	dev = basechain->ops.dev;
	if (!dev)
		return -EOPNOTSUPP;

	policy = ppolicy ? *ppolicy : basechain->policy;

	/* Only default policy to accept is supported for now. */
	if (cmd == FLOW_BLOCK_BIND && policy == NF_DROP)
		return -EOPNOTSUPP;

	if (dev->netdev_ops->ndo_setup_tc)
		return nft_block_offload_cmd(basechain, dev, cmd);
	else
		return nft_indr_block_offload_cmd(basechain, dev, cmd);
}

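/*
 * Called from the nf_tables commit path: walk the transaction list and
 * mirror chain and rule changes to hardware for netdev-family chains
 * created with NFT_CHAIN_HW_OFFLOAD. New rules can only be appended;
 * insertions and replacements are rejected.
 */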
int nft_flow_rule_offload_commit(struct net *net)
{
	struct nft_trans *trans;
	int err = 0;
	u8 policy;

	list_for_each_entry(trans, &net->nft.commit_list, list) {
		if (trans->ctx.family != NFPROTO_NETDEV)
			continue;

		switch (trans->msg_type) {
		case NFT_MSG_NEWCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
			    nft_trans_chain_update(trans))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_BIND);
			break;
		case NFT_MSG_DELCHAIN:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			policy = nft_trans_chain_policy(trans);
			err = nft_flow_offload_chain(trans->ctx.chain, &policy,
						     FLOW_BLOCK_UNBIND);
			break;
		case NFT_MSG_NEWRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			if (trans->ctx.flags & NLM_F_REPLACE ||
			    !(trans->ctx.flags & NLM_F_APPEND))
				return -EOPNOTSUPP;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_REPLACE);
			nft_flow_rule_destroy(nft_trans_flow_rule(trans));
			break;
		case NFT_MSG_DELRULE:
			if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			err = nft_flow_offload_rule(trans->ctx.chain,
						    nft_trans_rule(trans),
						    nft_trans_flow_rule(trans),
						    FLOW_CLS_DESTROY);
			break;
		}

		if (err)
			return err;
	}

	return err;
}

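/* Find the offloaded base chain bound to this device, if any. The caller
 * must hold the per-netns nf_tables commit mutex.
 */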
static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
	struct nft_base_chain *basechain;
	struct net *net = dev_net(dev);
	const struct nft_table *table;
	struct nft_chain *chain;

	list_for_each_entry(table, &net->nft.tables, list) {
		if (table->family != NFPROTO_NETDEV)
			continue;

		list_for_each_entry(chain, &table->chains, list) {
			if (!nft_is_base_chain(chain) ||
			    !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
				continue;

			basechain = nft_base_chain(chain);
			if (strncmp(basechain->dev_name, dev->name, IFNAMSIZ))
				continue;

			return chain;
		}
	}

	return NULL;
}

static void nft_indr_block_cb(struct net_device *dev,
			      flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      enum flow_block_command cmd)
{
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain) {
		struct nft_base_chain *basechain;

		basechain = nft_base_chain(chain);
		nft_indr_block_ing_cmd(dev, basechain, cb, cb_priv, cmd);
	}
	mutex_unlock(&net->nft.commit_mutex);
}

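/* Tear down all offloaded rules on the chain and unbind its flow block;
 * used when the underlying device goes away.
 */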
static void nft_offload_chain_clean(struct nft_chain *chain)
{
	struct nft_rule *rule;

	list_for_each_entry(rule, &chain->rules, list) {
		nft_flow_offload_rule(chain, rule,
				      NULL, FLOW_CLS_DESTROY);
	}

	nft_flow_offload_chain(chain, NULL, FLOW_BLOCK_UNBIND);
}

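/* Netdevice notifier: when a device with an offloaded base chain is
 * unregistered, clean up the offload state bound to it.
 */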
static int nft_offload_netdev_event(struct notifier_block *this,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct nft_chain *chain;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	mutex_lock(&net->nft.commit_mutex);
	chain = __nft_offload_get_chain(dev);
	if (chain)
		nft_offload_chain_clean(chain);
	mutex_unlock(&net->nft.commit_mutex);

	return NOTIFY_DONE;
}

static struct flow_indr_block_ing_entry block_ing_entry = {
	.cb	= nft_indr_block_cb,
	.list	= LIST_HEAD_INIT(block_ing_entry.list),
};

static struct notifier_block nft_offload_netdev_notifier = {
	.notifier_call	= nft_offload_netdev_event,
};

int nft_offload_init(void)
{
	int err;

	err = register_netdevice_notifier(&nft_offload_netdev_notifier);
	if (err < 0)
		return err;

	flow_indr_add_block_ing_cb(&block_ing_entry);

	return 0;
}

void nft_offload_exit(void)
{
	flow_indr_del_block_ing_cb(&block_ing_entry);
	unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}