linux/net/sched/act_skbedit.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;

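/* Per-packet handler: applies whichever edits are enabled in params->flags
 * (priority, DS-field-derived priority, queue_mapping, mark/mask, ptype).
 * params is fetched with rcu_dereference_bh(), relying on the BH-side RCU
 * protection of the TC datapath.  The packet is dropped (TC_ACT_SHOT) only
 * when SKBEDIT_F_INHERITDSFIELD needs an IP header that cannot be pulled.
 */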
static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
                           struct tcf_result *res)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;
        int action;

        tcf_lastuse_update(&d->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

        params = rcu_dereference_bh(d->params);
        action = READ_ONCE(d->tcf_action);

        if (params->flags & SKBEDIT_F_PRIORITY)
                skb->priority = params->priority;
        if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
                int wlen = skb_network_offset(skb);

                switch (tc_skb_protocol(skb)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen))
                                goto err;
                        skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
                        break;

                case htons(ETH_P_IPV6):
                        wlen += sizeof(struct ipv6hdr);
                        if (!pskb_may_pull(skb, wlen))
                                goto err;
                        skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
                        break;
                }
        }
        if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
            skb->dev->real_num_tx_queues > params->queue_mapping)
                skb_set_queue_mapping(skb, params->queue_mapping);
        if (params->flags & SKBEDIT_F_MARK) {
                skb->mark &= ~params->mask;
                skb->mark |= params->mark & params->mask;
        }
        if (params->flags & SKBEDIT_F_PTYPE)
                skb->pkt_type = params->ptype;
        return action;

err:
        qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
        return TC_ACT_SHOT;
}

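/* Folds counters reported by drivers that offload this action into the
 * action's software stats; the last-use timestamp only ever moves forward.
 */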
static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
                                     u32 packets, u64 lastuse, bool hw)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_t *tm = &d->tcf_tm;

        tcf_action_update_stats(a, bytes, packets, false, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

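/* Netlink attribute policy: every attribute carries a fixed-size binary
 * payload, so only attribute lengths are validated here.
 */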
static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
        [TCA_SKBEDIT_PARMS]             = { .len = sizeof(struct tc_skbedit) },
        [TCA_SKBEDIT_PRIORITY]          = { .len = sizeof(u32) },
        [TCA_SKBEDIT_QUEUE_MAPPING]     = { .len = sizeof(u16) },
        [TCA_SKBEDIT_MARK]              = { .len = sizeof(u32) },
        [TCA_SKBEDIT_PTYPE]             = { .len = sizeof(u16) },
        [TCA_SKBEDIT_MASK]              = { .len = sizeof(u32) },
        [TCA_SKBEDIT_FLAGS]             = { .len = sizeof(u64) },
};

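/* Creates a new skbedit instance or updates an existing one.  At least one
 * edit must be requested, otherwise -EINVAL is returned.  The netlink
 * attributes are parsed into a freshly allocated tcf_skbedit_params, which
 * is swapped into d->params under tcf_lock; the old parameter block (if
 * any) is freed after an RCU grace period, so the datapath never sees a
 * half-updated configuration.
 */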
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
                            int ovr, int bind, bool rtnl_held,
                            struct tcf_proto *tp, u32 act_flags,
                            struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
        struct tcf_skbedit_params *params_new;
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
        u16 *queue_mapping = NULL, *ptype = NULL;
        bool exists = false;
        int ret = 0, err;
        u32 index;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
                                          skbedit_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_SKBEDIT_PARMS] == NULL)
                return -EINVAL;

        if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
                flags |= SKBEDIT_F_PRIORITY;
                priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
        }

        if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
                flags |= SKBEDIT_F_QUEUE_MAPPING;
                queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
        }

        if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
                ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
                if (!skb_pkt_type_ok(*ptype))
                        return -EINVAL;
                flags |= SKBEDIT_F_PTYPE;
        }

        if (tb[TCA_SKBEDIT_MARK] != NULL) {
                flags |= SKBEDIT_F_MARK;
                mark = nla_data(tb[TCA_SKBEDIT_MARK]);
        }

        if (tb[TCA_SKBEDIT_MASK] != NULL) {
                flags |= SKBEDIT_F_MASK;
                mask = nla_data(tb[TCA_SKBEDIT_MASK]);
        }

        if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
                u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

                if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
                        flags |= SKBEDIT_F_INHERITDSFIELD;
        }

        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        if (!flags) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }

        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
                                     &act_skbedit_ops, bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }

                d = to_skbedit(*a);
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(*a);
                if (!ovr) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        }
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
                err = -ENOMEM;
                goto put_chain;
        }

        params_new->flags = flags;
        if (flags & SKBEDIT_F_PRIORITY)
                params_new->priority = *priority;
        if (flags & SKBEDIT_F_QUEUE_MAPPING)
                params_new->queue_mapping = *queue_mapping;
        if (flags & SKBEDIT_F_MARK)
                params_new->mark = *mark;
        if (flags & SKBEDIT_F_PTYPE)
                params_new->ptype = *ptype;
        /* default behaviour is to use all the bits */
        params_new->mask = 0xffffffff;
        if (flags & SKBEDIT_F_MASK)
                params_new->mask = *mask;

        spin_lock_bh(&d->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        params_new = rcu_replace_pointer(d->params, params_new,
                                         lockdep_is_held(&d->tcf_lock));
        spin_unlock_bh(&d->tcf_lock);
        if (params_new)
                kfree_rcu(params_new, rcu);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

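/* Serializes the current configuration back to user space.  tcf_lock is
 * held so the parameter block cannot be replaced while it is being dumped.
 */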
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
                            int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;
        struct tc_skbedit opt = {
                .index   = d->tcf_index,
                .refcnt  = refcount_read(&d->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
        };
        u64 pure_flags = 0;
        struct tcf_t t;

        spin_lock_bh(&d->tcf_lock);
        params = rcu_dereference_protected(d->params,
                                           lockdep_is_held(&d->tcf_lock));
        opt.action = d->tcf_action;

        if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_PRIORITY) &&
            nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
            nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_MARK) &&
            nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_PTYPE) &&
            nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_MASK) &&
            nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
                goto nla_put_failure;
        if (params->flags & SKBEDIT_F_INHERITDSFIELD)
                pure_flags |= SKBEDIT_F_INHERITDSFIELD;
        if (pure_flags != 0 &&
            nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
                goto nla_put_failure;

        tcf_tm_dump(&t, &d->tcf_tm);
        if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&d->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&d->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

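/* Called when the action instance is destroyed; releases the parameter
 * block after an RCU grace period.
 */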
static void tcf_skbedit_cleanup(struct tc_action *a)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;

        params = rcu_dereference_protected(d->params, 1);
        if (params)
                kfree_rcu(params, rcu);
}

static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
                              struct netlink_callback *cb, int type,
                              const struct tc_action_ops *ops,
                              struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tcf_idr_search(tn, a, index);
}

static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_skbedit))
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
                + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
                + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
                + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}

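/* The ops table below ties the callbacks above into the generic TC action
 * framework.  For context only (not part of this file), a typical way to
 * exercise the action from iproute2 looks roughly like:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall \
 *           action skbedit mark 0x10 priority 1:2
 *
 * Device name and values above are purely illustrative.
 */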
static struct tc_action_ops act_skbedit_ops = {
        .kind           =       "skbedit",
        .id             =       TCA_ID_SKBEDIT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_skbedit_act,
        .stats_update   =       tcf_skbedit_stats_update,
        .dump           =       tcf_skbedit_dump,
        .init           =       tcf_skbedit_init,
        .cleanup        =       tcf_skbedit_cleanup,
        .walk           =       tcf_skbedit_walker,
        .get_fill_size  =       tcf_skbedit_get_fill_size,
        .lookup         =       tcf_skbedit_search,
        .size           =       sizeof(struct tcf_skbedit),
};

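/* Each network namespace gets its own action index table; the pernet ops
 * below set it up and tear it down together with the namespace.
 */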
static __net_init int skbedit_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tc_action_net_init(net, tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, skbedit_net_id);
}

static struct pernet_operations skbedit_net_ops = {
        .init = skbedit_init_net,
        .exit_batch = skbedit_exit_net,
        .id   = &skbedit_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
        return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
        tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);