linux/net/sched/act_skbedit.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

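/*
 * Example usage (iproute2 syntax, per tc-skbedit(8); the exact option set
 * available depends on the installed tc version):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *       match ip dst 192.168.0.3/32 flowid 1:3 \
 *       action skbedit queue_mapping 3
 */
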
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/dsfield.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_skbedit.h>

static unsigned int skbedit_net_id;
static struct tc_action_ops act_skbedit_ops;

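/*
 * Per-packet handler: apply the configured edits (priority, queue mapping,
 * mark, packet type, DS-field inheritance) to the skb. Runs on the fast path
 * in softirq context under the RCU-bh read side, so the parameter block is
 * fetched with rcu_dereference_bh().
 */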
static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
                           struct tcf_result *res)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;
        int action;

        tcf_lastuse_update(&d->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

        params = rcu_dereference_bh(d->params);
        action = READ_ONCE(d->tcf_action);

        if (params->flags & SKBEDIT_F_PRIORITY)
                skb->priority = params->priority;
        if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
                int wlen = skb_network_offset(skb);

                switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen))
                                goto err;
                        skb->priority = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
                        break;

                case htons(ETH_P_IPV6):
                        wlen += sizeof(struct ipv6hdr);
                        if (!pskb_may_pull(skb, wlen))
                                goto err;
                        skb->priority = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
                        break;
                }
        }
        if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
            skb->dev->real_num_tx_queues > params->queue_mapping)
                skb_set_queue_mapping(skb, params->queue_mapping);
        if (params->flags & SKBEDIT_F_MARK) {
                skb->mark &= ~params->mask;
                skb->mark |= params->mark & params->mask;
        }
        if (params->flags & SKBEDIT_F_PTYPE)
                skb->pkt_type = params->ptype;
        return action;

err:
        qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
        return TC_ACT_SHOT;
}

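/* Fold byte/packet/drop counters reported by offloading drivers into the
 * software stats and refresh the last-used timestamp.
 */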
static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
                                     u64 packets, u64 drops,
                                     u64 lastuse, bool hw)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_t *tm = &d->tcf_tm;

        tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
        [TCA_SKBEDIT_PARMS]             = { .len = sizeof(struct tc_skbedit) },
        [TCA_SKBEDIT_PRIORITY]          = { .len = sizeof(u32) },
        [TCA_SKBEDIT_QUEUE_MAPPING]     = { .len = sizeof(u16) },
        [TCA_SKBEDIT_MARK]              = { .len = sizeof(u32) },
        [TCA_SKBEDIT_PTYPE]             = { .len = sizeof(u16) },
        [TCA_SKBEDIT_MASK]              = { .len = sizeof(u32) },
        [TCA_SKBEDIT_FLAGS]             = { .len = sizeof(u64) },
};

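/*
 * Parse the netlink attributes and either create a new skbedit action
 * instance or update an existing one. A fresh parameter block is swapped in
 * under tcf_lock and the old one is freed after an RCU grace period.
 */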
static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
                            struct tcf_proto *tp, u32 act_flags,
                            struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
        bool bind = act_flags & TCA_ACT_FLAGS_BIND;
        struct tcf_skbedit_params *params_new;
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
        u16 *queue_mapping = NULL, *ptype = NULL;
        bool exists = false;
        int ret = 0, err;
        u32 index;

        if (nla == NULL)
                return -EINVAL;

        err = nla_parse_nested_deprecated(tb, TCA_SKBEDIT_MAX, nla,
                                          skbedit_policy, NULL);
        if (err < 0)
                return err;

        if (tb[TCA_SKBEDIT_PARMS] == NULL)
                return -EINVAL;

        if (tb[TCA_SKBEDIT_PRIORITY] != NULL) {
                flags |= SKBEDIT_F_PRIORITY;
                priority = nla_data(tb[TCA_SKBEDIT_PRIORITY]);
        }

        if (tb[TCA_SKBEDIT_QUEUE_MAPPING] != NULL) {
                flags |= SKBEDIT_F_QUEUE_MAPPING;
                queue_mapping = nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING]);
        }

        if (tb[TCA_SKBEDIT_PTYPE] != NULL) {
                ptype = nla_data(tb[TCA_SKBEDIT_PTYPE]);
                if (!skb_pkt_type_ok(*ptype))
                        return -EINVAL;
                flags |= SKBEDIT_F_PTYPE;
        }

        if (tb[TCA_SKBEDIT_MARK] != NULL) {
                flags |= SKBEDIT_F_MARK;
                mark = nla_data(tb[TCA_SKBEDIT_MARK]);
        }

        if (tb[TCA_SKBEDIT_MASK] != NULL) {
                flags |= SKBEDIT_F_MASK;
                mask = nla_data(tb[TCA_SKBEDIT_MASK]);
        }

        if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
                u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);

                if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
                        flags |= SKBEDIT_F_INHERITDSFIELD;
        }

        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
        index = parm->index;
        err = tcf_idr_check_alloc(tn, &index, a, bind);
        if (err < 0)
                return err;
        exists = err;
        if (exists && bind)
                return 0;

        if (!flags) {
                if (exists)
                        tcf_idr_release(*a, bind);
                else
                        tcf_idr_cleanup(tn, index);
                return -EINVAL;
        }

        if (!exists) {
                ret = tcf_idr_create(tn, index, est, a,
                                     &act_skbedit_ops, bind, true, 0);
                if (ret) {
                        tcf_idr_cleanup(tn, index);
                        return ret;
                }

                d = to_skbedit(*a);
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(*a);
                if (!(act_flags & TCA_ACT_FLAGS_REPLACE)) {
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
        }
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;

        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
                err = -ENOMEM;
                goto put_chain;
        }

        params_new->flags = flags;
        if (flags & SKBEDIT_F_PRIORITY)
                params_new->priority = *priority;
        if (flags & SKBEDIT_F_QUEUE_MAPPING)
                params_new->queue_mapping = *queue_mapping;
        if (flags & SKBEDIT_F_MARK)
                params_new->mark = *mark;
        if (flags & SKBEDIT_F_PTYPE)
                params_new->ptype = *ptype;
        /* default behaviour is to use all the bits */
        params_new->mask = 0xffffffff;
        if (flags & SKBEDIT_F_MASK)
                params_new->mask = *mask;

        spin_lock_bh(&d->tcf_lock);
        goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        params_new = rcu_replace_pointer(d->params, params_new,
                                         lockdep_is_held(&d->tcf_lock));
        spin_unlock_bh(&d->tcf_lock);
        if (params_new)
                kfree_rcu(params_new, rcu);
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);

        return ret;
put_chain:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
release_idr:
        tcf_idr_release(*a, bind);
        return err;
}

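/* Dump the action's current configuration and timestamps to a netlink
 * message, under tcf_lock so the parameter block cannot change underneath.
 */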
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
                            int bind, int ref)
{
        unsigned char *b = skb_tail_pointer(skb);
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;
        struct tc_skbedit opt = {
                .index   = d->tcf_index,
                .refcnt  = refcount_read(&d->tcf_refcnt) - ref,
                .bindcnt = atomic_read(&d->tcf_bindcnt) - bind,
        };
        u64 pure_flags = 0;
        struct tcf_t t;

        spin_lock_bh(&d->tcf_lock);
        params = rcu_dereference_protected(d->params,
                                           lockdep_is_held(&d->tcf_lock));
        opt.action = d->tcf_action;

        if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_PRIORITY) &&
            nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
            nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_MARK) &&
            nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_PTYPE) &&
            nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
                goto nla_put_failure;
        if ((params->flags & SKBEDIT_F_MASK) &&
            nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
                goto nla_put_failure;
        if (params->flags & SKBEDIT_F_INHERITDSFIELD)
                pure_flags |= SKBEDIT_F_INHERITDSFIELD;
        if (pure_flags != 0 &&
            nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
                goto nla_put_failure;

        tcf_tm_dump(&t, &d->tcf_tm);
        if (nla_put_64bit(skb, TCA_SKBEDIT_TM, sizeof(t), &t, TCA_SKBEDIT_PAD))
                goto nla_put_failure;
        spin_unlock_bh(&d->tcf_lock);

        return skb->len;

nla_put_failure:
        spin_unlock_bh(&d->tcf_lock);
        nlmsg_trim(skb, b);
        return -1;
}

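/* Release the parameter block when the action instance is destroyed. */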
static void tcf_skbedit_cleanup(struct tc_action *a)
{
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_skbedit_params *params;

        params = rcu_dereference_protected(d->params, 1);
        if (params)
                kfree_rcu(params, rcu);
}

static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
                              struct netlink_callback *cb, int type,
                              const struct tc_action_ops *ops,
                              struct netlink_ext_ack *extack)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_skbedit_search(struct net *net, struct tc_action **a, u32 index)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tcf_idr_search(tn, a, index);
}

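/* Upper bound on the netlink attribute space needed to dump one instance. */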
static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
{
        return nla_total_size(sizeof(struct tc_skbedit))
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
                + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
                + nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
                + nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
                + nla_total_size_64bit(sizeof(u64)); /* TCA_SKBEDIT_FLAGS */
}

static struct tc_action_ops act_skbedit_ops = {
        .kind           =       "skbedit",
        .id             =       TCA_ID_SKBEDIT,
        .owner          =       THIS_MODULE,
        .act            =       tcf_skbedit_act,
        .stats_update   =       tcf_skbedit_stats_update,
        .dump           =       tcf_skbedit_dump,
        .init           =       tcf_skbedit_init,
        .cleanup        =       tcf_skbedit_cleanup,
        .walk           =       tcf_skbedit_walker,
        .get_fill_size  =       tcf_skbedit_get_fill_size,
        .lookup         =       tcf_skbedit_search,
        .size           =       sizeof(struct tcf_skbedit),
};

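/* Per-network-namespace setup and teardown of the skbedit action table. */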
static __net_init int skbedit_init_net(struct net *net)
{
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);

        return tc_action_net_init(net, tn, &act_skbedit_ops);
}

static void __net_exit skbedit_exit_net(struct list_head *net_list)
{
        tc_action_net_exit(net_list, skbedit_net_id);
}

static struct pernet_operations skbedit_net_ops = {
        .init = skbedit_init_net,
        .exit_batch = skbedit_exit_net,
        .id   = &skbedit_net_id,
        .size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
MODULE_DESCRIPTION("SKB Editing");
MODULE_LICENSE("GPL");

static int __init skbedit_init_module(void)
{
        return tcf_register_action(&act_skbedit_ops, &skbedit_net_ops);
}

static void __exit skbedit_cleanup_module(void)
{
        tcf_unregister_action(&act_skbedit_ops, &skbedit_net_ops);
}

module_init(skbedit_init_module);
module_exit(skbedit_cleanup_module);