linux/net/bpf/test_run.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>
#include <linux/smp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

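/* Run @prog against @ctx for @repeat iterations (at least once). The last
 * program return value is stored in @retval and the average per-run duration
 * in nanoseconds in @time. The loop runs under RCU with migration disabled,
 * dropping both around cond_resched() whenever a reschedule is needed.
 */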
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
                        u32 *retval, u32 *time, bool xdp)
{
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
        enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
        int ret = 0;
        u32 i;

        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(storage[stype])) {
                        storage[stype] = NULL;
                        for_each_cgroup_storage_type(stype)
                                bpf_cgroup_storage_free(storage[stype]);
                        return -ENOMEM;
                }
        }

        if (!repeat)
                repeat = 1;

        rcu_read_lock();
        migrate_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                bpf_cgroup_storage_set(storage);

                if (xdp)
                        *retval = bpf_prog_run_xdp(prog, ctx);
                else
                        *retval = BPF_PROG_RUN(prog, ctx);

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (need_resched()) {
                        time_spent += ktime_get_ns() - time_start;
                        migrate_enable();
                        rcu_read_unlock();

                        cond_resched();

                        rcu_read_lock();
                        migrate_disable();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        migrate_enable();
        rcu_read_unlock();

        do_div(time_spent, repeat);
        *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);

        return ret;
}

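/* Copy test results back to user space: the (possibly clamped) output data,
 * its full size, the program return value and the measured duration.
 */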
static int bpf_test_finish(const union bpf_attr *kattr,
                           union bpf_attr __user *uattr, const void *data,
                           u32 size, u32 retval, u32 duration)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
        int err = -EFAULT;
        u32 copy_size = size;

        /* Clamp the copy if the user has provided a size hint, but copy the
         * full buffer if not, to retain the old behaviour.
         */
        if (kattr->test.data_size_out &&
            copy_size > kattr->test.data_size_out) {
                copy_size = kattr->test.data_size_out;
                err = -ENOSPC;
        }

        if (data_out && copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                goto out;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;
        if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        trace_bpf_test_finish(&err);
        return err;
}

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. Seven or more arguments can be
 * supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
              "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
        return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
        return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
        return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
        return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
        return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
        return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
        struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
        return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
        return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
        *b += 1;
        return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

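/* Allocate a zeroed test buffer with the requested headroom and tailroom and
 * copy the user-supplied packet data into it. Returns the buffer on success
 * or an ERR_PTR() on failure.
 */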
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
                           u32 headroom, u32 tailroom)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
        u32 user_size = kattr->test.data_size_in;
        void *data;

        if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
                return ERR_PTR(-EINVAL);

        if (user_size > size)
                return ERR_PTR(-EMSGSIZE);

        data = kzalloc(size + headroom + tailroom, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(data + headroom, data_in, user_size)) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }

        return data;
}

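/* Test runner for fentry/fexit and fmod_ret programs. For fentry/fexit, call
 * the bpf_fentry_test* functions and verify their return values; for
 * fmod_ret, call bpf_modify_return_test() and report its return value in the
 * low 16 bits of retval and a side-effect flag in the high 16 bits.
 */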
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr)
{
        struct bpf_fentry_test_t arg = {};
        u16 side_effect = 0, ret = 0;
        int b = 2, err = -EFAULT;
        u32 retval = 0;

        if (kattr->test.flags || kattr->test.cpu)
                return -EINVAL;

        switch (prog->expected_attach_type) {
        case BPF_TRACE_FENTRY:
        case BPF_TRACE_FEXIT:
                if (bpf_fentry_test1(1) != 2 ||
                    bpf_fentry_test2(2, 3) != 5 ||
                    bpf_fentry_test3(4, 5, 6) != 15 ||
                    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
                    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
                    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
                    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
                    bpf_fentry_test8(&arg) != 0)
                        goto out;
                break;
        case BPF_MODIFY_RETURN:
                ret = bpf_modify_return_test(1, &b);
                if (b != 2)
                        side_effect = 1;
                break;
        default:
                goto out;
        }

        retval = ((u32)side_effect << 16) | ret;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;

        err = 0;
out:
        trace_bpf_test_finish(&err);
        return err;
}

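/* Test run for raw tracepoint programs: the program is run exactly once, on
 * the current CPU or on the CPU requested via BPF_F_TEST_RUN_ON_CPU, with
 * the user-supplied context passed through unmodified.
 */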
struct bpf_raw_tp_test_run_info {
        struct bpf_prog *prog;
        void *ctx;
        u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
        struct bpf_raw_tp_test_run_info *info = data;

        rcu_read_lock();
        info->retval = BPF_PROG_RUN(info->prog, info->ctx);
        rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr)
{
        void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
        __u32 ctx_size_in = kattr->test.ctx_size_in;
        struct bpf_raw_tp_test_run_info info;
        int cpu = kattr->test.cpu, err = 0;
        int current_cpu;

        /* doesn't support data_in/out, ctx_out, duration, or repeat */
        if (kattr->test.data_in || kattr->test.data_out ||
            kattr->test.ctx_out || kattr->test.duration ||
            kattr->test.repeat)
                return -EINVAL;

        if (ctx_size_in < prog->aux->max_ctx_offset ||
            ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
                return -EINVAL;

        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
                return -EINVAL;

        if (ctx_size_in) {
                info.ctx = kzalloc(ctx_size_in, GFP_USER);
                if (!info.ctx)
                        return -ENOMEM;
                if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
                        err = -EFAULT;
                        goto out;
                }
        } else {
                info.ctx = NULL;
        }

        info.prog = prog;

        current_cpu = get_cpu();
        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
            cpu == current_cpu) {
                __bpf_prog_test_run_raw_tp(&info);
        } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                /* smp_call_function_single() also checks cpu_online()
                 * after csd_lock(). However, since cpu comes from user
                 * space, do an extra quick check to filter out an
                 * invalid value before smp_call_function_single().
                 */
                err = -ENXIO;
        } else {
                err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
                                               &info, 1);
        }
        put_cpu();

        if (!err &&
            copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
                err = -EFAULT;

out:
        kfree(info.ctx);
        return err;
}

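/* Allocate and populate a kernel copy of the user-supplied test context.
 * Returns NULL when neither ctx_in nor ctx_out was provided, the buffer on
 * success, or an ERR_PTR() on failure.
 */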
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        u32 size = kattr->test.ctx_size_in;
        void *data;
        int err;

        if (!data_in && !data_out)
                return NULL;

        data = kzalloc(max_size, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (data_in) {
                err = bpf_check_uarg_tail_zero(data_in, max_size, size);
                if (err) {
                        kfree(data);
                        return ERR_PTR(err);
                }

                size = min_t(u32, max_size, size);
                if (copy_from_user(data, data_in, size)) {
                        kfree(data);
                        return ERR_PTR(-EFAULT);
                }
        }
        return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
                          union bpf_attr __user *uattr, const void *data,
                          u32 size)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        int err = -EFAULT;
        u32 copy_size = size;

        if (!data || !data_out)
                return 0;

        if (copy_size > kattr->test.ctx_size_out) {
                copy_size = kattr->test.ctx_size_out;
                err = -ENOSPC;
        }

        if (copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
        return !memchr_inv((u8 *)buf + from, 0, to - from);
}

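/* Apply the user-visible __sk_buff context to the real skb. Only a small set
 * of fields (mark, priority, ifindex, cb, tstamp, wire_len, gso_segs,
 * gso_size) may be set by the caller; everything else must be zero.
 */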
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return 0;

        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
                return -EINVAL;

        /* mark is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
                           offsetof(struct __sk_buff, priority)))
                return -EINVAL;

        /* priority is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
                           offsetof(struct __sk_buff, ifindex)))
                return -EINVAL;

        /* ifindex is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
                           offsetof(struct __sk_buff, cb)))
                return -EINVAL;

        /* cb is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
                           offsetof(struct __sk_buff, tstamp)))
                return -EINVAL;

        /* tstamp is allowed */
        /* wire_len is allowed */
        /* gso_segs is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
                           offsetof(struct __sk_buff, gso_size)))
                return -EINVAL;

        /* gso_size is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
                           sizeof(struct __sk_buff)))
                return -EINVAL;

        skb->mark = __skb->mark;
        skb->priority = __skb->priority;
        skb->tstamp = __skb->tstamp;
        memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

        if (__skb->wire_len == 0) {
                cb->pkt_len = skb->len;
        } else {
                if (__skb->wire_len < skb->len ||
                    __skb->wire_len > GSO_MAX_SIZE)
                        return -EINVAL;
                cb->pkt_len = __skb->wire_len;
        }

        if (__skb->gso_segs > GSO_MAX_SEGS)
                return -EINVAL;
        skb_shinfo(skb)->gso_segs = __skb->gso_segs;
        skb_shinfo(skb)->gso_size = __skb->gso_size;

        return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return;

        __skb->mark = skb->mark;
        __skb->priority = skb->priority;
        __skb->ifindex = skb->dev->ifindex;
        __skb->tstamp = skb->tstamp;
        memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
        __skb->wire_len = cb->pkt_len;
        __skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

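/* Test runner for skb-based program types (tc classifiers/actions, LWT and
 * others): build an skb on the loopback device (or the device named by
 * ctx->ifindex) around the user data, run the program via bpf_test_run(),
 * and copy the resulting packet and __sk_buff context back to user space.
 */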
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        bool is_l2 = false, is_direct_pkt_access = false;
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev = net->loopback_dev;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct __sk_buff *ctx = NULL;
        u32 retval, duration;
        int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        struct sock *sk;
        void *data;
        int ret;

        if (kattr->test.flags || kattr->test.cpu)
                return -EINVAL;

        data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
                return PTR_ERR(data);

        ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
        if (IS_ERR(ctx)) {
                kfree(data);
                return PTR_ERR(ctx);
        }

        switch (prog->type) {
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
                is_l2 = true;
                fallthrough;
        case BPF_PROG_TYPE_LWT_IN:
        case BPF_PROG_TYPE_LWT_OUT:
        case BPF_PROG_TYPE_LWT_XMIT:
                is_direct_pkt_access = true;
                break;
        default:
                break;
        }

        sk = kzalloc(sizeof(struct sock), GFP_USER);
        if (!sk) {
                kfree(data);
                kfree(ctx);
                return -ENOMEM;
        }
        sock_net_set(sk, net);
        sock_init_data(NULL, sk);

        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                kfree(ctx);
                kfree(sk);
                return -ENOMEM;
        }
        skb->sk = sk;

        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        __skb_put(skb, size);
        if (ctx && ctx->ifindex > 1) {
                dev = dev_get_by_index(net, ctx->ifindex);
                if (!dev) {
                        ret = -ENODEV;
                        goto out;
                }
        }
        skb->protocol = eth_type_trans(skb, dev);
        skb_reset_network_header(skb);

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                sk->sk_family = AF_INET;
                if (sizeof(struct iphdr) <= skb_headlen(skb)) {
                        sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
                        sk->sk_daddr = ip_hdr(skb)->daddr;
                }
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                sk->sk_family = AF_INET6;
                if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
                        sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
                        sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
                }
                break;
#endif
        default:
                break;
        }

        if (is_l2)
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        ret = convert___skb_to_skb(skb, ctx);
        if (ret)
                goto out;
        ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
        if (ret)
                goto out;
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                        if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                }
                memset(__skb_push(skb, hh_len), 0, hh_len);
        }
        convert_skb_to___skb(skb, ctx);

        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
                size = skb_headlen(skb);
        ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, ctx,
                                     sizeof(struct __sk_buff));
out:
        if (dev && dev != net->loopback_dev)
                dev_put(dev);
        kfree_skb(skb);
        bpf_sk_storage_free(sk);
        kfree(sk);
        kfree(ctx);
        return ret;
}

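/* Test runner for XDP programs: set up an xdp_buff over the user data with
 * XDP_PACKET_HEADROOM, run the program via bpf_test_run(), and return the
 * (possibly adjusted) packet contents to user space.
 */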
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        u32 headroom = XDP_PACKET_HEADROOM;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct netdev_rx_queue *rxqueue;
        struct xdp_buff xdp = {};
        u32 retval, duration;
        u32 max_data_sz;
        void *data;
        int ret;

        if (kattr->test.ctx_in || kattr->test.ctx_out)
                return -EINVAL;

        /* XDP has extra tailroom as (most) drivers use a full page */
        max_data_sz = 4096 - headroom - tailroom;

        data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
        if (IS_ERR(data))
                return PTR_ERR(data);

        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
                      &rxqueue->xdp_rxq);
        xdp_prepare_buff(&xdp, data, headroom, size, true);

        bpf_prog_change_xdp(NULL, prog);
        ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
        if (ret)
                goto out;
        if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
        bpf_prog_change_xdp(prog, NULL);
        kfree(data);
        return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
                return -EINVAL;

        /* flags is allowed */

        if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
                           sizeof(struct bpf_flow_keys)))
                return -EINVAL;

        return 0;
}

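/* Test runner for flow dissector programs: dissect the supplied packet
 * @repeat times and return the resulting bpf_flow_keys together with the
 * average per-run duration.
 */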
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
{
        u32 size = kattr->test.data_size_in;
        struct bpf_flow_dissector ctx = {};
        u32 repeat = kattr->test.repeat;
        struct bpf_flow_keys *user_ctx;
        struct bpf_flow_keys flow_keys;
        u64 time_start, time_spent = 0;
        const struct ethhdr *eth;
        unsigned int flags = 0;
        u32 retval, duration;
        void *data;
        int ret;
        u32 i;

        if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
                return -EINVAL;

        if (kattr->test.flags || kattr->test.cpu)
                return -EINVAL;

        if (size < ETH_HLEN)
                return -EINVAL;

        data = bpf_test_init(kattr, size, 0, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);

        eth = (struct ethhdr *)data;

        if (!repeat)
                repeat = 1;

        user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
        if (IS_ERR(user_ctx)) {
                kfree(data);
                return PTR_ERR(user_ctx);
        }
        if (user_ctx) {
                ret = verify_user_bpf_flow_keys(user_ctx);
                if (ret)
                        goto out;
                flags = user_ctx->flags;
        }

        ctx.flow_keys = &flow_keys;
        ctx.data = data;
        ctx.data_end = (__u8 *)data + size;

        rcu_read_lock();
        preempt_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
                                          size, flags);

                if (signal_pending(current)) {
                        preempt_enable();
                        rcu_read_unlock();

                        ret = -EINTR;
                        goto out;
                }

                if (need_resched()) {
                        time_spent += ktime_get_ns() - time_start;
                        preempt_enable();
                        rcu_read_unlock();

                        cond_resched();

                        rcu_read_lock();
                        preempt_disable();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        preempt_enable();
        rcu_read_unlock();

        do_div(time_spent, repeat);
        duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
                              retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, user_ctx,
                                     sizeof(struct bpf_flow_keys));

out:
        kfree(user_ctx);
        kfree(data);
        return ret;
}