linux/net/bpf/test_run.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>
#include <linux/smp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

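/* Run @prog @repeat times (at least once) on @ctx under RCU with migration
 * disabled, report the last return value in @retval and the average runtime
 * in nanoseconds in @time.  Temporary cgroup storage is provided for the
 * duration of the run; pending signals abort the loop with -EINTR and
 * need_resched() is honoured between iterations.
 */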
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
                        u32 *retval, u32 *time, bool xdp)
{
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
        enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
        int ret = 0;
        u32 i;

        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(storage[stype])) {
                        storage[stype] = NULL;
                        for_each_cgroup_storage_type(stype)
                                bpf_cgroup_storage_free(storage[stype]);
                        return -ENOMEM;
                }
        }

        if (!repeat)
                repeat = 1;

        rcu_read_lock();
        migrate_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                bpf_cgroup_storage_set(storage);

                if (xdp)
                        *retval = bpf_prog_run_xdp(prog, ctx);
                else
                        *retval = BPF_PROG_RUN(prog, ctx);

                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (need_resched()) {
                        time_spent += ktime_get_ns() - time_start;
                        migrate_enable();
                        rcu_read_unlock();

                        cond_resched();

                        rcu_read_lock();
                        migrate_disable();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        migrate_enable();
        rcu_read_unlock();

        do_div(time_spent, repeat);
        *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);

        return ret;
}

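/* Copy the (possibly clamped) output data along with its full size, the
 * program's return value and the measured duration back to the user's
 * bpf_attr.  Returns -ENOSPC when the supplied output buffer was too small
 * for the data, -EFAULT on copy errors, 0 otherwise.
 */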
static int bpf_test_finish(const union bpf_attr *kattr,
                           union bpf_attr __user *uattr, const void *data,
                           u32 size, u32 retval, u32 duration)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
        int err = -EFAULT;
        u32 copy_size = size;

        /* Clamp copy if the user has provided a size hint, but copy the full
         * buffer if not to retain old behaviour.
         */
        if (kattr->test.data_size_out &&
            copy_size > kattr->test.data_size_out) {
                copy_size = kattr->test.data_size_out;
                err = -ENOSPC;
        }

        if (data_out && copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                goto out;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;
        if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        trace_bpf_test_finish(&err);
        return err;
}

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. Seven or more arguments can be
 * supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
              "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
        return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
        return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
        return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
        return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
        return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
        return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
        struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
        return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
        return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
        *b += 1;
        return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

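/* Allocate a zeroed buffer with the requested headroom and tailroom and copy
 * the user-supplied test packet into it.  The packet must be at least
 * ETH_HLEN bytes and, together with head- and tailroom, fit into one page.
 */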
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
                           u32 headroom, u32 tailroom)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
        u32 user_size = kattr->test.data_size_in;
        void *data;

        if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
                return ERR_PTR(-EINVAL);

        if (user_size > size)
                return ERR_PTR(-EMSGSIZE);

        data = kzalloc(size + headroom + tailroom, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(data + headroom, data_in, user_size)) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }

        return data;
}

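/* Test runner for fentry/fexit and fmod_ret programs: call the
 * bpf_fentry_test*() and bpf_modify_return_test() functions above so that
 * programs attached to them are exercised.  The user-visible retval packs
 * the observed side effect in the upper 16 bits and the (possibly modified)
 * return value of bpf_modify_return_test() in the lower 16 bits.
 */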
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr)
{
        struct bpf_fentry_test_t arg = {};
        u16 side_effect = 0, ret = 0;
        int b = 2, err = -EFAULT;
        u32 retval = 0;

        if (kattr->test.flags || kattr->test.cpu)
                return -EINVAL;

        switch (prog->expected_attach_type) {
        case BPF_TRACE_FENTRY:
        case BPF_TRACE_FEXIT:
                if (bpf_fentry_test1(1) != 2 ||
                    bpf_fentry_test2(2, 3) != 5 ||
                    bpf_fentry_test3(4, 5, 6) != 15 ||
                    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
                    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
                    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
                    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
                    bpf_fentry_test8(&arg) != 0)
                        goto out;
                break;
        case BPF_MODIFY_RETURN:
                ret = bpf_modify_return_test(1, &b);
                if (b != 2)
                        side_effect = 1;
                break;
        default:
                goto out;
        }

        retval = ((u32)side_effect << 16) | ret;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;

        err = 0;
out:
        trace_bpf_test_finish(&err);
        return err;
}

struct bpf_raw_tp_test_run_info {
        struct bpf_prog *prog;
        void *ctx;
        u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
        struct bpf_raw_tp_test_run_info *info = data;

        rcu_read_lock();
        info->retval = BPF_PROG_RUN(info->prog, info->ctx);
        rcu_read_unlock();
}

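/* Run a raw tracepoint program once with a user-supplied context.  With
 * BPF_F_TEST_RUN_ON_CPU set, the run is carried out on the requested CPU via
 * smp_call_function_single(); data_in/out, ctx_out, duration and repeat are
 * not supported.
 */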
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
                             const union bpf_attr *kattr,
                             union bpf_attr __user *uattr)
{
        void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
        __u32 ctx_size_in = kattr->test.ctx_size_in;
        struct bpf_raw_tp_test_run_info info;
        int cpu = kattr->test.cpu, err = 0;
        int current_cpu;

        /* doesn't support data_in/out, ctx_out, duration, or repeat */
        if (kattr->test.data_in || kattr->test.data_out ||
            kattr->test.ctx_out || kattr->test.duration ||
            kattr->test.repeat)
                return -EINVAL;

        if (ctx_size_in < prog->aux->max_ctx_offset)
                return -EINVAL;

        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
                return -EINVAL;

        if (ctx_size_in) {
                info.ctx = kzalloc(ctx_size_in, GFP_USER);
                if (!info.ctx)
                        return -ENOMEM;
                if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
                        err = -EFAULT;
                        goto out;
                }
        } else {
                info.ctx = NULL;
        }

        info.prog = prog;

        current_cpu = get_cpu();
        if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
            cpu == current_cpu) {
                __bpf_prog_test_run_raw_tp(&info);
        } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
                /* smp_call_function_single() also checks cpu_online()
                 * after csd_lock(). However, since cpu is from user
                 * space, let's do an extra quick check to filter out
                 * an invalid value before smp_call_function_single().
                 */
                err = -ENXIO;
        } else {
                err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
                                               &info, 1);
        }
        put_cpu();

        if (!err &&
            copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
                err = -EFAULT;

out:
        kfree(info.ctx);
        return err;
}

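/* Copy the user-supplied context into a zeroed kernel buffer of @max_size
 * bytes.  Any bytes the user provides beyond @max_size must be zero
 * (bpf_check_uarg_tail_zero()); returns NULL when no context was supplied.
 */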
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        u32 size = kattr->test.ctx_size_in;
        void *data;
        int err;

        if (!data_in && !data_out)
                return NULL;

        data = kzalloc(max_size, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (data_in) {
                err = bpf_check_uarg_tail_zero(data_in, max_size, size);
                if (err) {
                        kfree(data);
                        return ERR_PTR(err);
                }

                size = min_t(u32, max_size, size);
                if (copy_from_user(data, data_in, size)) {
                        kfree(data);
                        return ERR_PTR(-EFAULT);
                }
        }
        return data;
}

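/* Counterpart of bpf_ctx_init(): copy the (possibly clamped) context and its
 * full size back to user space; -ENOSPC when ctx_size_out is too small.
 */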
static int bpf_ctx_finish(const union bpf_attr *kattr,
                          union bpf_attr __user *uattr, const void *data,
                          u32 size)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
        int err = -EFAULT;
        u32 copy_size = size;

        if (!data || !data_out)
                return 0;

        if (copy_size > kattr->test.ctx_size_out) {
                copy_size = kattr->test.ctx_size_out;
                err = -ENOSPC;
        }

        if (copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
                goto out;
        if (err != -ENOSPC)
                err = 0;
out:
        return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
        return !memchr_inv((u8 *)buf + from, 0, to - from);
}

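/* Validate and apply a user-supplied __sk_buff to the skb under test.  Only
 * mark, priority, ifindex, cb, tstamp, wire_len, gso_segs and gso_size may
 * be set; all other fields must be zero.
 */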
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return 0;

        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
                return -EINVAL;

        /* mark is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
                           offsetof(struct __sk_buff, priority)))
                return -EINVAL;

        /* priority is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
                           offsetof(struct __sk_buff, ifindex)))
                return -EINVAL;

        /* ifindex is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
                           offsetof(struct __sk_buff, cb)))
                return -EINVAL;

        /* cb is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
                           offsetof(struct __sk_buff, tstamp)))
                return -EINVAL;

        /* tstamp is allowed */
        /* wire_len is allowed */
        /* gso_segs is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
                           offsetof(struct __sk_buff, gso_size)))
                return -EINVAL;

        /* gso_size is allowed */

        if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
                           sizeof(struct __sk_buff)))
                return -EINVAL;

        skb->mark = __skb->mark;
        skb->priority = __skb->priority;
        skb->tstamp = __skb->tstamp;
        memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

        if (__skb->wire_len == 0) {
                cb->pkt_len = skb->len;
        } else {
                if (__skb->wire_len < skb->len ||
                    __skb->wire_len > GSO_MAX_SIZE)
                        return -EINVAL;
                cb->pkt_len = __skb->wire_len;
        }

        if (__skb->gso_segs > GSO_MAX_SEGS)
                return -EINVAL;
        skb_shinfo(skb)->gso_segs = __skb->gso_segs;
        skb_shinfo(skb)->gso_size = __skb->gso_size;

        return 0;
}

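/* Reflect the skb state after the program ran back into the user-visible
 * __sk_buff.
 */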
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
        struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

        if (!__skb)
                return;

        __skb->mark = skb->mark;
        __skb->priority = skb->priority;
        __skb->ifindex = skb->dev->ifindex;
        __skb->tstamp = skb->tstamp;
        memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
        __skb->wire_len = cb->pkt_len;
        __skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

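/* BPF_PROG_TEST_RUN handler for skb-based program types (tc, LWT, ...):
 * build a real skb around the user's packet data, attach a dummy socket,
 * run the program and return the resulting packet, __sk_buff context,
 * return value and duration.
 */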
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        bool is_l2 = false, is_direct_pkt_access = false;
        struct net *net = current->nsproxy->net_ns;
        struct net_device *dev = net->loopback_dev;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct __sk_buff *ctx = NULL;
        u32 retval, duration;
        int hh_len = ETH_HLEN;
        struct sk_buff *skb;
        struct sock *sk;
        void *data;
        int ret;

        if (kattr->test.flags || kattr->test.cpu)
                return -EINVAL;

        data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
                return PTR_ERR(data);

        ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
        if (IS_ERR(ctx)) {
                kfree(data);
                return PTR_ERR(ctx);
        }

        switch (prog->type) {
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
                is_l2 = true;
                fallthrough;
        case BPF_PROG_TYPE_LWT_IN:
        case BPF_PROG_TYPE_LWT_OUT:
        case BPF_PROG_TYPE_LWT_XMIT:
                is_direct_pkt_access = true;
                break;
        default:
                break;
        }

        sk = kzalloc(sizeof(struct sock), GFP_USER);
        if (!sk) {
                kfree(data);
                kfree(ctx);
                return -ENOMEM;
        }
        sock_net_set(sk, net);
        sock_init_data(NULL, sk);

        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                kfree(ctx);
                kfree(sk);
                return -ENOMEM;
        }
        skb->sk = sk;

        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        __skb_put(skb, size);
        if (ctx && ctx->ifindex > 1) {
                dev = dev_get_by_index(net, ctx->ifindex);
                if (!dev) {
                        ret = -ENODEV;
                        goto out;
                }
        }
        skb->protocol = eth_type_trans(skb, dev);
        skb_reset_network_header(skb);

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                sk->sk_family = AF_INET;
                if (sizeof(struct iphdr) <= skb_headlen(skb)) {
                        sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
                        sk->sk_daddr = ip_hdr(skb)->daddr;
                }
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                sk->sk_family = AF_INET6;
                if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
                        sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
                        sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
                }
                break;
#endif
        default:
                break;
        }

        if (is_l2)
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        ret = convert___skb_to_skb(skb, ctx);
        if (ret)
                goto out;
        ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
        if (ret)
                goto out;
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                        if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
                                ret = -ENOMEM;
                                goto out;
                        }
                }
                memset(__skb_push(skb, hh_len), 0, hh_len);
        }
        convert_skb_to___skb(skb, ctx);

        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
                size = skb_headlen(skb);
        ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, ctx,
                                     sizeof(struct __sk_buff));
out:
        if (dev && dev != net->loopback_dev)
                dev_put(dev);
        kfree_skb(skb);
        bpf_sk_storage_free(sk);
        kfree(sk);
        kfree(ctx);
        return ret;
}

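/* BPF_PROG_TEST_RUN handler for XDP programs: place the packet in a
 * page-backed buffer with XDP_PACKET_HEADROOM, point the xdp_buff at the
 * loopback device's rx queue and run the program via bpf_prog_run_xdp().
 */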
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        u32 headroom = XDP_PACKET_HEADROOM;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct netdev_rx_queue *rxqueue;
        struct xdp_buff xdp = {};
        u32 retval, duration;
        u32 max_data_sz;
        void *data;
        int ret;

        if (kattr->test.ctx_in || kattr->test.ctx_out)
                return -EINVAL;

        /* XDP has extra tailroom as (most) drivers use a full page */
        max_data_sz = 4096 - headroom - tailroom;

        data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
        if (IS_ERR(data))
                return PTR_ERR(data);

        xdp.data_hard_start = data;
        xdp.data = data + headroom;
        xdp.data_meta = xdp.data;
        xdp.data_end = xdp.data + size;
        xdp.frame_sz = headroom + max_data_sz + tailroom;

        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp.rxq = &rxqueue->xdp_rxq;
        bpf_prog_change_xdp(NULL, prog);
        ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
        if (ret)
                goto out;
        if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
        bpf_prog_change_xdp(prog, NULL);
        kfree(data);
        return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
        /* make sure the fields we don't use are zeroed */
        if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
                return -EINVAL;

        /* flags is allowed */

        if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
                           sizeof(struct bpf_flow_keys)))
                return -EINVAL;

        return 0;
}

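/* BPF_PROG_TEST_RUN handler for flow dissector programs: dissect the given
 * packet @repeat times with bpf_flow_dissect() and return the resulting
 * bpf_flow_keys together with the average runtime.
 */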
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
                                     const union bpf_attr *kattr,
                                     union bpf_attr __user *uattr)
{
        u32 size = kattr->test.data_size_in;
        struct bpf_flow_dissector ctx = {};
        u32 repeat = kattr->test.repeat;
        struct bpf_flow_keys *user_ctx;
        struct bpf_flow_keys flow_keys;
        u64 time_start, time_spent = 0;
        const struct ethhdr *eth;
        unsigned int flags = 0;
        u32 retval, duration;
        void *data;
        int ret;
        u32 i;

        if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
                return -EINVAL;

        if (kattr->test.flags || kattr->test.cpu)
                return -EINVAL;

        if (size < ETH_HLEN)
                return -EINVAL;

        data = bpf_test_init(kattr, size, 0, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);

        eth = (struct ethhdr *)data;

        if (!repeat)
                repeat = 1;

        user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
        if (IS_ERR(user_ctx)) {
                kfree(data);
                return PTR_ERR(user_ctx);
        }
        if (user_ctx) {
                ret = verify_user_bpf_flow_keys(user_ctx);
                if (ret)
                        goto out;
                flags = user_ctx->flags;
        }

        ctx.flow_keys = &flow_keys;
        ctx.data = data;
        ctx.data_end = (__u8 *)data + size;

        rcu_read_lock();
        preempt_disable();
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
                                          size, flags);

                if (signal_pending(current)) {
                        preempt_enable();
                        rcu_read_unlock();

                        ret = -EINTR;
                        goto out;
                }

                if (need_resched()) {
                        time_spent += ktime_get_ns() - time_start;
                        preempt_enable();
                        rcu_read_unlock();

                        cond_resched();

                        rcu_read_lock();
                        preempt_disable();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        preempt_enable();
        rcu_read_unlock();

        do_div(time_spent, repeat);
        duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
                              retval, duration);
        if (!ret)
                ret = bpf_ctx_finish(kattr, uattr, user_ctx,
                                     sizeof(struct bpf_flow_keys));

out:
        kfree(user_ctx);
        kfree(data);
        return ret;
}
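
/* Illustrative sketch (not part of this file): how the handlers above are
 * reached from user space.  The BPF_PROG_TEST_RUN command of the bpf()
 * syscall dispatches to bpf_prog_test_run_skb(), bpf_prog_test_run_xdp(),
 * bpf_prog_test_run_tracing(), etc. according to the program type.  The
 * snippet below is a hedged example: it assumes a libbpf version that
 * provides bpf_prog_test_run_opts() and struct bpf_test_run_opts; the
 * function name and packet sizes are made up for the example.
 *
 *	#include <bpf/bpf.h>
 *
 *	// Run an already loaded skb- or XDP-type program once over a dummy
 *	// frame and report its return value.
 *	static int test_run_once(int prog_fd)
 *	{
 *		unsigned char pkt_in[128] = {};   // >= ETH_HLEN, see bpf_test_init()
 *		unsigned char pkt_out[256];
 *		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *			.data_in = pkt_in,
 *			.data_size_in = sizeof(pkt_in),
 *			.data_out = pkt_out,
 *			.data_size_out = sizeof(pkt_out),
 *			.repeat = 1
 *		);
 *		int err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 *		if (err)
 *			return err;
 *		// opts.retval, opts.duration and opts.data_size_out were
 *		// filled in by bpf_test_finish() above.
 *		return (int)opts.retval;
 *	}
 */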