linux/net/bpf/test_run.c
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>

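/* Run the program once against @ctx under the conditions BPF networking
 * programs normally execute in: preemption disabled and inside an RCU
 * read-side critical section.
 */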
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
{
        u32 ret;

        preempt_disable();
        rcu_read_lock();
        ret = BPF_PROG_RUN(prog, ctx);
        rcu_read_unlock();
        preempt_enable();

        return ret;
}

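/* Run the program @repeat times (at least once) against @ctx.  Whenever a
 * reschedule is needed the loop bails out on a pending signal, otherwise it
 * pauses the timer around cond_resched().  *time receives the average
 * runtime per iteration in nanoseconds, capped at U32_MAX; the return
 * value of the last run is returned.
 */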
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
        u64 time_start, time_spent = 0;
        u32 ret = 0, i;

        if (!repeat)
                repeat = 1;
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
                ret = bpf_test_run_one(prog, ctx);
                if (need_resched()) {
                        if (signal_pending(current))
                                break;
                        time_spent += ktime_get_ns() - time_start;
                        cond_resched();
                        time_start = ktime_get_ns();
                }
        }
        time_spent += ktime_get_ns() - time_start;
        do_div(time_spent, repeat);
        *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

        return ret;
}

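/* Copy the test results back to user space: the output packet data (only if
 * test.data_out was supplied), its size, the program's return value and the
 * measured duration.  Returns -EFAULT if any copy fails.
 */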
static int bpf_test_finish(const union bpf_attr *kattr,
                           union bpf_attr __user *uattr, const void *data,
                           u32 size, u32 retval, u32 duration)
{
        void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
        int err = -EFAULT;

        if (data_out && copy_to_user(data_out, data, size))
                goto out;
        if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                goto out;
        if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
                goto out;
        if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                goto out;
        err = 0;
out:
        return err;
}

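/* Allocate a buffer with room for @headroom before and @tailroom after the
 * packet and copy @size bytes of test data in from user space.  The data
 * must hold at least an Ethernet header and fit into a single page together
 * with head- and tailroom.  Returns an ERR_PTR on failure.
 */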
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
                           u32 headroom, u32 tailroom)
{
        void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
        void *data;

        if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
                return ERR_PTR(-EINVAL);

        data = kzalloc(size + headroom + tailroom, GFP_USER);
        if (!data)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(data + headroom, data_in, size)) {
                kfree(data);
                return ERR_PTR(-EFAULT);
        }
        return data;
}

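/* BPF_PROG_TEST_RUN handler for skb-based program types: wrap the test data
 * in a linear sk_buff built against the loopback device, run the program on
 * it and hand the (possibly modified) packet back to user space.
 */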
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        bool is_l2 = false, is_direct_pkt_access = false;
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        u32 retval, duration;
        struct sk_buff *skb;
        void *data;
        int ret;

        data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (IS_ERR(data))
                return PTR_ERR(data);

        switch (prog->type) {
        case BPF_PROG_TYPE_SCHED_CLS:
        case BPF_PROG_TYPE_SCHED_ACT:
                is_l2 = true;
                /* fall through */
        case BPF_PROG_TYPE_LWT_IN:
        case BPF_PROG_TYPE_LWT_OUT:
        case BPF_PROG_TYPE_LWT_XMIT:
                is_direct_pkt_access = true;
                break;
        default:
                break;
        }

        skb = build_skb(data, 0);
        if (!skb) {
                kfree(data);
                return -ENOMEM;
        }

        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        __skb_put(skb, size);
        /* eth_type_trans() pulls the Ethernet header and sets skb->protocol */
        skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
        skb_reset_network_header(skb);

        /* L2 programs expect to see the Ethernet header */
        if (is_l2)
                __skb_push(skb, ETH_HLEN);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
        retval = bpf_test_run(prog, skb, repeat, &duration);
        /* restore the Ethernet header so user space gets the full frame back */
        if (!is_l2)
                __skb_push(skb, ETH_HLEN);
        size = skb->len;
        /* bpf program can never convert linear skb to non-linear */
        if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
                size = skb_headlen(skb);
        ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
        kfree_skb(skb);
        return ret;
}

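/* BPF_PROG_TEST_RUN handler for XDP programs: set up an xdp_buff with the
 * standard XDP headroom over the test data, attach it to the loopback
 * device's rx queue 0 info, run the program and copy the resulting packet
 * back to user space.
 */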
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
                          union bpf_attr __user *uattr)
{
        u32 size = kattr->test.data_size_in;
        u32 repeat = kattr->test.repeat;
        struct netdev_rx_queue *rxqueue;
        struct xdp_buff xdp = {};
        u32 retval, duration;
        void *data;
        int ret;

        data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
        if (IS_ERR(data))
                return PTR_ERR(data);

        xdp.data_hard_start = data;
        xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
        xdp.data_meta = xdp.data;
        xdp.data_end = xdp.data + size;

        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp.rxq = &rxqueue->xdp_rxq;

        retval = bpf_test_run(prog, &xdp, repeat, &duration);
        /* the program may have adjusted the headroom; report the new length */
        if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
        kfree(data);
        return ret;
}