// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *      Jay Schulist <jschlst@samba.org>
 *      Alexei Starovoitov <ast@plumgrid.com>
 *      Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>

/**
 *      sk_filter_trim_cap - run a packet through a socket filter
 *      @sk: sock associated with &sk_buff
 *      @skb: buffer to filter
 *      @cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
        int err;
        struct sk_filter *filter;

        /*
         * If the skb was allocated from pfmemalloc reserves, only
         * allow SOCK_MEMALLOC sockets to use it as this socket is
         * helping free memory
         */
        if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
                return -ENOMEM;
        }
        err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
        if (err)
                return err;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                struct sock *save_sk = skb->sk;
                unsigned int pkt_len;

                skb->sk = sk;
                pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
                skb->sk = save_sk;
                err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
        }
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
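
/* Usage sketch (not part of the original file): the common entry point
 * sk_filter() is a static inline in <linux/filter.h> that wraps this
 * helper with a trim cap of one byte, so any nonzero program return
 * keeps the packet:
 *
 *        static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *        {
 *                return sk_filter_trim_cap(sk, skb, 1);
 *        }
 */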

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
        return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = (struct nlattr *) &skb->data[a];
        if (nla->nla_len > skb->len - a)
                return 0;

        nla = nla_find_nested(nla, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u8 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return *(u8 *)(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return tmp;
        } else {
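                /* Negative offsets follow the classic SKF_NET_OFF /
                 * SKF_LL_OFF convention; the helper below resolves them
                 * relative to the network / link-layer headers.
                 */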
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return *(u8 *)ptr;
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
                                         offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u16 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return get_unaligned_be16(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be16_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be16(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u32 tmp, *ptr;
        const int len = sizeof(tmp);

        if (likely(offset >= 0)) {
                if (headlen - offset >= len)
                        return get_unaligned_be32(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be32_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be32(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
        return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
        .func           = bpf_get_raw_cpu_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
                              struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;

        switch (skb_field) {
        case SKF_AD_MARK:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

                *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
                                      offsetof(struct sk_buff, mark));
                break;

        case SKF_AD_PKTTYPE:
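                /* pkt_type lives in a bitfield byte: PKT_TYPE_MAX masks
                 * out its bits, and on big-endian bitfield layouts the
                 * extra right shift moves them down to bit 0.
                 */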
                *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
                *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
                *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
                break;

        case SKF_AD_QUEUE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, queue_mapping));
                break;

        case SKF_AD_VLAN_TAG:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

                /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, vlan_tci));
                break;
        case SKF_AD_VLAN_TAG_PRESENT:
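                /* vlan_present is a single bit in a bitfield byte: load
                 * the byte, shift the bit down if it is not already bit 0,
                 * and mask the result to 0 or 1.
                 */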
                *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
                if (PKT_VLAN_PRESENT_BIT)
                        *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
                if (PKT_VLAN_PRESENT_BIT < 7)
                        *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
                break;
        }

        return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct bpf_insn **insnp)
{
        struct bpf_insn *insn = *insnp;
        u32 cnt;

        switch (fp->k) {
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

                /* A = *(u16 *) (CTX + offsetof(protocol)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, protocol));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PKTTYPE:
                cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      BPF_REG_TMP, BPF_REG_CTX,
                                      offsetof(struct sk_buff, dev));
                /* if (tmp != 0) goto pc + 1 */
                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
                *insn++ = BPF_EXIT_INSN();
                if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, ifindex));
                else
                        *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, type));
                break;

        case SKF_AD_OFF + SKF_AD_MARK:
                cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_RXHASH:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

                *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
                                    offsetof(struct sk_buff, hash));
                break;

        case SKF_AD_OFF + SKF_AD_QUEUE:
                cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TPID:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

                /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, vlan_proto));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
        case SKF_AD_OFF + SKF_AD_NLATTR:
        case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
        case SKF_AD_OFF + SKF_AD_CPU:
        case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = CTX */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
                /* arg2 = A */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
                /* arg3 = X */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
                /* Emit call(arg1=CTX, arg2=A, arg3=X) */
                switch (fp->k) {
                case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
                        break;
                case SKF_AD_OFF + SKF_AD_CPU:
                        *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
                        break;
                case SKF_AD_OFF + SKF_AD_RANDOM:
                        *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
                        bpf_user_rnd_init_once();
                        break;
                }
                break;

        case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
                /* A ^= X */
                *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
                break;

        default:
                /* This is just a dummy call to avoid letting the compiler
                 * evict __bpf_call_base() as an optimization. Placed here
                 * where no-one bothers.
                 */
                BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
                return false;
        }

        *insnp = insn;
        return true;
}
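
/* Example (illustrative): the classic extension load
 *
 *        BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_MARK)
 *
 * is caught above and rewritten via convert_skb_access() into the single
 * eBPF instruction
 *
 *        BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
 *                    offsetof(struct sk_buff, mark))
 *
 * rather than being treated as a real packet load at that offset.
 */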

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
        const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
        int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
        bool endian = BPF_SIZE(fp->code) == BPF_H ||
                      BPF_SIZE(fp->code) == BPF_W;
        bool indirect = BPF_MODE(fp->code) == BPF_IND;
        const int ip_align = NET_IP_ALIGN;
        struct bpf_insn *insn = *insnp;
        int offset = fp->k;

        if (!indirect &&
            ((unaligned_ok && offset >= 0) ||
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              (offset + ip_align) % size == 0))) {
                bool ldx_off_ok = offset <= S16_MAX;

                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                if (offset)
                        *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
                *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
                                      size, 2 + endian + (!ldx_off_ok * 2));
                if (ldx_off_ok) {
                        *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
                                              BPF_REG_D, offset);
                } else {
                        *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
                        *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
                                              BPF_REG_TMP, 0);
                }
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
        }

        *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
        if (!indirect) {
                *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
        } else {
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
                if (fp->k)
                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
        }

        switch (BPF_SIZE(fp->code)) {
        case BPF_B:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
                break;
        case BPF_H:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
                break;
        case BPF_W:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
                break;
        default:
                return false;
        }

        *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
        *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
        *insn   = BPF_EXIT_INSN();

        *insnp = insn;
        return true;
}

/**
 *      bpf_convert_filter - convert filter program
 *      @prog: the user passed filter program
 *      @len: the length of the user passed filter program
 *      @new_prog: allocated 'struct bpf_prog' or NULL
 *      @new_len: pointer to store length of converted program
 *      @seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) Second pass to do the actual remapping. Internally this may run the
 *    conversion more than once: earlier rounds settle the new jump offsets,
 *    the final round emits the remapped instructions:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
                              struct bpf_prog *new_prog, int *new_len,
                              bool *seen_ld_abs)
{
        int new_flen = 0, pass = 0, target, i, stack_off;
        struct bpf_insn *new_insn, *first_insn = NULL;
        struct sock_filter *fp;
        int *addrs = NULL;
        u8 bpf_src;

        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

        if (len <= 0 || len > BPF_MAXINSNS)
                return -EINVAL;

        if (new_prog) {
                first_insn = new_prog->insnsi;
                addrs = kcalloc(len, sizeof(*addrs),
                                GFP_KERNEL | __GFP_NOWARN);
                if (!addrs)
                        return -ENOMEM;
        }

do_pass:
        new_insn = first_insn;
        fp = prog;

        /* Classic BPF related prologue emission. */
        if (new_prog) {
                /* Classic BPF expects A and X to be reset first. These need
                 * to be guaranteed to be the first two instructions.
                 */
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

                /* All programs must keep CTX in callee saved BPF_REG_CTX.
                 * In the eBPF case the compiler handles this; here we need
                 * to do it ourselves. The initial CTX is in BPF_REG_ARG1.
                 */
                *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
                if (*seen_ld_abs) {
                        /* For packet access in classic BPF, cache skb->data
                         * in callee-saved BPF R8 and skb->len - skb->data_len
                         * (headlen) in BPF R9. Since classic BPF is read-only
                         * on CTX, we only need to cache it once.
                         */
                        *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
                                                  BPF_REG_D, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, len));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data_len));
                        *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
                }
        } else {
                new_insn += 3;
        }

        for (i = 0; i < len; fp++, i++) {
                struct bpf_insn tmp_insns[32] = { };
                struct bpf_insn *insn = tmp_insns;

                if (addrs)
                        addrs[i] = new_insn - first_insn;

                switch (fp->code) {
                /* All arithmetic insns and skb loads map as-is. */
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_X:
                case BPF_ALU | BPF_MOD | BPF_K:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_ABS | BPF_W:
                case BPF_LD | BPF_ABS | BPF_H:
                case BPF_LD | BPF_ABS | BPF_B:
                case BPF_LD | BPF_IND | BPF_W:
                case BPF_LD | BPF_IND | BPF_H:
                case BPF_LD | BPF_IND | BPF_B:
                        /* Check for overloaded BPF extension and
                         * directly convert it if found, otherwise
                         * just move on with mapping.
                         */
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            BPF_MODE(fp->code) == BPF_ABS &&
                            convert_bpf_extensions(fp, &insn))
                                break;
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            convert_bpf_ld_abs(fp, &insn)) {
                                *seen_ld_abs = true;
                                break;
                        }

                        if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
                            fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
                                *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
                                /* Error with exception code on div/mod by 0.
                                 * For cBPF programs, this always meant a
                                 * return of 0 from the whole filter.
                                 */
                                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
                                *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                                *insn++ = BPF_EXIT_INSN();
                        }

                        *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;

                /* Jump transformation cannot use BPF block macros
                 * everywhere as offset calculation and target updates
                 * require a bit more work than the rest, i.e. jump
                 * opcodes map as-is, but offsets need adjustment.
                 */

#define BPF_EMIT_JMP                                                    \
        do {                                                            \
                const s32 off_min = S16_MIN, off_max = S16_MAX;         \
                s32 off;                                                \
                                                                        \
                if (target >= len || target < 0)                        \
                        goto err;                                       \
                off = addrs ? addrs[target] - addrs[i] - 1 : 0;         \
                /* Adjust pc relative offset for 2nd or 3rd insn. */    \
                off -= insn - tmp_insns;                                \
                /* Reject anything not fitting into insn->off. */       \
                if (off < off_min || off > off_max)                     \
                        goto err;                                       \
                insn->off = off;                                        \
        } while (0)

                case BPF_JMP | BPF_JA:
                        target = i + fp->k + 1;
                        insn->code = fp->code;
                        BPF_EMIT_JMP;
                        break;

                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
                                /* BPF immediates are signed, zero extend
                                 * immediate into tmp register and use it
                                 * in compare insn.
                                 */
                                *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

                                insn->dst_reg = BPF_REG_A;
                                insn->src_reg = BPF_REG_TMP;
                                bpf_src = BPF_X;
                        } else {
                                insn->dst_reg = BPF_REG_A;
                                insn->imm = fp->k;
                                bpf_src = BPF_SRC(fp->code);
                                insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
                        }

                        /* Common case where 'jump_false' is next insn. */
                        if (fp->jf == 0) {
                                insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                                target = i + fp->jt + 1;
                                BPF_EMIT_JMP;
                                break;
                        }

                        /* Convert some jumps when 'jump_true' is next insn. */
                        if (fp->jt == 0) {
                                switch (BPF_OP(fp->code)) {
                                case BPF_JEQ:
                                        insn->code = BPF_JMP | BPF_JNE | bpf_src;
                                        break;
                                case BPF_JGT:
                                        insn->code = BPF_JMP | BPF_JLE | bpf_src;
                                        break;
                                case BPF_JGE:
                                        insn->code = BPF_JMP | BPF_JLT | bpf_src;
                                        break;
                                default:
                                        goto jmp_rest;
                                }

                                target = i + fp->jf + 1;
                                BPF_EMIT_JMP;
                                break;
                        }
jmp_rest:
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                        BPF_EMIT_JMP;
                        insn++;

                        insn->code = BPF_JMP | BPF_JA;
                        target = i + fp->jf + 1;
                        BPF_EMIT_JMP;
                        break;
                /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B: {
                        struct sock_filter tmp = {
                                .code   = BPF_LD | BPF_ABS | BPF_B,
                                .k      = fp->k,
                        };

                        *seen_ld_abs = true;

                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = BPF_R0 = *(u8 *) (skb->data + K) */
                        convert_bpf_ld_abs(&tmp, &insn);
                        insn++;
                        /* A &= 0xf */
                        *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
                        /* A <<= 2 */
                        *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
                        /* tmp = X */
                        *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = tmp */
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
                }
                /* RET_K is remapped into 2 insns. RET_A case doesn't need an
                 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
                 */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
                        if (BPF_RVAL(fp->code) == BPF_K)
                                *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
                                                        0, fp->k);
                        *insn = BPF_EXIT_INSN();
                        break;

                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
                        stack_off = fp->k * 4 + 4;
                        *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
                                            BPF_ST ? BPF_REG_A : BPF_REG_X,
                                            -stack_off);
                        /* check_load_and_stores() verifies that classic BPF can
                         * load from stack only after write, so tracking
                         * stack_depth for ST|STX insns is enough
                         */
                        if (new_prog && new_prog->aux->stack_depth < stack_off)
                                new_prog->aux->stack_depth = stack_off;
                        break;

                /* Load from stack. */
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        stack_off = fp->k * 4 + 4;
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_FP,
                                            -stack_off);
                        break;

                /* A = K or X = K */
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                        *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
                                              BPF_REG_A : BPF_REG_X, fp->k);
                        break;

                /* X = A */
                case BPF_MISC | BPF_TAX:
                        *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        break;

                /* A = X */
                case BPF_MISC | BPF_TXA:
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
                        break;

                /* A = skb->len or X = skb->len */
                case BPF_LD | BPF_W | BPF_LEN:
                case BPF_LDX | BPF_W | BPF_LEN:
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
                                            offsetof(struct sk_buff, len));
                        break;

                /* Access seccomp_data fields. */
                case BPF_LDX | BPF_ABS | BPF_W:
                        /* A = *(u32 *) (ctx + K) */
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;

                /* Unknown instruction. */
                default:
                        goto err;
                }

                insn++;
                if (new_prog)
                        memcpy(new_insn, tmp_insns,
                               sizeof(*insn) * (insn - tmp_insns));
                new_insn += insn - tmp_insns;
        }

        if (!new_prog) {
                /* Only calculating new length. */
                *new_len = new_insn - first_insn;
                if (*seen_ld_abs)
                        *new_len += 4; /* Prologue bits. */
                return 0;
        }

        pass++;
        if (new_flen != new_insn - first_insn) {
                new_flen = new_insn - first_insn;
                if (pass > 2)
                        goto err;
                goto do_pass;
        }

        kfree(addrs);
        BUG_ON(*new_len != new_flen);
        return 0;
err:
        kfree(addrs);
        return -EINVAL;
}
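
/* Note on the register mapping used above: BPF_REG_A, BPF_REG_X,
 * BPF_REG_TMP, BPF_REG_D and BPF_REG_H are cBPF conversion aliases
 * from <linux/filter.h>; A maps onto R0 and X onto R7, while D and H
 * carry the cached skb->data and headlen in callee-saved R8 and R9.
 */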

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going
 * through __bpf_prog_run(), we check that filters loaded by users
 * never try to read a cell that was not previously written, and we
 * check all branches to be sure a malicious user doesn't try to abuse
 * us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
        u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;

        BUILD_BUG_ON(BPF_MEMWORDS > 16);

        masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;

        memset(masks, 0xff, flen * sizeof(*masks));

        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];

                switch (filter[pc].code) {
                case BPF_ST:
                case BPF_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
                case BPF_JMP | BPF_JA:
                        /* A jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* A jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
                        break;
                }
        }
error:
        kfree(masks);
        return ret;
}
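
/* Example (illustrative): this two-instruction filter is rejected here
 * with -EINVAL, since mem[0] is read before it is ever written:
 *
 *        BPF_STMT(BPF_LD | BPF_MEM, 0),        A = mem[0], never stored to
 *        BPF_STMT(BPF_RET | BPF_A, 0),         return A
 */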

static bool chk_code_allowed(u16 code_to_probe)
{
        static const bool codes[] = {
                /* 32 bit ALU operations */
                [BPF_ALU | BPF_ADD | BPF_K] = true,
                [BPF_ALU | BPF_ADD | BPF_X] = true,
                [BPF_ALU | BPF_SUB | BPF_K] = true,
                [BPF_ALU | BPF_SUB | BPF_X] = true,
                [BPF_ALU | BPF_MUL | BPF_K] = true,
                [BPF_ALU | BPF_MUL | BPF_X] = true,
                [BPF_ALU | BPF_DIV | BPF_K] = true,
                [BPF_ALU | BPF_DIV | BPF_X] = true,
                [BPF_ALU | BPF_MOD | BPF_K] = true,
                [BPF_ALU | BPF_MOD | BPF_X] = true,
                [BPF_ALU | BPF_AND | BPF_K] = true,
                [BPF_ALU | BPF_AND | BPF_X] = true,
                [BPF_ALU | BPF_OR | BPF_K] = true,
                [BPF_ALU | BPF_OR | BPF_X] = true,
                [BPF_ALU | BPF_XOR | BPF_K] = true,
                [BPF_ALU | BPF_XOR | BPF_X] = true,
                [BPF_ALU | BPF_LSH | BPF_K] = true,
                [BPF_ALU | BPF_LSH | BPF_X] = true,
                [BPF_ALU | BPF_RSH | BPF_K] = true,
                [BPF_ALU | BPF_RSH | BPF_X] = true,
                [BPF_ALU | BPF_NEG] = true,
                /* Load instructions */
                [BPF_LD | BPF_W | BPF_ABS] = true,
                [BPF_LD | BPF_H | BPF_ABS] = true,
                [BPF_LD | BPF_B | BPF_ABS] = true,
                [BPF_LD | BPF_W | BPF_LEN] = true,
                [BPF_LD | BPF_W | BPF_IND] = true,
                [BPF_LD | BPF_H | BPF_IND] = true,
                [BPF_LD | BPF_B | BPF_IND] = true,
                [BPF_LD | BPF_IMM] = true,
                [BPF_LD | BPF_MEM] = true,
                [BPF_LDX | BPF_W | BPF_LEN] = true,
                [BPF_LDX | BPF_B | BPF_MSH] = true,
                [BPF_LDX | BPF_IMM] = true,
                [BPF_LDX | BPF_MEM] = true,
                /* Store instructions */
                [BPF_ST] = true,
                [BPF_STX] = true,
                /* Misc instructions */
                [BPF_MISC | BPF_TAX] = true,
                [BPF_MISC | BPF_TXA] = true,
                /* Return instructions */
                [BPF_RET | BPF_K] = true,
                [BPF_RET | BPF_A] = true,
                /* Jump instructions */
                [BPF_JMP | BPF_JA] = true,
                [BPF_JMP | BPF_JEQ | BPF_K] = true,
                [BPF_JMP | BPF_JEQ | BPF_X] = true,
                [BPF_JMP | BPF_JGE | BPF_K] = true,
                [BPF_JMP | BPF_JGE | BPF_X] = true,
                [BPF_JMP | BPF_JGT | BPF_K] = true,
                [BPF_JMP | BPF_JGT | BPF_X] = true,
                [BPF_JMP | BPF_JSET | BPF_K] = true,
                [BPF_JMP | BPF_JSET | BPF_X] = true,
        };

        if (code_to_probe >= ARRAY_SIZE(codes))
                return false;

        return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
                                unsigned int flen)
{
        if (filter == NULL)
                return false;
        if (flen == 0 || flen > BPF_MAXINSNS)
                return false;

        return true;
}

/**
 *      bpf_check_classic - verify socket filter code
 *      @filter: filter to verify
 *      @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward, as offsets are unsigned.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
                             unsigned int flen)
{
        bool anc_found;
        int pc;

        /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                const struct sock_filter *ftest = &filter[pc];

                /* May we actually operate on this code? */
                if (!chk_code_allowed(ftest->code))
                        return -EINVAL;

                /* Some instructions need special checks */
                switch (ftest->code) {
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_K:
                        /* Check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_K:
                        if (ftest->k >= 32)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                        /* Check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JA:
                        /* Note: a large ftest->k might cause loops.
                         * Compare this with the conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
                        if (ftest->k >= (unsigned int)(flen - pc - 1))
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* Both conditionals must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_W | BPF_ABS:
                case BPF_LD | BPF_H | BPF_ABS:
                case BPF_LD | BPF_B | BPF_ABS:
                        anc_found = false;
                        if (bpf_anc_helper(ftest) & BPF_ANC)
                                anc_found = true;
                        /* Ancillary operation unknown or unsupported */
                        if (!anc_found && ftest->k >= SKF_AD_OFF)
                                return -EINVAL;
                }
        }

        /* Last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_RET | BPF_K:
        case BPF_RET | BPF_A:
                return check_load_and_stores(filter, flen);
        }

        return -EINVAL;
}

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
                                      const struct sock_fprog *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct sock_fprog_kern *fkprog;

        fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
        if (!fp->orig_prog)
                return -ENOMEM;

        fkprog = fp->orig_prog;
        fkprog->len = fprog->len;

        fkprog->filter = kmemdup(fp->insns, fsize,
                                 GFP_KERNEL | __GFP_NOWARN);
        if (!fkprog->filter) {
                kfree(fp->orig_prog);
                return -ENOMEM;
        }

        return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
        struct sock_fprog_kern *fprog = fp->orig_prog;

        if (fprog) {
                kfree(fprog->filter);
                kfree(fprog);
        }
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
        if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(prog);
        } else {
                bpf_release_orig_filter(prog);
                bpf_prog_free(prog);
        }
}

static void __sk_filter_release(struct sk_filter *fp)
{
        __bpf_prog_release(fp->prog);
        kfree(fp);
}

/**
 *      sk_filter_release_rcu - Release a socket filter by rcu_head
 *      @rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

        __sk_filter_release(fp);
}

/**
 *      sk_filter_release - release a socket filter
 *      @fp: filter to remove
 *
 *      Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
        if (refcount_dec_and_test(&fp->refcnt))
                call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        atomic_sub(filter_size, &sk->sk_omem_alloc);
        sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        /* same check as in sock_kmalloc() */
        if (filter_size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
                atomic_add(filter_size, &sk->sk_omem_alloc);
                return true;
        }
        return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        if (!refcount_inc_not_zero(&fp->refcnt))
                return false;

        if (!__sk_filter_charge(sk, fp)) {
                sk_filter_release(fp);
                return false;
        }
        return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
        struct sock_filter *old_prog;
        struct bpf_prog *old_fp;
        int err, new_len, old_len = fp->len;
        bool seen_ld_abs = false;

        /* We are free to overwrite insns et al right here as they
         * won't be used internally anymore after the migration to
         * the internal BPF instruction representation.
         */
        BUILD_BUG_ON(sizeof(struct sock_filter) !=
                     sizeof(struct bpf_insn));

        /* Conversion cannot happen on overlapping memory areas,
         * so we need to keep the user BPF around until the 2nd
         * pass. At this time, the user BPF is stored in fp->insns.
         */
        old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
                           GFP_KERNEL | __GFP_NOWARN);
        if (!old_prog) {
                err = -ENOMEM;
                goto out_err;
        }

        /* 1st pass: calculate the new program length. */
        err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
                                 &seen_ld_abs);
        if (err)
                goto out_err_free;

        /* Expand fp for appending the new filter representation. */
        old_fp = fp;
        fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
        if (!fp) {
                /* The old_fp is still around in case we couldn't
                 * allocate new memory, so uncharge on that one.
                 */
                fp = old_fp;
                err = -ENOMEM;
                goto out_err_free;
        }

        fp->len = new_len;

        /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
        err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
                                 &seen_ld_abs);
        if (err)
                /* The 2nd bpf_convert_filter() can fail only if it fails
                 * to allocate memory; the remapping itself must succeed.
                 * Note that at this point old_fp has already been
                 * released by bpf_prog_realloc().
                 */
                goto out_err_free;

        fp = bpf_prog_select_runtime(fp, &err);
        if (err)
                goto out_err_free;

        kfree(old_prog);
        return fp;

out_err_free:
        kfree(old_prog);
out_err:
        __bpf_prog_release(fp);
        return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
                                           bpf_aux_classic_check_t trans)
{
        int err;

        fp->bpf_func = NULL;
        fp->jited = 0;

        err = bpf_check_classic(fp->insns, fp->len);
        if (err) {
                __bpf_prog_release(fp);
                return ERR_PTR(err);
        }
        /* There might be additional checks and transformations
         * needed on classic filters, e.g. in the case of seccomp.
         */
        if (trans) {
                err = trans(fp->insns, fp->len);
                if (err) {
                        __bpf_prog_release(fp);
                        return ERR_PTR(err);
                }
        }

        /* Probe if we can JIT compile the filter and if so, do
         * the compilation of the filter.
         */
        bpf_jit_compile(fp);

        /* JIT compiler couldn't process this filter, so do the
         * internal BPF translation for the optimized interpreter.
         */
        if (!fp->jited)
                fp = bpf_migrate_filter(fp);

        return fp;
}

/**
 *      bpf_prog_create - create an unattached filter
 *      @pfp: the unattached filter that is created
 *      @fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct bpf_prog *fp;
        /* Make sure the new filter is there and within size limits. */
1344        if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1345                return -EINVAL;
1346
1347        fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1348        if (!fp)
1349                return -ENOMEM;
1350
1351        memcpy(fp->insns, fprog->filter, fsize);
1352
1353        fp->len = fprog->len;
1354        /* Since unattached filters are not copied back to user
1355         * space through sk_get_filter(), we do not need to hold
1356         * a copy here, and can spare us the work.
1357         */
1358        fp->orig_prog = NULL;
1359
1360        /* bpf_prepare_filter() already takes care of freeing
1361         * memory in case something goes wrong.
1362         */
1363        fp = bpf_prepare_filter(fp, NULL);
1364        if (IS_ERR(fp))
1365                return PTR_ERR(fp);
1366
1367        *pfp = fp;
1368        return 0;
1369}
1370EXPORT_SYMBOL_GPL(bpf_prog_create);
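
/* A minimal in-kernel usage sketch (illustrative only, not part of this
 * file): a caller could build a trivial "accept entire packet" classic
 * filter and create an unattached program from it. The two-instruction
 * program below is an assumed example.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *
 *	if (!bpf_prog_create(&prog, &fprog))
 *		bpf_prog_destroy(prog);
 */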
1371
1372/**
1373 *      bpf_prog_create_from_user - create an unattached filter from user buffer
1374 *      @pfp: the unattached filter that is created
1375 *      @fprog: the filter program
1376 *      @trans: post-classic verifier transformation handler
1377 *      @save_orig: save classic BPF program
1378 *
1379 * This function effectively does the same as bpf_prog_create(), only
1380 * that it builds up its insns buffer from a user space provided buffer.
1381 * It also allows for passing a bpf_aux_classic_check_t handler.
1382 */
1383int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1384                              bpf_aux_classic_check_t trans, bool save_orig)
1385{
1386        unsigned int fsize = bpf_classic_proglen(fprog);
1387        struct bpf_prog *fp;
1388        int err;
1389
1390        /* Make sure new filter is there and in the right amounts. */
1391        if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1392                return -EINVAL;
1393
1394        fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1395        if (!fp)
1396                return -ENOMEM;
1397
1398        if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1399                __bpf_prog_free(fp);
1400                return -EFAULT;
1401        }
1402
1403        fp->len = fprog->len;
1404        fp->orig_prog = NULL;
1405
1406        if (save_orig) {
1407                err = bpf_prog_store_orig_filter(fp, fprog);
1408                if (err) {
1409                        __bpf_prog_free(fp);
1410                        return -ENOMEM;
1411                }
1412        }
1413
1414        /* bpf_prepare_filter() already takes care of freeing
1415         * memory in case something goes wrong.
1416         */
1417        fp = bpf_prepare_filter(fp, trans);
1418        if (IS_ERR(fp))
1419                return PTR_ERR(fp);
1420
1421        *pfp = fp;
1422        return 0;
1423}
1424EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
1425
1426void bpf_prog_destroy(struct bpf_prog *fp)
1427{
1428        __bpf_prog_release(fp);
1429}
1430EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1431
1432static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
1433{
1434        struct sk_filter *fp, *old_fp;
1435
1436        fp = kmalloc(sizeof(*fp), GFP_KERNEL);
1437        if (!fp)
1438                return -ENOMEM;
1439
1440        fp->prog = prog;
1441
1442        if (!__sk_filter_charge(sk, fp)) {
1443                kfree(fp);
1444                return -ENOMEM;
1445        }
1446        refcount_set(&fp->refcnt, 1);
1447
1448        old_fp = rcu_dereference_protected(sk->sk_filter,
1449                                           lockdep_sock_is_held(sk));
1450        rcu_assign_pointer(sk->sk_filter, fp);
1451
1452        if (old_fp)
1453                sk_filter_uncharge(sk, old_fp);
1454
1455        return 0;
1456}
1457
1458static
1459struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1460{
1461        unsigned int fsize = bpf_classic_proglen(fprog);
1462        struct bpf_prog *prog;
1463        int err;
1464
1465        if (sock_flag(sk, SOCK_FILTER_LOCKED))
1466                return ERR_PTR(-EPERM);
1467
1468        /* Make sure new filter is there and in the right amounts. */
1469        if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1470                return ERR_PTR(-EINVAL);
1471
1472        prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1473        if (!prog)
1474                return ERR_PTR(-ENOMEM);
1475
1476        if (copy_from_user(prog->insns, fprog->filter, fsize)) {
1477                __bpf_prog_free(prog);
1478                return ERR_PTR(-EFAULT);
1479        }
1480
1481        prog->len = fprog->len;
1482
1483        err = bpf_prog_store_orig_filter(prog, fprog);
1484        if (err) {
1485                __bpf_prog_free(prog);
1486                return ERR_PTR(-ENOMEM);
1487        }
1488
1489        /* bpf_prepare_filter() already takes care of freeing
1490         * memory in case something goes wrong.
1491         */
1492        return bpf_prepare_filter(prog, NULL);
1493}
1494
1495/**
1496 *      sk_attach_filter - attach a socket filter
1497 *      @fprog: the filter program
1498 *      @sk: the socket to use
1499 *
1500 * Attach the user's filter code. We first run some sanity checks on
1501 * it to make sure it does not explode on us later. If an error
1502 * occurs or there is insufficient memory for the filter, a negative
1503 * errno code is returned. On success the return is zero.
1504 */
1505int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1506{
1507        struct bpf_prog *prog = __get_filter(fprog, sk);
1508        int err;
1509
1510        if (IS_ERR(prog))
1511                return PTR_ERR(prog);
1512
1513        err = __sk_attach_prog(prog, sk);
1514        if (err < 0) {
1515                __bpf_prog_release(prog);
1516                return err;
1517        }
1518
1519        return 0;
1520}
1521EXPORT_SYMBOL_GPL(sk_attach_filter);
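
/* sk_attach_filter() is typically reached from user space via
 * setsockopt(). A hedged sketch of the classic path, using a
 * single-instruction "accept up to 0xffff bytes" program and with
 * error handling omitted:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },
 *	};
 *	struct sock_fprog bpf = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */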
1522
1523int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1524{
1525        struct bpf_prog *prog = __get_filter(fprog, sk);
1526        int err;
1527
1528        if (IS_ERR(prog))
1529                return PTR_ERR(prog);
1530
1531        if (bpf_prog_size(prog->len) > sysctl_optmem_max)
1532                err = -ENOMEM;
1533        else
1534                err = reuseport_attach_prog(sk, prog);
1535
1536        if (err)
1537                __bpf_prog_release(prog);
1538
1539        return err;
1540}
1541
1542static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
1543{
1544        if (sock_flag(sk, SOCK_FILTER_LOCKED))
1545                return ERR_PTR(-EPERM);
1546
1547        return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1548}
1549
1550int sk_attach_bpf(u32 ufd, struct sock *sk)
1551{
1552        struct bpf_prog *prog = __get_bpf(ufd, sk);
1553        int err;
1554
1555        if (IS_ERR(prog))
1556                return PTR_ERR(prog);
1557
1558        err = __sk_attach_prog(prog, sk);
1559        if (err < 0) {
1560                bpf_prog_put(prog);
1561                return err;
1562        }
1563
1564        return 0;
1565}
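
/* sk_attach_bpf() is likewise reached via setsockopt(), but with an eBPF
 * program fd, e.g. one previously obtained from bpf(BPF_PROG_LOAD, ...)
 * for type BPF_PROG_TYPE_SOCKET_FILTER. A hedged user space sketch:
 *
 *	int prog_fd = ...;	(fd from a prior BPF_PROG_LOAD)
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
 *		   sizeof(prog_fd));
 */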
1566
1567int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
1568{
1569        struct bpf_prog *prog;
1570        int err;
1571
1572        if (sock_flag(sk, SOCK_FILTER_LOCKED))
1573                return -EPERM;
1574
1575        prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1576        if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
1577                prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
1578        if (IS_ERR(prog))
1579                return PTR_ERR(prog);
1580
1581        if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
1582                /* Like other non BPF_PROG_TYPE_SOCKET_FILTER
1583                 * bpf progs (e.g. sockmap), it relies on the
1584                 * limits imposed by bpf_prog_load().
1585                 * Hence, sysctl_optmem_max is not checked.
1586                 */
1587                if ((sk->sk_type != SOCK_STREAM &&
1588                     sk->sk_type != SOCK_DGRAM) ||
1589                    (sk->sk_protocol != IPPROTO_UDP &&
1590                     sk->sk_protocol != IPPROTO_TCP) ||
1591                    (sk->sk_family != AF_INET &&
1592                     sk->sk_family != AF_INET6)) {
1593                        err = -ENOTSUPP;
1594                        goto err_prog_put;
1595                }
1596        } else {
1597                /* BPF_PROG_TYPE_SOCKET_FILTER */
1598                if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
1599                        err = -ENOMEM;
1600                        goto err_prog_put;
1601                }
1602        }
1603
1604        err = reuseport_attach_prog(sk, prog);
1605err_prog_put:
1606        if (err)
1607                bpf_prog_put(prog);
1608
1609        return err;
1610}
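
/* The reuseport attach paths are also driven from setsockopt() on a
 * SO_REUSEPORT socket. A hedged sketch for the eBPF flavor:
 *
 *	int one = 1, prog_fd = ...;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		   &prog_fd, sizeof(prog_fd));
 */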
1611
1612void sk_reuseport_prog_free(struct bpf_prog *prog)
1613{
1614        if (!prog)
1615                return;
1616
1617        if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
1618                bpf_prog_put(prog);
1619        else
1620                bpf_prog_destroy(prog);
1621}
1622
1623struct bpf_scratchpad {
1624        union {
1625                __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
1626                u8     buff[MAX_BPF_STACK];
1627        };
1628};
1629
1630static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
1631
1632static inline int __bpf_try_make_writable(struct sk_buff *skb,
1633                                          unsigned int write_len)
1634{
1635        return skb_ensure_writable(skb, write_len);
1636}
1637
1638static inline int bpf_try_make_writable(struct sk_buff *skb,
1639                                        unsigned int write_len)
1640{
1641        int err = __bpf_try_make_writable(skb, write_len);
1642
1643        bpf_compute_data_pointers(skb);
1644        return err;
1645}
1646
1647static int bpf_try_make_head_writable(struct sk_buff *skb)
1648{
1649        return bpf_try_make_writable(skb, skb_headlen(skb));
1650}
1651
1652static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
1653{
1654        if (skb_at_tc_ingress(skb))
1655                skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1656}
1657
1658static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
1659{
1660        if (skb_at_tc_ingress(skb))
1661                skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1662}
1663
1664BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1665           const void *, from, u32, len, u64, flags)
1666{
1667        void *ptr;
1668
1669        if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1670                return -EINVAL;
1671        if (unlikely(offset > 0xffff))
1672                return -EFAULT;
1673        if (unlikely(bpf_try_make_writable(skb, offset + len)))
1674                return -EFAULT;
1675
1676        ptr = skb->data + offset;
1677        if (flags & BPF_F_RECOMPUTE_CSUM)
1678                __skb_postpull_rcsum(skb, ptr, len, offset);
1679
1680        memcpy(ptr, from, len);
1681
1682        if (flags & BPF_F_RECOMPUTE_CSUM)
1683                __skb_postpush_rcsum(skb, ptr, len, offset);
1684        if (flags & BPF_F_INVALIDATE_HASH)
1685                skb_clear_hash(skb);
1686
1687        return 0;
1688}
1689
1690static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1691        .func           = bpf_skb_store_bytes,
1692        .gpl_only       = false,
1693        .ret_type       = RET_INTEGER,
1694        .arg1_type      = ARG_PTR_TO_CTX,
1695        .arg2_type      = ARG_ANYTHING,
1696        .arg3_type      = ARG_PTR_TO_MEM,
1697        .arg4_type      = ARG_CONST_SIZE,
1698        .arg5_type      = ARG_ANYTHING,
1699};
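
/* Illustrative use from a tc BPF program (a sketch; "skb" is the
 * program's struct __sk_buff context and the IPv4-over-Ethernet offset
 * is an assumption): rewrite the destination address and let the helper
 * keep a CHECKSUM_COMPLETE skb->csum consistent:
 *
 *	__be32 new_daddr = ...;
 *
 *	bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, daddr),
 *			    &new_daddr, sizeof(new_daddr),
 *			    BPF_F_RECOMPUTE_CSUM);
 */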
1700
1701BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1702           void *, to, u32, len)
1703{
1704        void *ptr;
1705
1706        if (unlikely(offset > 0xffff))
1707                goto err_clear;
1708
1709        ptr = skb_header_pointer(skb, offset, len, to);
1710        if (unlikely(!ptr))
1711                goto err_clear;
1712        if (ptr != to)
1713                memcpy(to, ptr, len);
1714
1715        return 0;
1716err_clear:
1717        memset(to, 0, len);
1718        return -EFAULT;
1719}
1720
1721static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1722        .func           = bpf_skb_load_bytes,
1723        .gpl_only       = false,
1724        .ret_type       = RET_INTEGER,
1725        .arg1_type      = ARG_PTR_TO_CTX,
1726        .arg2_type      = ARG_ANYTHING,
1727        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1728        .arg4_type      = ARG_CONST_SIZE,
1729};
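
/* Illustrative counterpart for reads (a sketch): unlike direct packet
 * access, this also works when the bytes sit in non-linear data:
 *
 *	struct ethhdr eth;
 *
 *	if (bpf_skb_load_bytes(skb, 0, &eth, sizeof(eth)) < 0)
 *		return TC_ACT_OK;
 */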
1730
1731BPF_CALL_4(bpf_flow_dissector_load_bytes,
1732           const struct bpf_flow_dissector *, ctx, u32, offset,
1733           void *, to, u32, len)
1734{
1735        void *ptr;
1736
1737        if (unlikely(offset > 0xffff))
1738                goto err_clear;
1739
1740        if (unlikely(!ctx->skb))
1741                goto err_clear;
1742
1743        ptr = skb_header_pointer(ctx->skb, offset, len, to);
1744        if (unlikely(!ptr))
1745                goto err_clear;
1746        if (ptr != to)
1747                memcpy(to, ptr, len);
1748
1749        return 0;
1750err_clear:
1751        memset(to, 0, len);
1752        return -EFAULT;
1753}
1754
1755static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
1756        .func           = bpf_flow_dissector_load_bytes,
1757        .gpl_only       = false,
1758        .ret_type       = RET_INTEGER,
1759        .arg1_type      = ARG_PTR_TO_CTX,
1760        .arg2_type      = ARG_ANYTHING,
1761        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1762        .arg4_type      = ARG_CONST_SIZE,
1763};
1764
1765BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1766           u32, offset, void *, to, u32, len, u32, start_header)
1767{
1768        u8 *end = skb_tail_pointer(skb);
1769        u8 *net = skb_network_header(skb);
1770        u8 *mac = skb_mac_header(skb);
1771        u8 *ptr;
1772
1773        if (unlikely(offset > 0xffff || len > (end - mac)))
1774                goto err_clear;
1775
1776        switch (start_header) {
1777        case BPF_HDR_START_MAC:
1778                ptr = mac + offset;
1779                break;
1780        case BPF_HDR_START_NET:
1781                ptr = net + offset;
1782                break;
1783        default:
1784                goto err_clear;
1785        }
1786
1787        if (likely(ptr >= mac && ptr + len <= end)) {
1788                memcpy(to, ptr, len);
1789                return 0;
1790        }
1791
1792err_clear:
1793        memset(to, 0, len);
1794        return -EFAULT;
1795}
1796
1797static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
1798        .func           = bpf_skb_load_bytes_relative,
1799        .gpl_only       = false,
1800        .ret_type       = RET_INTEGER,
1801        .arg1_type      = ARG_PTR_TO_CTX,
1802        .arg2_type      = ARG_ANYTHING,
1803        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1804        .arg4_type      = ARG_CONST_SIZE,
1805        .arg5_type      = ARG_ANYTHING,
1806};
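
/* Illustrative use (a sketch): read the IPv4 header relative to the
 * network header, which works even where no mac header is set, f.e. in
 * cgroup skb programs:
 *
 *	struct iphdr iph;
 *
 *	if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
 *					BPF_HDR_START_NET) < 0)
 *		return 1;
 */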
1807
1808BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1809{
1810        /* The idea is the following: should the needed direct read/write
1811         * test fail at runtime, we can pull in more data and redo the
1812         * check, since implicitly we invalidate previous checks here.
1813         *
1814         * Or, since we know how much we need to make read/writable,
1815         * this can be done once at the program beginning for the direct
1816         * access case. By this we overcome the limitation of only the
1817         * current headroom being accessible.
1818         */
1819        return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1820}
1821
1822static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1823        .func           = bpf_skb_pull_data,
1824        .gpl_only       = false,
1825        .ret_type       = RET_INTEGER,
1826        .arg1_type      = ARG_PTR_TO_CTX,
1827        .arg2_type      = ARG_ANYTHING,
1828};
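
/* Illustrative pattern (a sketch) for the comment above: pull in the
 * needed length once, then redo the direct access test. The pointers
 * must be reloaded after the pull, since prior checks are invalidated:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + ETH_HLEN > data_end) {
 *		if (bpf_skb_pull_data(skb, ETH_HLEN))
 *			return TC_ACT_OK;
 *		data = (void *)(long)skb->data;
 *		data_end = (void *)(long)skb->data_end;
 *	}
 */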
1829
1830BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
1831{
1832        return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
1833}
1834
1835static const struct bpf_func_proto bpf_sk_fullsock_proto = {
1836        .func           = bpf_sk_fullsock,
1837        .gpl_only       = false,
1838        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
1839        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
1840};
1841
1842static inline int sk_skb_try_make_writable(struct sk_buff *skb,
1843                                           unsigned int write_len)
1844{
1845        int err = __bpf_try_make_writable(skb, write_len);
1846
1847        bpf_compute_data_end_sk_skb(skb);
1848        return err;
1849}
1850
1851BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
1852{
1853        /* The idea is the following: should the needed direct read/write
1854         * test fail at runtime, we can pull in more data and redo the
1855         * check, since implicitly we invalidate previous checks here.
1856         *
1857         * Or, since we know how much we need to make read/writable,
1858         * this can be done once at the program beginning for the direct
1859         * access case. By this we overcome the limitation of only the
1860         * current headroom being accessible.
1861         */
1862        return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
1863}
1864
1865static const struct bpf_func_proto sk_skb_pull_data_proto = {
1866        .func           = sk_skb_pull_data,
1867        .gpl_only       = false,
1868        .ret_type       = RET_INTEGER,
1869        .arg1_type      = ARG_PTR_TO_CTX,
1870        .arg2_type      = ARG_ANYTHING,
1871};
1872
1873BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1874           u64, from, u64, to, u64, flags)
1875{
1876        __sum16 *ptr;
1877
1878        if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1879                return -EINVAL;
1880        if (unlikely(offset > 0xffff || offset & 1))
1881                return -EFAULT;
1882        if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1883                return -EFAULT;
1884
1885        ptr = (__sum16 *)(skb->data + offset);
1886        switch (flags & BPF_F_HDR_FIELD_MASK) {
1887        case 0:
1888                if (unlikely(from != 0))
1889                        return -EINVAL;
1890
1891                csum_replace_by_diff(ptr, to);
1892                break;
1893        case 2:
1894                csum_replace2(ptr, from, to);
1895                break;
1896        case 4:
1897                csum_replace4(ptr, from, to);
1898                break;
1899        default:
1900                return -EINVAL;
1901        }
1902
1903        return 0;
1904}
1905
1906static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
1907        .func           = bpf_l3_csum_replace,
1908        .gpl_only       = false,
1909        .ret_type       = RET_INTEGER,
1910        .arg1_type      = ARG_PTR_TO_CTX,
1911        .arg2_type      = ARG_ANYTHING,
1912        .arg3_type      = ARG_ANYTHING,
1913        .arg4_type      = ARG_ANYTHING,
1914        .arg5_type      = ARG_ANYTHING,
1915};
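
/* Illustrative NAT-style rewrite (a sketch; the IPv4-over-Ethernet
 * offsets are assumptions): store the new address, then fix the IPv4
 * header checksum as a 4-byte field replacement:
 *
 *	bpf_skb_store_bytes(skb, ETH_HLEN + offsetof(struct iphdr, saddr),
 *			    &new_ip, 4, 0);
 *	bpf_l3_csum_replace(skb, ETH_HLEN + offsetof(struct iphdr, check),
 *			    old_ip, new_ip, 4);
 */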
1916
1917BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
1918           u64, from, u64, to, u64, flags)
1919{
1920        bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
1921        bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
1922        bool do_mforce = flags & BPF_F_MARK_ENFORCE;
1923        __sum16 *ptr;
1924
1925        if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
1926                               BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
1927                return -EINVAL;
1928        if (unlikely(offset > 0xffff || offset & 1))
1929                return -EFAULT;
1930        if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1931                return -EFAULT;
1932
1933        ptr = (__sum16 *)(skb->data + offset);
1934        if (is_mmzero && !do_mforce && !*ptr)
1935                return 0;
1936
1937        switch (flags & BPF_F_HDR_FIELD_MASK) {
1938        case 0:
1939                if (unlikely(from != 0))
1940                        return -EINVAL;
1941
1942                inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
1943                break;
1944        case 2:
1945                inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1946                break;
1947        case 4:
1948                inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1949                break;
1950        default:
1951                return -EINVAL;
1952        }
1953
1954        if (is_mmzero && !*ptr)
1955                *ptr = CSUM_MANGLED_0;
1956        return 0;
1957}
1958
1959static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
1960        .func           = bpf_l4_csum_replace,
1961        .gpl_only       = false,
1962        .ret_type       = RET_INTEGER,
1963        .arg1_type      = ARG_PTR_TO_CTX,
1964        .arg2_type      = ARG_ANYTHING,
1965        .arg3_type      = ARG_ANYTHING,
1966        .arg4_type      = ARG_ANYTHING,
1967        .arg5_type      = ARG_ANYTHING,
1968};
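
/* Illustrative L4 counterpart (a sketch, assuming TCP over IPv4 without
 * options): since the addresses feed the pseudo header, pass
 * BPF_F_PSEUDO_HDR together with the 4-byte field size in flags:
 *
 *	bpf_l4_csum_replace(skb, ETH_HLEN + sizeof(struct iphdr) +
 *			    offsetof(struct tcphdr, check),
 *			    old_ip, new_ip, 4 | BPF_F_PSEUDO_HDR);
 */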
1969
1970BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
1971           __be32 *, to, u32, to_size, __wsum, seed)
1972{
1973        struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
1974        u32 diff_size = from_size + to_size;
1975        int i, j = 0;
1976
1977        /* This is quite flexible, some examples:
1978         *
1979         * from_size == 0, to_size > 0,  seed := csum --> pushing data
1980         * from_size > 0,  to_size == 0, seed := csum --> pulling data
1981         * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
1982         *
1983         * Even for diffing, from_size and to_size don't need to be equal.
1984         */
1985        if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
1986                     diff_size > sizeof(sp->diff)))
1987                return -EINVAL;
1988
1989        for (i = 0; i < from_size / sizeof(__be32); i++, j++)
1990                sp->diff[j] = ~from[i];
1991        for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
1992                sp->diff[j] = to[i];
1993
1994        return csum_partial(sp->diff, diff_size, seed);
1995}
1996
1997static const struct bpf_func_proto bpf_csum_diff_proto = {
1998        .func           = bpf_csum_diff,
1999        .gpl_only       = false,
2000        .pkt_access     = true,
2001        .ret_type       = RET_INTEGER,
2002        .arg1_type      = ARG_PTR_TO_MEM_OR_NULL,
2003        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
2004        .arg3_type      = ARG_PTR_TO_MEM_OR_NULL,
2005        .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
2006        .arg5_type      = ARG_ANYTHING,
2007};
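
/* Illustrative combination (a sketch) of the modes above: diff an
 * arbitrary changed region, then feed the result into the 0-sized
 * (diff) case of bpf_l4_csum_replace(); "csum_off" is an assumed
 * offset of the L4 checksum field:
 *
 *	s64 diff = bpf_csum_diff(old_words, 8, new_words, 8, 0);
 *
 *	if (diff >= 0)
 *		bpf_l4_csum_replace(skb, csum_off, 0, diff,
 *				    BPF_F_PSEUDO_HDR);
 */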
2008
2009BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
2010{
2011        /* The interface is to be used in combination with bpf_csum_diff()
2012         * for direct packet writes. csum rotation for alignment as well
2013         * as emulating csum_sub() can be done from the eBPF program.
2014         */
2015        if (skb->ip_summed == CHECKSUM_COMPLETE)
2016                return (skb->csum = csum_add(skb->csum, csum));
2017
2018        return -ENOTSUPP;
2019}
2020
2021static const struct bpf_func_proto bpf_csum_update_proto = {
2022        .func           = bpf_csum_update,
2023        .gpl_only       = false,
2024        .ret_type       = RET_INTEGER,
2025        .arg1_type      = ARG_PTR_TO_CTX,
2026        .arg2_type      = ARG_ANYTHING,
2027};
2028
2029static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
2030{
2031        return dev_forward_skb(dev, skb);
2032}
2033
2034static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
2035                                      struct sk_buff *skb)
2036{
2037        int ret = ____dev_forward_skb(dev, skb);
2038
2039        if (likely(!ret)) {
2040                skb->dev = dev;
2041                ret = netif_rx(skb);
2042        }
2043
2044        return ret;
2045}
2046
2047static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2048{
2049        int ret;
2050
2051        if (dev_xmit_recursion()) {
2052                net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2053                kfree_skb(skb);
2054                return -ENETDOWN;
2055        }
2056
2057        skb->dev = dev;
2058
2059        dev_xmit_recursion_inc();
2060        ret = dev_queue_xmit(skb);
2061        dev_xmit_recursion_dec();
2062
2063        return ret;
2064}
2065
2066static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2067                                 u32 flags)
2068{
2069        unsigned int mlen = skb_network_offset(skb);
2070
2071        if (mlen) {
2072                __skb_pull(skb, mlen);
2073
2074                /* At ingress, the mac header has already been pulled once.
2075                 * At egress, skb_postpull_rcsum has to be done in case
2076                 * the skb originated from ingress (i.e. a forwarded skb)
2077                 * to ensure that rcsum starts at the net header.
2078                 */
2079                if (!skb_at_tc_ingress(skb))
2080                        skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2081        }
2082        skb_pop_mac_header(skb);
2083        skb_reset_mac_len(skb);
2084        return flags & BPF_F_INGRESS ?
2085               __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
2086}
2087
2088static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
2089                                 u32 flags)
2090{
2091        /* Verify that a link layer header is carried */
2092        if (unlikely(skb->mac_header >= skb->network_header)) {
2093                kfree_skb(skb);
2094                return -ERANGE;
2095        }
2096
2097        bpf_push_mac_rcsum(skb);
2098        return flags & BPF_F_INGRESS ?
2099               __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
2100}
2101
2102static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
2103                          u32 flags)
2104{
2105        if (dev_is_mac_header_xmit(dev))
2106                return __bpf_redirect_common(skb, dev, flags);
2107        else
2108                return __bpf_redirect_no_mac(skb, dev, flags);
2109}
2110
2111BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
2112{
2113        struct net_device *dev;
2114        struct sk_buff *clone;
2115        int ret;
2116
2117        if (unlikely(flags & ~(BPF_F_INGRESS)))
2118                return -EINVAL;
2119
2120        dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
2121        if (unlikely(!dev))
2122                return -EINVAL;
2123
2124        clone = skb_clone(skb, GFP_ATOMIC);
2125        if (unlikely(!clone))
2126                return -ENOMEM;
2127
2128        /* For direct write, we need to keep the invariant that the skbs
2129         * we're dealing with are uncloned. Should uncloning fail here,
2130         * we need to free the just generated clone so that the skb
2131         * becomes uncloned again.
2132         */
2133        ret = bpf_try_make_head_writable(skb);
2134        if (unlikely(ret)) {
2135                kfree_skb(clone);
2136                return -ENOMEM;
2137        }
2138
2139        return __bpf_redirect(clone, dev, flags);
2140}
2141
2142static const struct bpf_func_proto bpf_clone_redirect_proto = {
2143        .func           = bpf_clone_redirect,
2144        .gpl_only       = false,
2145        .ret_type       = RET_INTEGER,
2146        .arg1_type      = ARG_PTR_TO_CTX,
2147        .arg2_type      = ARG_ANYTHING,
2148        .arg3_type      = ARG_ANYTHING,
2149};
2150
2151DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
2152EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
2153
2154BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
2155{
2156        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2157
2158        if (unlikely(flags & ~(BPF_F_INGRESS)))
2159                return TC_ACT_SHOT;
2160
2161        ri->flags = flags;
2162        ri->tgt_index = ifindex;
2163
2164        return TC_ACT_REDIRECT;
2165}
2166
2167int skb_do_redirect(struct sk_buff *skb)
2168{
2169        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2170        struct net_device *dev;
2171
2172        dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
2173        ri->tgt_index = 0;
2174        if (unlikely(!dev)) {
2175                kfree_skb(skb);
2176                return -EINVAL;
2177        }
2178
2179        return __bpf_redirect(skb, dev, ri->flags);
2180}
2181
2182static const struct bpf_func_proto bpf_redirect_proto = {
2183        .func           = bpf_redirect,
2184        .gpl_only       = false,
2185        .ret_type       = RET_INTEGER,
2186        .arg1_type      = ARG_ANYTHING,
2187        .arg2_type      = ARG_ANYTHING,
2188};
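
/* Illustrative tc program epilogue (a sketch): in contrast to
 * bpf_clone_redirect() above, which queues a clone right away, this
 * only records the target and lets the caller act on TC_ACT_REDIRECT:
 *
 *	return bpf_redirect(ifindex, 0);		(egress of ifindex)
 *	return bpf_redirect(ifindex, BPF_F_INGRESS);	(ingress of ifindex)
 */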
2189
2190BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
2191{
2192        msg->apply_bytes = bytes;
2193        return 0;
2194}
2195
2196static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
2197        .func           = bpf_msg_apply_bytes,
2198        .gpl_only       = false,
2199        .ret_type       = RET_INTEGER,
2200        .arg1_type      = ARG_PTR_TO_CTX,
2201        .arg2_type      = ARG_ANYTHING,
2202};
2203
2204BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
2205{
2206        msg->cork_bytes = bytes;
2207        return 0;
2208}
2209
2210static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2211        .func           = bpf_msg_cork_bytes,
2212        .gpl_only       = false,
2213        .ret_type       = RET_INTEGER,
2214        .arg1_type      = ARG_PTR_TO_CTX,
2215        .arg2_type      = ARG_ANYTHING,
2216};
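
/* Illustrative use in a BPF_PROG_TYPE_SK_MSG program (a sketch): apply
 * the verdict to the first 24 bytes only, and have the infrastructure
 * wait until at least 24 bytes are queued before running the program:
 *
 *	bpf_msg_apply_bytes(msg, 24);
 *	bpf_msg_cork_bytes(msg, 24);
 *	return SK_PASS;
 */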
2217
2218BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
2219           u32, end, u64, flags)
2220{
2221        u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
2222        u32 first_sge, last_sge, i, shift, bytes_sg_total;
2223        struct scatterlist *sge;
2224        u8 *raw, *to, *from;
2225        struct page *page;
2226
2227        if (unlikely(flags || end <= start))
2228                return -EINVAL;
2229
2230        /* First find the starting scatterlist element */
2231        i = msg->sg.start;
2232        do {
2233                len = sk_msg_elem(msg, i)->length;
2234                if (start < offset + len)
2235                        break;
2236                offset += len;
2237                sk_msg_iter_var_next(i);
2238        } while (i != msg->sg.end);
2239
2240        if (unlikely(start >= offset + len))
2241                return -EINVAL;
2242
2243        first_sge = i;
2244        /* The start may point into the sg element so we need to also
2245         * account for the headroom.
2246         */
2247        bytes_sg_total = start - offset + bytes;
2248        if (!msg->sg.copy[i] && bytes_sg_total <= len)
2249                goto out;
2250
2251        /* At this point we need to linearize multiple scatterlist
2252         * elements or a single shared page. Either way we need to
2253         * copy into a linear buffer exclusively owned by BPF. Then
2254         * place the buffer in the scatterlist and fixup the original
2255         * entries by removing the entries now in the linear buffer
2256         * and shifting the remaining entries. For now we do not try
2257         * to copy partial entries to avoid complexity of running out
2258         * of sg_entry slots. The downside is reading a single byte
2259         * will copy the entire sg entry.
2260         */
2261        do {
2262                copy += sk_msg_elem(msg, i)->length;
2263                sk_msg_iter_var_next(i);
2264                if (bytes_sg_total <= copy)
2265                        break;
2266        } while (i != msg->sg.end);
2267        last_sge = i;
2268
2269        if (unlikely(bytes_sg_total > copy))
2270                return -EINVAL;
2271
2272        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2273                           get_order(copy));
2274        if (unlikely(!page))
2275                return -ENOMEM;
2276
2277        raw = page_address(page);
2278        i = first_sge;
2279        do {
2280                sge = sk_msg_elem(msg, i);
2281                from = sg_virt(sge);
2282                len = sge->length;
2283                to = raw + poffset;
2284
2285                memcpy(to, from, len);
2286                poffset += len;
2287                sge->length = 0;
2288                put_page(sg_page(sge));
2289
2290                sk_msg_iter_var_next(i);
2291        } while (i != last_sge);
2292
2293        sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
2294
2295        /* To repair the sg ring we need to shift entries. If we only
2296         * had a single entry though, we can just replace it and
2297         * be done. Otherwise walk the ring and shift the entries.
2298         */
2299        WARN_ON_ONCE(last_sge == first_sge);
2300        shift = last_sge > first_sge ?
2301                last_sge - first_sge - 1 :
2302                MAX_MSG_FRAGS - first_sge + last_sge - 1;
2303        if (!shift)
2304                goto out;
2305
2306        i = first_sge;
2307        sk_msg_iter_var_next(i);
2308        do {
2309                u32 move_from;
2310
2311                if (i + shift >= MAX_MSG_FRAGS)
2312                        move_from = i + shift - MAX_MSG_FRAGS;
2313                else
2314                        move_from = i + shift;
2315                if (move_from == msg->sg.end)
2316                        break;
2317
2318                msg->sg.data[i] = msg->sg.data[move_from];
2319                msg->sg.data[move_from].length = 0;
2320                msg->sg.data[move_from].page_link = 0;
2321                msg->sg.data[move_from].offset = 0;
2322                sk_msg_iter_var_next(i);
2323        } while (1);
2324
2325        msg->sg.end = msg->sg.end - shift > msg->sg.end ?
2326                      msg->sg.end - shift + MAX_MSG_FRAGS :
2327                      msg->sg.end - shift;
2328out:
2329        msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
2330        msg->data_end = msg->data + bytes;
2331        return 0;
2332}
2333
2334static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2335        .func           = bpf_msg_pull_data,
2336        .gpl_only       = false,
2337        .ret_type       = RET_INTEGER,
2338        .arg1_type      = ARG_PTR_TO_CTX,
2339        .arg2_type      = ARG_ANYTHING,
2340        .arg3_type      = ARG_ANYTHING,
2341        .arg4_type      = ARG_ANYTHING,
2342};
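
/* Illustrative use (a sketch): make bytes [4, 8) directly accessible
 * through msg->data before parsing them; the pointers must be
 * recomputed afterwards:
 *
 *	void *data, *data_end;
 *
 *	if (bpf_msg_pull_data(msg, 4, 8, 0))
 *		return SK_DROP;
 *	data = (void *)(long)msg->data;
 *	data_end = (void *)(long)msg->data_end;
 *	if (data + 4 > data_end)
 *		return SK_DROP;
 */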
2343
2344BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
2345           u32, len, u64, flags)
2346{
2347        struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
2348        u32 new, i = 0, l, space, copy = 0, offset = 0;
2349        u8 *raw, *to, *from;
2350        struct page *page;
2351
2352        if (unlikely(flags))
2353                return -EINVAL;
2354
2355        /* First find the starting scatterlist element */
2356        i = msg->sg.start;
2357        do {
2358                l = sk_msg_elem(msg, i)->length;
2359
2360                if (start < offset + l)
2361                        break;
2362                offset += l;
2363                sk_msg_iter_var_next(i);
2364        } while (i != msg->sg.end);
2365
2366        if (start >= offset + l)
2367                return -EINVAL;
2368
2369        space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2370
2371        /* If no space is available we will fall back to copy; we need
2372         * at least one scatterlist elem available to push data into
2373         * when start aligns to the beginning of an element, or two
2374         * when it falls inside an element. We handle the start equals
2375         * offset case because it's the common case for inserting a
2376         * header.
2377         */
2378        if (!space || (space == 1 && start != offset))
2379                copy = msg->sg.data[i].length;
2380
2381        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2382                           get_order(copy + len));
2383        if (unlikely(!page))
2384                return -ENOMEM;
2385
2386        if (copy) {
2387                int front, back;
2388
2389                raw = page_address(page);
2390
2391                psge = sk_msg_elem(msg, i);
2392                front = start - offset;
2393                back = psge->length - front;
2394                from = sg_virt(psge);
2395
2396                if (front)
2397                        memcpy(raw, from, front);
2398
2399                if (back) {
2400                        from += front;
2401                        to = raw + front + len;
2402
2403                        memcpy(to, from, back);
2404                }
2405
2406                put_page(sg_page(psge));
2407        } else if (start - offset) {
2408                psge = sk_msg_elem(msg, i);
2409                rsge = sk_msg_elem_cpy(msg, i);
2410
2411                psge->length = start - offset;
2412                rsge.length -= psge->length;
2413                rsge.offset += start - offset;
2414
2415                sk_msg_iter_var_next(i);
2416                sg_unmark_end(psge);
2417                sk_msg_iter_next(msg, end);
2418        }
2419
2420        /* Slot(s) to place newly allocated data */
2421        new = i;
2422
2423        /* Shift one or two slots as needed */
2424        if (!copy) {
2425                sge = sk_msg_elem_cpy(msg, i);
2426
2427                sk_msg_iter_var_next(i);
2428                sg_unmark_end(&sge);
2429                sk_msg_iter_next(msg, end);
2430
2431                nsge = sk_msg_elem_cpy(msg, i);
2432                if (rsge.length) {
2433                        sk_msg_iter_var_next(i);
2434                        nnsge = sk_msg_elem_cpy(msg, i);
2435                }
2436
2437                while (i != msg->sg.end) {
2438                        msg->sg.data[i] = sge;
2439                        sge = nsge;
2440                        sk_msg_iter_var_next(i);
2441                        if (rsge.length) {
2442                                nsge = nnsge;
2443                                nnsge = sk_msg_elem_cpy(msg, i);
2444                        } else {
2445                                nsge = sk_msg_elem_cpy(msg, i);
2446                        }
2447                }
2448        }
2449
2450        /* Place newly allocated data buffer */
2451        sk_mem_charge(msg->sk, len);
2452        msg->sg.size += len;
2453        msg->sg.copy[new] = false;
2454        sg_set_page(&msg->sg.data[new], page, len + copy, 0);
2455        if (rsge.length) {
2456                get_page(sg_page(&rsge));
2457                sk_msg_iter_var_next(new);
2458                msg->sg.data[new] = rsge;
2459        }
2460
2461        sk_msg_compute_data_pointers(msg);
2462        return 0;
2463}
2464
2465static const struct bpf_func_proto bpf_msg_push_data_proto = {
2466        .func           = bpf_msg_push_data,
2467        .gpl_only       = false,
2468        .ret_type       = RET_INTEGER,
2469        .arg1_type      = ARG_PTR_TO_CTX,
2470        .arg2_type      = ARG_ANYTHING,
2471        .arg3_type      = ARG_ANYTHING,
2472        .arg4_type      = ARG_ANYTHING,
2473};
2474
2475static void sk_msg_shift_left(struct sk_msg *msg, int i)
2476{
2477        int prev;
2478
2479        do {
2480                prev = i;
2481                sk_msg_iter_var_next(i);
2482                msg->sg.data[prev] = msg->sg.data[i];
2483        } while (i != msg->sg.end);
2484
2485        sk_msg_iter_prev(msg, end);
2486}
2487
2488static void sk_msg_shift_right(struct sk_msg *msg, int i)
2489{
2490        struct scatterlist tmp, sge;
2491
2492        sk_msg_iter_next(msg, end);
2493        sge = sk_msg_elem_cpy(msg, i);
2494        sk_msg_iter_var_next(i);
2495        tmp = sk_msg_elem_cpy(msg, i);
2496
2497        while (i != msg->sg.end) {
2498                msg->sg.data[i] = sge;
2499                sk_msg_iter_var_next(i);
2500                sge = tmp;
2501                tmp = sk_msg_elem_cpy(msg, i);
2502        }
2503}
2504
2505BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
2506           u32, len, u64, flags)
2507{
2508        u32 i = 0, l, space, offset = 0;
2509        u64 last = start + len;
2510        int pop;
2511
2512        if (unlikely(flags))
2513                return -EINVAL;
2514
2515        /* First find the starting scatterlist element */
2516        i = msg->sg.start;
2517        do {
2518                l = sk_msg_elem(msg, i)->length;
2519
2520                if (start < offset + l)
2521                        break;
2522                offset += l;
2523                sk_msg_iter_var_next(i);
2524        } while (i != msg->sg.end);
2525
2526        /* Bounds checks: start and pop must be inside message */
2527        if (start >= offset + l || last >= msg->sg.size)
2528                return -EINVAL;
2529
2530        space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2531
2532        pop = len;
2533        /* --------------| offset
2534         * -| start      |-------- len -------|
2535         *
2536         *  |----- a ----|-------- pop -------|----- b ----|
2537         *  |______________________________________________| length
2538         *
2539         *
2540         * a:   region at front of scatter element to save
2541         * b:   region at back of scatter element to save when length > a + pop
2542         * pop: region to pop from element, same as input 'pop' here and
2543         *      decremented below per iteration.
2544         *
2545         * Two top-level cases to handle when start != offset: first, b is
2546         * non-zero, and second, b is zero, corresponding to when a pop
2547         * includes more than one element.
2548         *
2549         * Then if b is non-zero AND there is no space, allocate space and
2550         * compact the a and b regions into a page. If there is space, shift
2551         * the ring to the right, freeing the next element in the ring to
2552         * place b, leaving a untouched except to reduce its length.
2553         */
2554        if (start != offset) {
2555                struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
2556                int a = start - offset;
2557                int b = sge->length - pop - a;
2558
2559                sk_msg_iter_var_next(i);
2560
2561                if (pop < sge->length - a) {
2562                        if (space) {
2563                                sge->length = a;
2564                                sk_msg_shift_right(msg, i);
2565                                nsge = sk_msg_elem(msg, i);
2566                                get_page(sg_page(sge));
2567                                sg_set_page(nsge,
2568                                            sg_page(sge),
2569                                            b, sge->offset + pop + a);
2570                        } else {
2571                                struct page *page, *orig;
2572                                u8 *to, *from;
2573
2574                                page = alloc_pages(__GFP_NOWARN |
2575                                                   __GFP_COMP   | GFP_ATOMIC,
2576                                                   get_order(a + b));
2577                                if (unlikely(!page))
2578                                        return -ENOMEM;
2579
2580                                sge->length = a;
2581                                orig = sg_page(sge);
2582                                from = sg_virt(sge);
2583                                to = page_address(page);
2584                                memcpy(to, from, a);
2585                                memcpy(to + a, from + a + pop, b);
2586                                sg_set_page(sge, page, a + b, 0);
2587                                put_page(orig);
2588                        }
2589                        pop = 0;
2590                } else if (pop >= sge->length - a) {
2591                        pop -= (sge->length - a);
2592                        sge->length = a;
2593                }
2594        }
2595
2596        /* From above the current layout _must_ be as follows,
2597         *
2598         * -| offset
2599         * -| start
2600         *
2601         *  |---- pop ---|---------------- b ------------|
2602         *  |____________________________________________| length
2603         *
2604         * Offset and start of the current msg elem are equal because in the
2605         * previous case we handled offset != start and either consumed the
2606         * entire element and advanced to the next element OR pop == 0.
2607         *
2608         * Two cases to handle here: first, pop is less than the length,
2609         * leaving some remainder b above; simply adjust the element's layout
2610         * in this case. Second, pop >= the length of the element, so that
2611         * b = 0; in this case advance to the next element, decrementing pop.
2612         */
2613        while (pop) {
2614                struct scatterlist *sge = sk_msg_elem(msg, i);
2615
2616                if (pop < sge->length) {
2617                        sge->length -= pop;
2618                        sge->offset += pop;
2619                        pop = 0;
2620                } else {
2621                        pop -= sge->length;
2622                        sk_msg_shift_left(msg, i);
2623                }
2624                sk_msg_iter_var_next(i);
2625        }
2626
2627        sk_mem_uncharge(msg->sk, len - pop);
2628        msg->sg.size -= (len - pop);
2629        sk_msg_compute_data_pointers(msg);
2630        return 0;
2631}
2632
2633static const struct bpf_func_proto bpf_msg_pop_data_proto = {
2634        .func           = bpf_msg_pop_data,
2635        .gpl_only       = false,
2636        .ret_type       = RET_INTEGER,
2637        .arg1_type      = ARG_PTR_TO_CTX,
2638        .arg2_type      = ARG_ANYTHING,
2639        .arg3_type      = ARG_ANYTHING,
2640        .arg4_type      = ARG_ANYTHING,
2641};
2642
2643BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
2644{
2645        return task_get_classid(skb);
2646}
2647
2648static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
2649        .func           = bpf_get_cgroup_classid,
2650        .gpl_only       = false,
2651        .ret_type       = RET_INTEGER,
2652        .arg1_type      = ARG_PTR_TO_CTX,
2653};
2654
2655BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
2656{
2657        return dst_tclassid(skb);
2658}
2659
2660static const struct bpf_func_proto bpf_get_route_realm_proto = {
2661        .func           = bpf_get_route_realm,
2662        .gpl_only       = false,
2663        .ret_type       = RET_INTEGER,
2664        .arg1_type      = ARG_PTR_TO_CTX,
2665};
2666
2667BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
2668{
2669        /* If skb_clear_hash() was called due to mangling, we can
2670         * trigger SW recalculation here. Later access to hash
2671         * can then use the inline skb->hash via context directly
2672         * instead of calling this helper again.
2673         */
2674        return skb_get_hash(skb);
2675}
2676
2677static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
2678        .func           = bpf_get_hash_recalc,
2679        .gpl_only       = false,
2680        .ret_type       = RET_INTEGER,
2681        .arg1_type      = ARG_PTR_TO_CTX,
2682};
2683
2684BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
2685{
2686        /* After all direct packet writes, this can be used once to
2687         * trigger a lazy recalc on the next skb_get_hash() invocation.
2688         */
2689        skb_clear_hash(skb);
2690        return 0;
2691}
2692
2693static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
2694        .func           = bpf_set_hash_invalid,
2695        .gpl_only       = false,
2696        .ret_type       = RET_INTEGER,
2697        .arg1_type      = ARG_PTR_TO_CTX,
2698};
2699
2700BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
2701{
2702        /* Set the user specified hash as L4(+), so that it gets returned
2703         * on a skb_get_hash() call unless the BPF prog later on triggers
2704         * a skb_clear_hash().
2705         */
2706        __skb_set_sw_hash(skb, hash, true);
2707        return 0;
2708}
2709
2710static const struct bpf_func_proto bpf_set_hash_proto = {
2711        .func           = bpf_set_hash,
2712        .gpl_only       = false,
2713        .ret_type       = RET_INTEGER,
2714        .arg1_type      = ARG_PTR_TO_CTX,
2715        .arg2_type      = ARG_ANYTHING,
2716};
2717
2718BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
2719           u16, vlan_tci)
2720{
2721        int ret;
2722
2723        if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
2724                     vlan_proto != htons(ETH_P_8021AD)))
2725                vlan_proto = htons(ETH_P_8021Q);
2726
2727        bpf_push_mac_rcsum(skb);
2728        ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
2729        bpf_pull_mac_rcsum(skb);
2730
2731        bpf_compute_data_pointers(skb);
2732        return ret;
2733}
2734
2735static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
2736        .func           = bpf_skb_vlan_push,
2737        .gpl_only       = false,
2738        .ret_type       = RET_INTEGER,
2739        .arg1_type      = ARG_PTR_TO_CTX,
2740        .arg2_type      = ARG_ANYTHING,
2741        .arg3_type      = ARG_ANYTHING,
2742};
2743
2744BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
2745{
2746        int ret;
2747
2748        bpf_push_mac_rcsum(skb);
2749        ret = skb_vlan_pop(skb);
2750        bpf_pull_mac_rcsum(skb);
2751
2752        bpf_compute_data_pointers(skb);
2753        return ret;
2754}
2755
2756static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
2757        .func           = bpf_skb_vlan_pop,
2758        .gpl_only       = false,
2759        .ret_type       = RET_INTEGER,
2760        .arg1_type      = ARG_PTR_TO_CTX,
2761};
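
/* Illustrative re-tagging sequence (a sketch; bpf_htons() comes from the
 * program-side headers): pop an existing VLAN header, then push a fresh
 * 802.1Q tag with VLAN id 100:
 *
 *	bpf_skb_vlan_pop(skb);
 *	bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 100);
 */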
2762
2763static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
2764{
2765        /* Caller already did skb_cow() with len as headroom,
2766         * so no need to do it here.
2767         */
2768        skb_push(skb, len);
2769        memmove(skb->data, skb->data + len, off);
2770        memset(skb->data + off, 0, len);
2771
2772        /* No skb_postpush_rcsum(skb, skb->data + off, len)
2773         * needed here as it does not change the skb->csum
2774         * result for checksum complete when summing over
2775         * zeroed blocks.
2776         */
2777        return 0;
2778}
2779
2780static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
2781{
2782        /* skb_ensure_writable() is not needed here, as we're
2783         * already working on an uncloned skb.
2784         */
2785        if (unlikely(!pskb_may_pull(skb, off + len)))
2786                return -ENOMEM;
2787
2788        skb_postpull_rcsum(skb, skb->data + off, len);
2789        memmove(skb->data + len, skb->data, off);
2790        __skb_pull(skb, len);
2791
2792        return 0;
2793}
2794
2795static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
2796{
2797        bool trans_same = skb->transport_header == skb->network_header;
2798        int ret;
2799
2800        /* There's no need for a __skb_push()/__skb_pull() pair to
2801         * get to the start of the mac header, as we're guaranteed
2802         * to always start from here under eBPF.
2803         */
2804        ret = bpf_skb_generic_push(skb, off, len);
2805        if (likely(!ret)) {
2806                skb->mac_header -= len;
2807                skb->network_header -= len;
2808                if (trans_same)
2809                        skb->transport_header = skb->network_header;
2810        }
2811
2812        return ret;
2813}
2814
2815static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
2816{
2817        bool trans_same = skb->transport_header == skb->network_header;
2818        int ret;
2819
2820        /* Same here, __skb_push()/__skb_pull() pair not needed. */
2821        ret = bpf_skb_generic_pop(skb, off, len);
2822        if (likely(!ret)) {
2823                skb->mac_header += len;
2824                skb->network_header += len;
2825                if (trans_same)
2826                        skb->transport_header = skb->network_header;
2827        }
2828
2829        return ret;
2830}
2831
2832static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2833{
2834        const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2835        u32 off = skb_mac_header_len(skb);
2836        int ret;
2837
2838        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
2839                return -ENOTSUPP;
2840
2841        ret = skb_cow(skb, len_diff);
2842        if (unlikely(ret < 0))
2843                return ret;
2844
2845        ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2846        if (unlikely(ret < 0))
2847                return ret;
2848
2849        if (skb_is_gso(skb)) {
2850                struct skb_shared_info *shinfo = skb_shinfo(skb);
2851
2852                /* SKB_GSO_TCPV4 needs to be changed into
2853                 * SKB_GSO_TCPV6.
2854                 */
2855                if (shinfo->gso_type & SKB_GSO_TCPV4) {
2856                        shinfo->gso_type &= ~SKB_GSO_TCPV4;
2857                        shinfo->gso_type |=  SKB_GSO_TCPV6;
2858                }
2859
2860                /* Due to IPv6 header, MSS needs to be downgraded. */
2861                skb_decrease_gso_size(shinfo, len_diff);
2862                /* Header must be checked, and gso_segs recomputed. */
2863                shinfo->gso_type |= SKB_GSO_DODGY;
2864                shinfo->gso_segs = 0;
2865        }
2866
2867        skb->protocol = htons(ETH_P_IPV6);
2868        skb_clear_hash(skb);
2869
2870        return 0;
2871}
2872
2873static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2874{
2875        const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2876        u32 off = skb_mac_header_len(skb);
2877        int ret;
2878
2879        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
2880                return -ENOTSUPP;
2881
2882        ret = skb_unclone(skb, GFP_ATOMIC);
2883        if (unlikely(ret < 0))
2884                return ret;
2885
2886        ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2887        if (unlikely(ret < 0))
2888                return ret;
2889
2890        if (skb_is_gso(skb)) {
2891                struct skb_shared_info *shinfo = skb_shinfo(skb);
2892
2893                /* SKB_GSO_TCPV6 needs to be changed into
2894                 * SKB_GSO_TCPV4.
2895                 */
2896                if (shinfo->gso_type & SKB_GSO_TCPV6) {
2897                        shinfo->gso_type &= ~SKB_GSO_TCPV6;
2898                        shinfo->gso_type |=  SKB_GSO_TCPV4;
2899                }
2900
2901                /* Due to IPv4 header, MSS can be upgraded. */
2902                skb_increase_gso_size(shinfo, len_diff);
2903                /* Header must be checked, and gso_segs recomputed. */
2904                shinfo->gso_type |= SKB_GSO_DODGY;
2905                shinfo->gso_segs = 0;
2906        }
2907
2908        skb->protocol = htons(ETH_P_IP);
2909        skb_clear_hash(skb);
2910
2911        return 0;
2912}
2913
2914static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
2915{
2916        __be16 from_proto = skb->protocol;
2917
2918        if (from_proto == htons(ETH_P_IP) &&
2919              to_proto == htons(ETH_P_IPV6))
2920                return bpf_skb_proto_4_to_6(skb);
2921
2922        if (from_proto == htons(ETH_P_IPV6) &&
2923              to_proto == htons(ETH_P_IP))
2924                return bpf_skb_proto_6_to_4(skb);
2925
2926        return -ENOTSUPP;
2927}
2928
2929BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
2930           u64, flags)
2931{
2932        int ret;
2933
2934        if (unlikely(flags))
2935                return -EINVAL;
2936
2937        /* General idea is that this helper does the basic groundwork
2938         * needed for changing the protocol, and eBPF program fills the
2939         * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
2940         * and other helpers, rather than passing a raw buffer here.
2941         *
2942         * The rationale is to keep this minimal and without a need to
2943         * deal with raw packet data. F.e. even if we were to pass buffers
2944         * here, the program still needs to call the bpf_lX_csum_replace()
2945         * helpers anyway. Plus, this way we also keep separation of
2946         * concerns, since f.e. bpf_skb_store_bytes() should only take
2947         * care of stores.
2948         *
2949         * Currently, additional options and extension header space are
2950         * not supported, but flags register is reserved so we can adapt
2951         * that. For offloads, we mark packet as dodgy, so that headers
2952         * need to be verified first.
2953         */
2954        ret = bpf_skb_proto_xlat(skb, proto);
2955        bpf_compute_data_pointers(skb);
2956        return ret;
2957}
2958
2959static const struct bpf_func_proto bpf_skb_change_proto_proto = {
2960        .func           = bpf_skb_change_proto,
2961        .gpl_only       = false,
2962        .ret_type       = RET_INTEGER,
2963        .arg1_type      = ARG_PTR_TO_CTX,
2964        .arg2_type      = ARG_ANYTHING,
2965        .arg3_type      = ARG_ANYTHING,
2966};
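
/*
 * Example of how a tc BPF program might use this helper -- an
 * illustrative sketch only; the section/program names are hypothetical,
 * and the program must still write the new IPv6 header and fix up L4
 * checksums itself, as the comment above explains:
 *
 *	#include <linux/bpf.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/pkt_cls.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_endian.h>
 *
 *	SEC("classifier")
 *	int xlat_4_to_6(struct __sk_buff *skb)
 *	{
 *		if (skb->protocol != bpf_htons(ETH_P_IP))
 *			return TC_ACT_OK;
 *		// Makes room for the larger header and flips
 *		// skb->protocol; flags must currently be 0.
 *		if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *			return TC_ACT_SHOT;
 *		// ...store the IPv6 header via bpf_skb_store_bytes() and
 *		// fix the L4 pseudo-header via bpf_l4_csum_replace()...
 *		return TC_ACT_OK;
 *	}
 */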
2967
2968BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
2969{
2970        /* We only allow a restricted subset to be changed for now. */
2971        if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
2972                     !skb_pkt_type_ok(pkt_type)))
2973                return -EINVAL;
2974
2975        skb->pkt_type = pkt_type;
2976        return 0;
2977}
2978
2979static const struct bpf_func_proto bpf_skb_change_type_proto = {
2980        .func           = bpf_skb_change_type,
2981        .gpl_only       = false,
2982        .ret_type       = RET_INTEGER,
2983        .arg1_type      = ARG_PTR_TO_CTX,
2984        .arg2_type      = ARG_ANYTHING,
2985};
2986
2987static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
2988{
2989        switch (skb->protocol) {
2990        case htons(ETH_P_IP):
2991                return sizeof(struct iphdr);
2992        case htons(ETH_P_IPV6):
2993                return sizeof(struct ipv6hdr);
2994        default:
2995                return ~0U;
2996        }
2997}
2998
2999#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK    (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
3000                                         BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3001
3002#define BPF_F_ADJ_ROOM_MASK             (BPF_F_ADJ_ROOM_FIXED_GSO | \
3003                                         BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
3004                                         BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
3005                                         BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
3006                                         BPF_F_ADJ_ROOM_ENCAP_L2( \
3007                                          BPF_ADJ_ROOM_ENCAP_L2_MASK))
3008
3009static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
3010                            u64 flags)
3011{
3012        u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
3013        bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
3014        u16 mac_len = 0, inner_net = 0, inner_trans = 0;
3015        unsigned int gso_type = SKB_GSO_DODGY;
3016        int ret;
3017
3018        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3019                /* udp gso_size delineates datagrams, only allow if fixed */
3020                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3021                    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3022                        return -ENOTSUPP;
3023        }
3024
3025        ret = skb_cow_head(skb, len_diff);
3026        if (unlikely(ret < 0))
3027                return ret;
3028
3029        if (encap) {
3030                if (skb->protocol != htons(ETH_P_IP) &&
3031                    skb->protocol != htons(ETH_P_IPV6))
3032                        return -ENOTSUPP;
3033
3034                if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
3035                    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3036                        return -EINVAL;
3037
3038                if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
3039                    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3040                        return -EINVAL;
3041
3042                if (skb->encapsulation)
3043                        return -EALREADY;
3044
3045                mac_len = skb->network_header - skb->mac_header;
3046                inner_net = skb->network_header;
3047                if (inner_mac_len > len_diff)
3048                        return -EINVAL;
3049                inner_trans = skb->transport_header;
3050        }
3051
3052        ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3053        if (unlikely(ret < 0))
3054                return ret;
3055
3056        if (encap) {
3057                skb->inner_mac_header = inner_net - inner_mac_len;
3058                skb->inner_network_header = inner_net;
3059                skb->inner_transport_header = inner_trans;
3060                skb_set_inner_protocol(skb, skb->protocol);
3061
3062                skb->encapsulation = 1;
3063                skb_set_network_header(skb, mac_len);
3064
3065                if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3066                        gso_type |= SKB_GSO_UDP_TUNNEL;
3067                else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
3068                        gso_type |= SKB_GSO_GRE;
3069                else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3070                        gso_type |= SKB_GSO_IPXIP6;
3071                else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3072                        gso_type |= SKB_GSO_IPXIP4;
3073
3074                if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
3075                    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
3076                        int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
3077                                        sizeof(struct ipv6hdr) :
3078                                        sizeof(struct iphdr);
3079
3080                        skb_set_transport_header(skb, mac_len + nh_len);
3081                }
3082
3083                /* Match skb->protocol to new outer l3 protocol */
3084                if (skb->protocol == htons(ETH_P_IP) &&
3085                    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3086                        skb->protocol = htons(ETH_P_IPV6);
3087                else if (skb->protocol == htons(ETH_P_IPV6) &&
3088                         flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3089                        skb->protocol = htons(ETH_P_IP);
3090        }
3091
3092        if (skb_is_gso(skb)) {
3093                struct skb_shared_info *shinfo = skb_shinfo(skb);
3094
3095                /* Due to header growth, MSS needs to be downgraded. */
3096                if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3097                        skb_decrease_gso_size(shinfo, len_diff);
3098
3099                /* Header must be checked, and gso_segs recomputed. */
3100                shinfo->gso_type |= gso_type;
3101                shinfo->gso_segs = 0;
3102        }
3103
3104        return 0;
3105}
3106
3107static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
3108                              u64 flags)
3109{
3110        int ret;
3111
3112        if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO)
3113                return -EINVAL;
3114
3115        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3116                /* udp gso_size delineates datagrams, only allow if fixed */
3117                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3118                    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3119                        return -ENOTSUPP;
3120        }
3121
3122        ret = skb_unclone(skb, GFP_ATOMIC);
3123        if (unlikely(ret < 0))
3124                return ret;
3125
3126        ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3127        if (unlikely(ret < 0))
3128                return ret;
3129
3130        if (skb_is_gso(skb)) {
3131                struct skb_shared_info *shinfo = skb_shinfo(skb);
3132
3133                /* Due to header shrink, MSS can be upgraded. */
3134                if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3135                        skb_increase_gso_size(shinfo, len_diff);
3136
3137                /* Header must be checked, and gso_segs recomputed. */
3138                shinfo->gso_type |= SKB_GSO_DODGY;
3139                shinfo->gso_segs = 0;
3140        }
3141
3142        return 0;
3143}
3144
3145static u32 __bpf_skb_max_len(const struct sk_buff *skb)
3146{
3147        return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
3148                          SKB_MAX_ALLOC;
3149}
3150
3151BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3152           u32, mode, u64, flags)
3153{
3154        u32 len_cur, len_diff_abs = abs(len_diff);
3155        u32 len_min = bpf_skb_net_base_len(skb);
3156        u32 len_max = __bpf_skb_max_len(skb);
3157        __be16 proto = skb->protocol;
3158        bool shrink = len_diff < 0;
3159        u32 off;
3160        int ret;
3161
3162        if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK))
3163                return -EINVAL;
3164        if (unlikely(len_diff_abs > 0xfffU))
3165                return -EFAULT;
3166        if (unlikely(proto != htons(ETH_P_IP) &&
3167                     proto != htons(ETH_P_IPV6)))
3168                return -ENOTSUPP;
3169
3170        off = skb_mac_header_len(skb);
3171        switch (mode) {
3172        case BPF_ADJ_ROOM_NET:
3173                off += bpf_skb_net_base_len(skb);
3174                break;
3175        case BPF_ADJ_ROOM_MAC:
3176                break;
3177        default:
3178                return -ENOTSUPP;
3179        }
3180
3181        len_cur = skb->len - skb_network_offset(skb);
3182        if ((shrink && (len_diff_abs >= len_cur ||
3183                        len_cur - len_diff_abs < len_min)) ||
3184            (!shrink && (skb->len + len_diff_abs > len_max &&
3185                         !skb_is_gso(skb))))
3186                return -ENOTSUPP;
3187
3188        ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
3189                       bpf_skb_net_grow(skb, off, len_diff_abs, flags);
3190
3191        bpf_compute_data_pointers(skb);
3192        return ret;
3193}
3194
3195static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
3196        .func           = bpf_skb_adjust_room,
3197        .gpl_only       = false,
3198        .ret_type       = RET_INTEGER,
3199        .arg1_type      = ARG_PTR_TO_CTX,
3200        .arg2_type      = ARG_ANYTHING,
3201        .arg3_type      = ARG_ANYTHING,
3202        .arg4_type      = ARG_ANYTHING,
3203};
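
/*
 * Example of how a tc BPF program could use bpf_skb_adjust_room() to
 * reserve outer encapsulation headers -- an illustrative sketch; the
 * outer header contents would still be written by the program:
 *
 *	// Grow room in front of the inner network header and tell the
 *	// GSO engine this is an IPv4 + UDP encapsulation.
 *	__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
 *		      BPF_F_ADJ_ROOM_ENCAP_L4_UDP;
 *	__s32 room = sizeof(struct iphdr) + sizeof(struct udphdr);
 *
 *	if (bpf_skb_adjust_room(skb, room, BPF_ADJ_ROOM_MAC, flags))
 *		return TC_ACT_SHOT;
 *	// ...store the outer iphdr/udphdr via bpf_skb_store_bytes()...
 */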
3204
3205static u32 __bpf_skb_min_len(const struct sk_buff *skb)
3206{
3207        u32 min_len = skb_network_offset(skb);
3208
3209        if (skb_transport_header_was_set(skb))
3210                min_len = skb_transport_offset(skb);
3211        if (skb->ip_summed == CHECKSUM_PARTIAL)
3212                min_len = skb_checksum_start_offset(skb) +
3213                          skb->csum_offset + sizeof(__sum16);
3214        return min_len;
3215}
3216
3217static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
3218{
3219        unsigned int old_len = skb->len;
3220        int ret;
3221
3222        ret = __skb_grow_rcsum(skb, new_len);
3223        if (!ret)
3224                memset(skb->data + old_len, 0, new_len - old_len);
3225        return ret;
3226}
3227
3228static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
3229{
3230        return __skb_trim_rcsum(skb, new_len);
3231}
3232
3233static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
3234                                        u64 flags)
3235{
3236        u32 max_len = __bpf_skb_max_len(skb);
3237        u32 min_len = __bpf_skb_min_len(skb);
3238        int ret;
3239
3240        if (unlikely(flags || new_len > max_len || new_len < min_len))
3241                return -EINVAL;
3242        if (skb->encapsulation)
3243                return -ENOTSUPP;
3244
3245        /* The basic idea of this helper is that it performs the work
3246         * needed to either grow or trim an skb, and the eBPF program
3247         * rewrites the rest via helpers like bpf_skb_store_bytes(),
3248         * bpf_lX_csum_replace() and others rather than passing a raw
3249         * buffer here. This is a slow-path helper, intended for
3250         * replies with control messages.
3251         *
3252         * As in bpf_skb_change_proto(), we want to keep this rather
3253         * minimal and without protocol specifics so that we can
3254         * separate concerns: e.g. bpf_skb_store_bytes() should be the
3255         * only helper responsible for writing buffers.
3256         *
3257         * Since this is expected to be a slow-path operation for
3258         * control message replies, we implicitly linearize, unclone
3259         * and drop offloads from the skb here.
3260         */
3261        ret = __bpf_try_make_writable(skb, skb->len);
3262        if (!ret) {
3263                if (new_len > skb->len)
3264                        ret = bpf_skb_grow_rcsum(skb, new_len);
3265                else if (new_len < skb->len)
3266                        ret = bpf_skb_trim_rcsum(skb, new_len);
3267                if (!ret && skb_is_gso(skb))
3268                        skb_gso_reset(skb);
3269        }
3270        return ret;
3271}
3272
3273BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3274           u64, flags)
3275{
3276        int ret = __bpf_skb_change_tail(skb, new_len, flags);
3277
3278        bpf_compute_data_pointers(skb);
3279        return ret;
3280}
3281
3282static const struct bpf_func_proto bpf_skb_change_tail_proto = {
3283        .func           = bpf_skb_change_tail,
3284        .gpl_only       = false,
3285        .ret_type       = RET_INTEGER,
3286        .arg1_type      = ARG_PTR_TO_CTX,
3287        .arg2_type      = ARG_ANYTHING,
3288        .arg3_type      = ARG_ANYTHING,
3289};
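
/*
 * Example: trimming an ingress packet before turning it into a small
 * control-message style reply -- an illustrative sketch; the new
 * length and the follow-up rewrite are up to the program:
 *
 *	// Keep only the first 64 bytes; as the comment above notes,
 *	// this linearizes, unclones and drops offloads on the skb.
 *	if (bpf_skb_change_tail(skb, 64, 0))
 *		return TC_ACT_SHOT;
 *	// ...rewrite the remaining payload via bpf_skb_store_bytes()...
 */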
3290
3291BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3292           u64, flags)
3293{
3294        int ret = __bpf_skb_change_tail(skb, new_len, flags);
3295
3296        bpf_compute_data_end_sk_skb(skb);
3297        return ret;
3298}
3299
3300static const struct bpf_func_proto sk_skb_change_tail_proto = {
3301        .func           = sk_skb_change_tail,
3302        .gpl_only       = false,
3303        .ret_type       = RET_INTEGER,
3304        .arg1_type      = ARG_PTR_TO_CTX,
3305        .arg2_type      = ARG_ANYTHING,
3306        .arg3_type      = ARG_ANYTHING,
3307};
3308
3309static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
3310                                        u64 flags)
3311{
3312        u32 max_len = __bpf_skb_max_len(skb);
3313        u32 new_len = skb->len + head_room;
3314        int ret;
3315
3316        if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
3317                     new_len < skb->len))
3318                return -EINVAL;
3319
3320        ret = skb_cow(skb, head_room);
3321        if (likely(!ret)) {
3322                /* The idea behind this helper is that we currently
3323                 * only allow expanding the mac header. This means
3324                 * that skb->protocol, the network header, etc., stay
3325                 * as-is. Compared to bpf_skb_change_tail(), we're
3326                 * more flexible since we don't need to linearize or
3327                 * reset GSO. The intended user is an L3 skb that
3328                 * needs to push a mac header for redirection into an
3329                 * L2 device.
3330                 */
3331                __skb_push(skb, head_room);
3332                memset(skb->data, 0, head_room);
3333                skb_reset_mac_header(skb);
3334        }
3335
3336        return ret;
3337}
3338
3339BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
3340           u64, flags)
3341{
3342        int ret = __bpf_skb_change_head(skb, head_room, flags);
3343
3344        bpf_compute_data_pointers(skb);
3345        return ret;
3346}
3347
3348static const struct bpf_func_proto bpf_skb_change_head_proto = {
3349        .func           = bpf_skb_change_head,
3350        .gpl_only       = false,
3351        .ret_type       = RET_INTEGER,
3352        .arg1_type      = ARG_PTR_TO_CTX,
3353        .arg2_type      = ARG_ANYTHING,
3354        .arg3_type      = ARG_ANYTHING,
3355};
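
/*
 * Example: an L3 program (e.g. on a layer-3 tunnel device) pushing a
 * mac header before redirecting into an L2 device -- an illustrative
 * sketch; eth and ifindex are assumed to be prepared elsewhere:
 *
 *	struct ethhdr eth;	// filled in by the program
 *	int ifindex;		// target L2 device
 *
 *	if (bpf_skb_change_head(skb, ETH_HLEN, 0))
 *		return TC_ACT_SHOT;
 *	if (bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
 *		return TC_ACT_SHOT;
 *	return bpf_redirect(ifindex, 0);
 */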
3356
3357BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
3358           u64, flags)
3359{
3360        int ret = __bpf_skb_change_head(skb, head_room, flags);
3361
3362        bpf_compute_data_end_sk_skb(skb);
3363        return ret;
3364}
3365
3366static const struct bpf_func_proto sk_skb_change_head_proto = {
3367        .func           = sk_skb_change_head,
3368        .gpl_only       = false,
3369        .ret_type       = RET_INTEGER,
3370        .arg1_type      = ARG_PTR_TO_CTX,
3371        .arg2_type      = ARG_ANYTHING,
3372        .arg3_type      = ARG_ANYTHING,
3373};
3374static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
3375{
3376        return xdp_data_meta_unsupported(xdp) ? 0 :
3377               xdp->data - xdp->data_meta;
3378}
3379
3380BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
3381{
3382        void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3383        unsigned long metalen = xdp_get_metalen(xdp);
3384        void *data_start = xdp_frame_end + metalen;
3385        void *data = xdp->data + offset;
3386
3387        if (unlikely(data < data_start ||
3388                     data > xdp->data_end - ETH_HLEN))
3389                return -EINVAL;
3390
3391        if (metalen)
3392                memmove(xdp->data_meta + offset,
3393                        xdp->data_meta, metalen);
3394        xdp->data_meta += offset;
3395        xdp->data = data;
3396
3397        return 0;
3398}
3399
3400static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
3401        .func           = bpf_xdp_adjust_head,
3402        .gpl_only       = false,
3403        .ret_type       = RET_INTEGER,
3404        .arg1_type      = ARG_PTR_TO_CTX,
3405        .arg2_type      = ARG_ANYTHING,
3406};
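
/*
 * Example XDP usage -- an illustrative sketch; "struct enc_hdr" is a
 * made-up encapsulation header. A negative offset grows headroom in
 * front of the packet, a positive one strips bytes; either way at
 * least ETH_HLEN of packet must remain, per the check above:
 *
 *	struct enc_hdr { __u32 id; };	// hypothetical
 *
 *	SEC("xdp")
 *	int push_hdr(struct xdp_md *ctx)
 *	{
 *		struct enc_hdr *h;
 *
 *		if (bpf_xdp_adjust_head(ctx, -(int)sizeof(*h)))
 *			return XDP_ABORTED;
 *		h = (void *)(long)ctx->data;
 *		if ((void *)(h + 1) > (void *)(long)ctx->data_end)
 *			return XDP_ABORTED;	// verifier bounds check
 *		h->id = 1;
 *		return XDP_PASS;
 *	}
 */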
3407
3408BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
3409{
3410        void *data_end = xdp->data_end + offset;
3411
3412        /* only shrinking is allowed for now. */
3413        if (unlikely(offset >= 0))
3414                return -EINVAL;
3415
3416        if (unlikely(data_end < xdp->data + ETH_HLEN))
3417                return -EINVAL;
3418
3419        xdp->data_end = data_end;
3420
3421        return 0;
3422}
3423
3424static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3425        .func           = bpf_xdp_adjust_tail,
3426        .gpl_only       = false,
3427        .ret_type       = RET_INTEGER,
3428        .arg1_type      = ARG_PTR_TO_CTX,
3429        .arg2_type      = ARG_ANYTHING,
3430};
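
/*
 * Example -- an illustrative sketch. As the code above enforces, only
 * negative offsets (shrinking) are accepted here:
 *
 *	// Chop a 4-byte trailer off the end of the frame.
 *	if (bpf_xdp_adjust_tail(ctx, -4))
 *		return XDP_ABORTED;
 */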
3431
3432BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3433{
3434        void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3435        void *meta = xdp->data_meta + offset;
3436        unsigned long metalen = xdp->data - meta;
3437
3438        if (xdp_data_meta_unsupported(xdp))
3439                return -ENOTSUPP;
3440        if (unlikely(meta < xdp_frame_end ||
3441                     meta > xdp->data))
3442                return -EINVAL;
3443        if (unlikely((metalen & (sizeof(__u32) - 1)) ||
3444                     (metalen > 32)))
3445                return -EACCES;
3446
3447        xdp->data_meta = meta;
3448
3449        return 0;
3450}
3451
3452static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3453        .func           = bpf_xdp_adjust_meta,
3454        .gpl_only       = false,
3455        .ret_type       = RET_INTEGER,
3456        .arg1_type      = ARG_PTR_TO_CTX,
3457        .arg2_type      = ARG_ANYTHING,
3458};
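
/*
 * Example: passing metadata from XDP to a later consumer (e.g. a tc
 * program reading the skb metadata area) -- an illustrative sketch.
 * The size must be a multiple of 4 and at most 32 bytes, per the
 * checks above:
 *
 *	SEC("xdp")
 *	int mark_flow(struct xdp_md *ctx)
 *	{
 *		__u32 *meta;
 *
 *		if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
 *			return XDP_PASS;	// driver lacks support
 *		meta = (void *)(long)ctx->data_meta;
 *		if ((void *)(meta + 1) > (void *)(long)ctx->data)
 *			return XDP_PASS;	// verifier bounds check
 *		*meta = 0xcafe;
 *		return XDP_PASS;
 *	}
 */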
3459
3460static int __bpf_tx_xdp(struct net_device *dev,
3461                        struct bpf_map *map,
3462                        struct xdp_buff *xdp,
3463                        u32 index)
3464{
3465        struct xdp_frame *xdpf;
3466        int err, sent;
3467
3468        if (!dev->netdev_ops->ndo_xdp_xmit)
3469                return -EOPNOTSUPP;
3471
3472        err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
3473        if (unlikely(err))
3474                return err;
3475
3476        xdpf = convert_to_xdp_frame(xdp);
3477        if (unlikely(!xdpf))
3478                return -EOVERFLOW;
3479
3480        sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
3481        if (sent <= 0)
3482                return sent;
3483        return 0;
3484}
3485
3486static noinline int
3487xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
3488                     struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
3489{
3490        struct net_device *fwd;
3491        u32 index = ri->tgt_index;
3492        int err;
3493
3494        fwd = dev_get_by_index_rcu(dev_net(dev), index);
3495        ri->tgt_index = 0;
3496        if (unlikely(!fwd)) {
3497                err = -EINVAL;
3498                goto err;
3499        }
3500
3501        err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
3502        if (unlikely(err))
3503                goto err;
3504
3505        _trace_xdp_redirect(dev, xdp_prog, index);
3506        return 0;
3507err:
3508        _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3509        return err;
3510}
3511
3512static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3513                            struct bpf_map *map,
3514                            struct xdp_buff *xdp,
3515                            u32 index)
3516{
3517        int err;
3518
3519        switch (map->map_type) {
3520        case BPF_MAP_TYPE_DEVMAP:
3521        case BPF_MAP_TYPE_DEVMAP_HASH: {
3522                struct bpf_dtab_netdev *dst = fwd;
3523
3524                err = dev_map_enqueue(dst, xdp, dev_rx);
3525                if (unlikely(err))
3526                        return err;
3527                break;
3528        }
3529        case BPF_MAP_TYPE_CPUMAP: {
3530                struct bpf_cpu_map_entry *rcpu = fwd;
3531
3532                err = cpu_map_enqueue(rcpu, xdp, dev_rx);
3533                if (unlikely(err))
3534                        return err;
3535                break;
3536        }
3537        case BPF_MAP_TYPE_XSKMAP: {
3538                struct xdp_sock *xs = fwd;
3539
3540                err = __xsk_map_redirect(map, xdp, xs);
3541                return err;
3542        }
3543        default:
3544                break;
3545        }
3546        return 0;
3547}
3548
3549void xdp_do_flush_map(void)
3550{
3551        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3552        struct bpf_map *map = ri->map_to_flush;
3553
3554        ri->map_to_flush = NULL;
3555        if (map) {
3556                switch (map->map_type) {
3557                case BPF_MAP_TYPE_DEVMAP:
3558                case BPF_MAP_TYPE_DEVMAP_HASH:
3559                        __dev_map_flush(map);
3560                        break;
3561                case BPF_MAP_TYPE_CPUMAP:
3562                        __cpu_map_flush(map);
3563                        break;
3564                case BPF_MAP_TYPE_XSKMAP:
3565                        __xsk_map_flush(map);
3566                        break;
3567                default:
3568                        break;
3569                }
3570        }
3571}
3572EXPORT_SYMBOL_GPL(xdp_do_flush_map);
3573
3574static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
3575{
3576        switch (map->map_type) {
3577        case BPF_MAP_TYPE_DEVMAP:
3578                return __dev_map_lookup_elem(map, index);
3579        case BPF_MAP_TYPE_DEVMAP_HASH:
3580                return __dev_map_hash_lookup_elem(map, index);
3581        case BPF_MAP_TYPE_CPUMAP:
3582                return __cpu_map_lookup_elem(map, index);
3583        case BPF_MAP_TYPE_XSKMAP:
3584                return __xsk_map_lookup_elem(map, index);
3585        default:
3586                return NULL;
3587        }
3588}
3589
3590void bpf_clear_redirect_map(struct bpf_map *map)
3591{
3592        struct bpf_redirect_info *ri;
3593        int cpu;
3594
3595        for_each_possible_cpu(cpu) {
3596                ri = per_cpu_ptr(&bpf_redirect_info, cpu);
3597                /* Avoid polluting a remote cacheline with writes if
3598                 * not needed. Once we pass this test, we need the
3599                 * cmpxchg() to make sure it hasn't been changed in
3600                 * the meantime by a remote CPU.
3601                 */
3602                if (unlikely(READ_ONCE(ri->map) == map))
3603                        cmpxchg(&ri->map, map, NULL);
3604        }
3605}
3606
3607static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
3608                               struct bpf_prog *xdp_prog, struct bpf_map *map,
3609                               struct bpf_redirect_info *ri)
3610{
3611        u32 index = ri->tgt_index;
3612        void *fwd = ri->tgt_value;
3613        int err;
3614
3615        ri->tgt_index = 0;
3616        ri->tgt_value = NULL;
3617        WRITE_ONCE(ri->map, NULL);
3618
3619        if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
3620                xdp_do_flush_map();
3621
3622        err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
3623        if (unlikely(err))
3624                goto err;
3625
3626        ri->map_to_flush = map;
3627        _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3628        return 0;
3629err:
3630        _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3631        return err;
3632}
3633
3634int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3635                    struct bpf_prog *xdp_prog)
3636{
3637        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3638        struct bpf_map *map = READ_ONCE(ri->map);
3639
3640        if (likely(map))
3641                return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
3642
3643        return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
3644}
3645EXPORT_SYMBOL_GPL(xdp_do_redirect);
3646
3647static int xdp_do_generic_redirect_map(struct net_device *dev,
3648                                       struct sk_buff *skb,
3649                                       struct xdp_buff *xdp,
3650                                       struct bpf_prog *xdp_prog,
3651                                       struct bpf_map *map)
3652{
3653        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3654        u32 index = ri->tgt_index;
3655        void *fwd = ri->tgt_value;
3656        int err = 0;
3657
3658        ri->tgt_index = 0;
3659        ri->tgt_value = NULL;
3660        WRITE_ONCE(ri->map, NULL);
3661
3662        if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
3663            map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
3664                struct bpf_dtab_netdev *dst = fwd;
3665
3666                err = dev_map_generic_redirect(dst, skb, xdp_prog);
3667                if (unlikely(err))
3668                        goto err;
3669        } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3670                struct xdp_sock *xs = fwd;
3671
3672                err = xsk_generic_rcv(xs, xdp);
3673                if (err)
3674                        goto err;
3675                consume_skb(skb);
3676        } else {
3677                /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3678                err = -EBADRQC;
3679                goto err;
3680        }
3681
3682        _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3683        return 0;
3684err:
3685        _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3686        return err;
3687}
3688
3689int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
3690                            struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
3691{
3692        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3693        struct bpf_map *map = READ_ONCE(ri->map);
3694        u32 index = ri->tgt_index;
3695        struct net_device *fwd;
3696        int err = 0;
3697
3698        if (map)
3699                return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
3700                                                   map);
3701        ri->tgt_index = 0;
3702        fwd = dev_get_by_index_rcu(dev_net(dev), index);
3703        if (unlikely(!fwd)) {
3704                err = -EINVAL;
3705                goto err;
3706        }
3707
3708        err = xdp_ok_fwd_dev(fwd, skb->len);
3709        if (unlikely(err))
3710                goto err;
3711
3712        skb->dev = fwd;
3713        _trace_xdp_redirect(dev, xdp_prog, index);
3714        generic_xdp_tx(skb, xdp_prog);
3715        return 0;
3716err:
3717        _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3718        return err;
3719}
3720EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
3721
3722BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3723{
3724        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3725
3726        if (unlikely(flags))
3727                return XDP_ABORTED;
3728
3729        ri->flags = flags;
3730        ri->tgt_index = ifindex;
3731        ri->tgt_value = NULL;
3732        WRITE_ONCE(ri->map, NULL);
3733
3734        return XDP_REDIRECT;
3735}
3736
3737static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3738        .func           = bpf_xdp_redirect,
3739        .gpl_only       = false,
3740        .ret_type       = RET_INTEGER,
3741        .arg1_type      = ARG_ANYTHING,
3742        .arg2_type      = ARG_ANYTHING,
3743};
3744
3745BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
3746           u64, flags)
3747{
3748        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3749
3750        /* Lower bits of the flags are used as return code on lookup failure */
3751        if (unlikely(flags > XDP_TX))
3752                return XDP_ABORTED;
3753
3754        ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
3755        if (unlikely(!ri->tgt_value)) {
3756                /* If the lookup fails we want to clear out the state in the
3757                 * redirect_info struct completely, so that if an eBPF program
3758                 * performs multiple lookups, the last one always takes
3759                 * precedence.
3760                 */
3761                WRITE_ONCE(ri->map, NULL);
3762                return flags;
3763        }
3764
3765        ri->flags = flags;
3766        ri->tgt_index = ifindex;
3767        WRITE_ONCE(ri->map, map);
3768
3769        return XDP_REDIRECT;
3770}
3771
3772static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
3773        .func           = bpf_xdp_redirect_map,
3774        .gpl_only       = false,
3775        .ret_type       = RET_INTEGER,
3776        .arg1_type      = ARG_CONST_MAP_PTR,
3777        .arg2_type      = ARG_ANYTHING,
3778        .arg3_type      = ARG_ANYTHING,
3779};
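
/*
 * Example: forwarding via a devmap -- an illustrative sketch; the map
 * name and sizes are made up. Note how the lower bits of flags
 * (XDP_PASS here) become the return value when the lookup fails, as
 * implemented above:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 8);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&tx_ports, 0, XDP_PASS);
 *	}
 */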
3780
3781static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
3782                                  unsigned long off, unsigned long len)
3783{
3784        void *ptr = skb_header_pointer(skb, off, len, dst_buff);
3785
3786        if (unlikely(!ptr))
3787                return len;
3788        if (ptr != dst_buff)
3789                memcpy(dst_buff, ptr, len);
3790
3791        return 0;
3792}
3793
3794BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
3795           u64, flags, void *, meta, u64, meta_size)
3796{
3797        u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
3798
3799        if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3800                return -EINVAL;
3801        if (unlikely(skb_size > skb->len))
3802                return -EFAULT;
3803
3804        return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
3805                                bpf_skb_copy);
3806}
3807
3808static const struct bpf_func_proto bpf_skb_event_output_proto = {
3809        .func           = bpf_skb_event_output,
3810        .gpl_only       = true,
3811        .ret_type       = RET_INTEGER,
3812        .arg1_type      = ARG_PTR_TO_CTX,
3813        .arg2_type      = ARG_CONST_MAP_PTR,
3814        .arg3_type      = ARG_ANYTHING,
3815        .arg4_type      = ARG_PTR_TO_MEM,
3816        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
3817};
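
/*
 * Example: sampling packets to user space via a perf event array -- an
 * illustrative sketch; "events" and "struct meta" are made up. The
 * upper 32 bits of flags ask for the first 64 packet bytes to be
 * appended after the metadata, matching BPF_F_CTXLEN_MASK above:
 *
 *	struct meta { __u32 ifindex; __u32 len; };
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 *		__uint(key_size, sizeof(int));
 *		__uint(value_size, sizeof(int));
 *	} events SEC(".maps");
 *
 *	...
 *	struct meta m = { .ifindex = skb->ifindex, .len = skb->len };
 *	__u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);
 *
 *	bpf_perf_event_output(skb, &events, flags, &m, sizeof(m));
 */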
3818
3819static unsigned short bpf_tunnel_key_af(u64 flags)
3820{
3821        return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
3822}
3823
3824BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
3825           u32, size, u64, flags)
3826{
3827        const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3828        u8 compat[sizeof(struct bpf_tunnel_key)];
3829        void *to_orig = to;
3830        int err;
3831
3832        if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
3833                err = -EINVAL;
3834                goto err_clear;
3835        }
3836        if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
3837                err = -EPROTO;
3838                goto err_clear;
3839        }
3840        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3841                err = -EINVAL;
3842                switch (size) {
3843                case offsetof(struct bpf_tunnel_key, tunnel_label):
3844                case offsetof(struct bpf_tunnel_key, tunnel_ext):
3845                        goto set_compat;
3846                case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3847                        /* Fixup deprecated structure layouts here, so we have
3848                         * a common path later on.
3849                         */
3850                        if (ip_tunnel_info_af(info) != AF_INET)
3851                                goto err_clear;
3852set_compat:
3853                        to = (struct bpf_tunnel_key *)compat;
3854                        break;
3855                default:
3856                        goto err_clear;
3857                }
3858        }
3859
3860        to->tunnel_id = be64_to_cpu(info->key.tun_id);
3861        to->tunnel_tos = info->key.tos;
3862        to->tunnel_ttl = info->key.ttl;
3863        to->tunnel_ext = 0;
3864
3865        if (flags & BPF_F_TUNINFO_IPV6) {
3866                memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
3867                       sizeof(to->remote_ipv6));
3868                to->tunnel_label = be32_to_cpu(info->key.label);
3869        } else {
3870                to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
3871                memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
3872                to->tunnel_label = 0;
3873        }
3874
3875        if (unlikely(size != sizeof(struct bpf_tunnel_key)))
3876                memcpy(to_orig, to, size);
3877
3878        return 0;
3879err_clear:
3880        memset(to_orig, 0, size);
3881        return err;
3882}
3883
3884static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
3885        .func           = bpf_skb_get_tunnel_key,
3886        .gpl_only       = false,
3887        .ret_type       = RET_INTEGER,
3888        .arg1_type      = ARG_PTR_TO_CTX,
3889        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
3890        .arg3_type      = ARG_CONST_SIZE,
3891        .arg4_type      = ARG_ANYTHING,
3892};
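
/*
 * Example: reading the outer tunnel info on ingress from a collect_md
 * tunnel device -- an illustrative sketch; the tunnel id is made up:
 *
 *	struct bpf_tunnel_key key;
 *
 *	// flags of 0 selects the IPv4 layout; passing the full struct
 *	// avoids the compat paths above.
 *	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *		return TC_ACT_SHOT;
 *	if (key.tunnel_id == 42)
 *		return TC_ACT_OK;
 */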
3893
3894BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
3895{
3896        const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3897        int err;
3898
3899        if (unlikely(!info ||
3900                     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
3901                err = -ENOENT;
3902                goto err_clear;
3903        }
3904        if (unlikely(size < info->options_len)) {
3905                err = -ENOMEM;
3906                goto err_clear;
3907        }
3908
3909        ip_tunnel_info_opts_get(to, info);
3910        if (size > info->options_len)
3911                memset(to + info->options_len, 0, size - info->options_len);
3912
3913        return info->options_len;
3914err_clear:
3915        memset(to, 0, size);
3916        return err;
3917}
3918
3919static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
3920        .func           = bpf_skb_get_tunnel_opt,
3921        .gpl_only       = false,
3922        .ret_type       = RET_INTEGER,
3923        .arg1_type      = ARG_PTR_TO_CTX,
3924        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
3925        .arg3_type      = ARG_CONST_SIZE,
3926};
3927
3928static struct metadata_dst __percpu *md_dst;
3929
3930BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3931           const struct bpf_tunnel_key *, from, u32, size, u64, flags)
3932{
3933        struct metadata_dst *md = this_cpu_ptr(md_dst);
3934        u8 compat[sizeof(struct bpf_tunnel_key)];
3935        struct ip_tunnel_info *info;
3936
3937        if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
3938                               BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
3939                return -EINVAL;
3940        if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3941                switch (size) {
3942                case offsetof(struct bpf_tunnel_key, tunnel_label):
3943                case offsetof(struct bpf_tunnel_key, tunnel_ext):
3944                case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3945                        /* Fixup deprecated structure layouts here, so we have
3946                         * a common path later on.
3947                         */
3948                        memcpy(compat, from, size);
3949                        memset(compat + size, 0, sizeof(compat) - size);
3950                        from = (const struct bpf_tunnel_key *) compat;
3951                        break;
3952                default:
3953                        return -EINVAL;
3954                }
3955        }
3956        if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3957                     from->tunnel_ext))
3958                return -EINVAL;
3959
3960        skb_dst_drop(skb);
3961        dst_hold((struct dst_entry *) md);
3962        skb_dst_set(skb, (struct dst_entry *) md);
3963
3964        info = &md->u.tun_info;
3965        memset(info, 0, sizeof(*info));
3966        info->mode = IP_TUNNEL_INFO_TX;
3967
3968        info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
3969        if (flags & BPF_F_DONT_FRAGMENT)
3970                info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
3971        if (flags & BPF_F_ZERO_CSUM_TX)
3972                info->key.tun_flags &= ~TUNNEL_CSUM;
3973        if (flags & BPF_F_SEQ_NUMBER)
3974                info->key.tun_flags |= TUNNEL_SEQ;
3975
3976        info->key.tun_id = cpu_to_be64(from->tunnel_id);
3977        info->key.tos = from->tunnel_tos;
3978        info->key.ttl = from->tunnel_ttl;
3979
3980        if (flags & BPF_F_TUNINFO_IPV6) {
3981                info->mode |= IP_TUNNEL_INFO_IPV6;
3982                memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3983                       sizeof(from->remote_ipv6));
3984                info->key.label = cpu_to_be32(from->tunnel_label) &
3985                                  IPV6_FLOWLABEL_MASK;
3986        } else {
3987                info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
3988        }
3989
3990        return 0;
3991}
3992
3993static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
3994        .func           = bpf_skb_set_tunnel_key,
3995        .gpl_only       = false,
3996        .ret_type       = RET_INTEGER,
3997        .arg1_type      = ARG_PTR_TO_CTX,
3998        .arg2_type      = ARG_PTR_TO_MEM,
3999        .arg3_type      = ARG_CONST_SIZE,
4000        .arg4_type      = ARG_ANYTHING,
4001};
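
/*
 * Example: setting the outer tunnel info before the packet egresses a
 * collect_md tunnel device, which then builds the outer headers from
 * the attached metadata_dst -- an illustrative sketch; the id and
 * address are made up:
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	key.tunnel_id = 42;
 *	key.remote_ipv4 = 0xac100164;	// 172.16.1.100, host byte order
 *	key.tunnel_ttl = 64;
 *	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *				   BPF_F_ZERO_CSUM_TX))
 *		return TC_ACT_SHOT;
 */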
4002
4003BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
4004           const u8 *, from, u32, size)
4005{
4006        struct ip_tunnel_info *info = skb_tunnel_info(skb);
4007        const struct metadata_dst *md = this_cpu_ptr(md_dst);
4008
4009        if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
4010                return -EINVAL;
4011        if (unlikely(size > IP_TUNNEL_OPTS_MAX))
4012                return -ENOMEM;
4013
4014        ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
4015
4016        return 0;
4017}
4018
4019static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
4020        .func           = bpf_skb_set_tunnel_opt,
4021        .gpl_only       = false,
4022        .ret_type       = RET_INTEGER,
4023        .arg1_type      = ARG_PTR_TO_CTX,
4024        .arg2_type      = ARG_PTR_TO_MEM,
4025        .arg3_type      = ARG_CONST_SIZE,
4026};
4027
4028static const struct bpf_func_proto *
4029bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
4030{
4031        if (!md_dst) {
4032                struct metadata_dst __percpu *tmp;
4033
4034                tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
4035                                                METADATA_IP_TUNNEL,
4036                                                GFP_KERNEL);
4037                if (!tmp)
4038                        return NULL;
4039                if (cmpxchg(&md_dst, NULL, tmp))
4040                        metadata_dst_free_percpu(tmp);
4041        }
4042
4043        switch (which) {
4044        case BPF_FUNC_skb_set_tunnel_key:
4045                return &bpf_skb_set_tunnel_key_proto;
4046        case BPF_FUNC_skb_set_tunnel_opt:
4047                return &bpf_skb_set_tunnel_opt_proto;
4048        default:
4049                return NULL;
4050        }
4051}
4052
4053BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
4054           u32, idx)
4055{
4056        struct bpf_array *array = container_of(map, struct bpf_array, map);
4057        struct cgroup *cgrp;
4058        struct sock *sk;
4059
4060        sk = skb_to_full_sk(skb);
4061        if (!sk || !sk_fullsock(sk))
4062                return -ENOENT;
4063        if (unlikely(idx >= array->map.max_entries))
4064                return -E2BIG;
4065
4066        cgrp = READ_ONCE(array->ptrs[idx]);
4067        if (unlikely(!cgrp))
4068                return -EAGAIN;
4069
4070        return sk_under_cgroup_hierarchy(sk, cgrp);
4071}
4072
4073static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
4074        .func           = bpf_skb_under_cgroup,
4075        .gpl_only       = false,
4076        .ret_type       = RET_INTEGER,
4077        .arg1_type      = ARG_PTR_TO_CTX,
4078        .arg2_type      = ARG_CONST_MAP_PTR,
4079        .arg3_type      = ARG_ANYTHING,
4080};
4081
4082#ifdef CONFIG_SOCK_CGROUP_DATA
4083BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
4084{
4085        struct sock *sk = skb_to_full_sk(skb);
4086        struct cgroup *cgrp;
4087
4088        if (!sk || !sk_fullsock(sk))
4089                return 0;
4090
4091        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4092        return cgrp->kn->id.id;
4093}
4094
4095static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
4096        .func           = bpf_skb_cgroup_id,
4097        .gpl_only       = false,
4098        .ret_type       = RET_INTEGER,
4099        .arg1_type      = ARG_PTR_TO_CTX,
4100};
4101
4102BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
4103           ancestor_level)
4104{
4105        struct sock *sk = skb_to_full_sk(skb);
4106        struct cgroup *ancestor;
4107        struct cgroup *cgrp;
4108
4109        if (!sk || !sk_fullsock(sk))
4110                return 0;
4111
4112        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4113        ancestor = cgroup_ancestor(cgrp, ancestor_level);
4114        if (!ancestor)
4115                return 0;
4116
4117        return ancestor->kn->id.id;
4118}
4119
4120static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
4121        .func           = bpf_skb_ancestor_cgroup_id,
4122        .gpl_only       = false,
4123        .ret_type       = RET_INTEGER,
4124        .arg1_type      = ARG_PTR_TO_CTX,
4125        .arg2_type      = ARG_ANYTHING,
4126};
4127#endif
4128
4129static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4130                                  unsigned long off, unsigned long len)
4131{
4132        memcpy(dst_buff, src_buff + off, len);
4133        return 0;
4134}
4135
4136BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4137           u64, flags, void *, meta, u64, meta_size)
4138{
4139        u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4140
4141        if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4142                return -EINVAL;
4143        if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4144                return -EFAULT;
4145
4146        return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4147                                xdp_size, bpf_xdp_copy);
4148}
4149
4150static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4151        .func           = bpf_xdp_event_output,
4152        .gpl_only       = true,
4153        .ret_type       = RET_INTEGER,
4154        .arg1_type      = ARG_PTR_TO_CTX,
4155        .arg2_type      = ARG_CONST_MAP_PTR,
4156        .arg3_type      = ARG_ANYTHING,
4157        .arg4_type      = ARG_PTR_TO_MEM,
4158        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4159};
4160
4161BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4162{
4163        return skb->sk ? sock_gen_cookie(skb->sk) : 0;
4164}
4165
4166static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4167        .func           = bpf_get_socket_cookie,
4168        .gpl_only       = false,
4169        .ret_type       = RET_INTEGER,
4170        .arg1_type      = ARG_PTR_TO_CTX,
4171};
4172
4173BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4174{
4175        return sock_gen_cookie(ctx->sk);
4176}
4177
4178static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4179        .func           = bpf_get_socket_cookie_sock_addr,
4180        .gpl_only       = false,
4181        .ret_type       = RET_INTEGER,
4182        .arg1_type      = ARG_PTR_TO_CTX,
4183};
4184
4185BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4186{
4187        return sock_gen_cookie(ctx->sk);
4188}
4189
4190static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4191        .func           = bpf_get_socket_cookie_sock_ops,
4192        .gpl_only       = false,
4193        .ret_type       = RET_INTEGER,
4194        .arg1_type      = ARG_PTR_TO_CTX,
4195};
4196
4197BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4198{
4199        struct sock *sk = sk_to_full_sk(skb->sk);
4200        kuid_t kuid;
4201
4202        if (!sk || !sk_fullsock(sk))
4203                return overflowuid;
4204        kuid = sock_net_uid(sock_net(sk), sk);
4205        return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4206}
4207
4208static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4209        .func           = bpf_get_socket_uid,
4210        .gpl_only       = false,
4211        .ret_type       = RET_INTEGER,
4212        .arg1_type      = ARG_PTR_TO_CTX,
4213};
4214
4215BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
4216           struct bpf_map *, map, u64, flags, void *, data, u64, size)
4217{
4218        if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
4219                return -EINVAL;
4220
4221        return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
4222}
4223
4224static const struct bpf_func_proto bpf_sockopt_event_output_proto =  {
4225        .func           = bpf_sockopt_event_output,
4226        .gpl_only       = true,
4227        .ret_type       = RET_INTEGER,
4228        .arg1_type      = ARG_PTR_TO_CTX,
4229        .arg2_type      = ARG_CONST_MAP_PTR,
4230        .arg3_type      = ARG_ANYTHING,
4231        .arg4_type      = ARG_PTR_TO_MEM,
4232        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4233};
4234
4235BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4236           int, level, int, optname, char *, optval, int, optlen)
4237{
4238        struct sock *sk = bpf_sock->sk;
4239        int ret = 0;
4240        int val;
4241
4242        if (!sk_fullsock(sk))
4243                return -EINVAL;
4244
4245        if (level == SOL_SOCKET) {
4246                if (optlen != sizeof(int))
4247                        return -EINVAL;
4248                val = *((int *)optval);
4249
4250                /* Only some socket options are supported */
4251                switch (optname) {
4252                case SO_RCVBUF:
4253                        val = min_t(u32, val, sysctl_rmem_max);
4254                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4255                        WRITE_ONCE(sk->sk_rcvbuf,
4256                                   max_t(int, val * 2, SOCK_MIN_RCVBUF));
4257                        break;
4258                case SO_SNDBUF:
4259                        val = min_t(u32, val, sysctl_wmem_max);
4260                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4261                        WRITE_ONCE(sk->sk_sndbuf,
4262                                   max_t(int, val * 2, SOCK_MIN_SNDBUF));
4263                        break;
4264                case SO_MAX_PACING_RATE: /* 32bit version */
4265                        if (val != ~0U)
4266                                cmpxchg(&sk->sk_pacing_status,
4267                                        SK_PACING_NONE,
4268                                        SK_PACING_NEEDED);
4269                        sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4270                        sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4271                                                 sk->sk_max_pacing_rate);
4272                        break;
4273                case SO_PRIORITY:
4274                        sk->sk_priority = val;
4275                        break;
4276                case SO_RCVLOWAT:
4277                        if (val < 0)
4278                                val = INT_MAX;
4279                        WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
4280                        break;
4281                case SO_MARK:
4282                        if (sk->sk_mark != val) {
4283                                sk->sk_mark = val;
4284                                sk_dst_reset(sk);
4285                        }
4286                        break;
4287                default:
4288                        ret = -EINVAL;
4289                }
4290#ifdef CONFIG_INET
4291        } else if (level == SOL_IP) {
4292                if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4293                        return -EINVAL;
4294
4295                val = *((int *)optval);
4296                /* Only some options are supported */
4297                switch (optname) {
4298                case IP_TOS:
4299                        if (val < -1 || val > 0xff) {
4300                                ret = -EINVAL;
4301                        } else {
4302                                struct inet_sock *inet = inet_sk(sk);
4303
4304                                if (val == -1)
4305                                        val = 0;
4306                                inet->tos = val;
4307                        }
4308                        break;
4309                default:
4310                        ret = -EINVAL;
4311                }
4312#if IS_ENABLED(CONFIG_IPV6)
4313        } else if (level == SOL_IPV6) {
4314                if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4315                        return -EINVAL;
4316
4317                val = *((int *)optval);
4318                /* Only some options are supported */
4319                switch (optname) {
4320                case IPV6_TCLASS:
4321                        if (val < -1 || val > 0xff) {
4322                                ret = -EINVAL;
4323                        } else {
4324                                struct ipv6_pinfo *np = inet6_sk(sk);
4325
4326                                if (val == -1)
4327                                        val = 0;
4328                                np->tclass = val;
4329                        }
4330                        break;
4331                default:
4332                        ret = -EINVAL;
4333                }
4334#endif
4335        } else if (level == SOL_TCP &&
4336                   sk->sk_prot->setsockopt == tcp_setsockopt) {
4337                if (optname == TCP_CONGESTION) {
4338                        char name[TCP_CA_NAME_MAX];
4339                        bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
4340
4341                        strncpy(name, optval, min_t(long, optlen,
4342                                                    TCP_CA_NAME_MAX-1));
4343                        name[TCP_CA_NAME_MAX-1] = 0;
4344                        ret = tcp_set_congestion_control(sk, name, false,
4345                                                         reinit, true);
4346                } else {
4347                        struct tcp_sock *tp = tcp_sk(sk);
4348
4349                        if (optlen != sizeof(int))
4350                                return -EINVAL;
4351
4352                        val = *((int *)optval);
4353                        /* Only some options are supported */
4354                        switch (optname) {
4355                        case TCP_BPF_IW:
4356                                if (val <= 0 || tp->data_segs_out > tp->syn_data)
4357                                        ret = -EINVAL;
4358                                else
4359                                        tp->snd_cwnd = val;
4360                                break;
4361                        case TCP_BPF_SNDCWND_CLAMP:
4362                                if (val <= 0) {
4363                                        ret = -EINVAL;
4364                                } else {
4365                                        tp->snd_cwnd_clamp = val;
4366                                        tp->snd_ssthresh = val;
4367                                }
4368                                break;
4369                        case TCP_SAVE_SYN:
4370                                if (val < 0 || val > 1)
4371                                        ret = -EINVAL;
4372                                else
4373                                        tp->save_syn = val;
4374                                break;
4375                        default:
4376                                ret = -EINVAL;
4377                        }
4378                }
4379#endif
4380        } else {
4381                ret = -EINVAL;
4382        }
4383        return ret;
4384}
4385
4386static const struct bpf_func_proto bpf_setsockopt_proto = {
4387        .func           = bpf_setsockopt,
4388        .gpl_only       = false,
4389        .ret_type       = RET_INTEGER,
4390        .arg1_type      = ARG_PTR_TO_CTX,
4391        .arg2_type      = ARG_ANYTHING,
4392        .arg3_type      = ARG_ANYTHING,
4393        .arg4_type      = ARG_PTR_TO_MEM,
4394        .arg5_type      = ARG_CONST_SIZE,
4395};
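
/*
 * Example: a sockops program tuning a freshly established connection
 * -- an illustrative sketch:
 *
 *	SEC("sockops")
 *	int tune(struct bpf_sock_ops *ops)
 *	{
 *		int iw = 10;
 *		char cc[] = "bbr";
 *
 *		if (ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) {
 *			// TCP_BPF_IW only works before any data has
 *			// been sent, per the check above.
 *			bpf_setsockopt(ops, SOL_TCP, TCP_BPF_IW,
 *				       &iw, sizeof(iw));
 *			bpf_setsockopt(ops, SOL_TCP, TCP_CONGESTION,
 *				       cc, sizeof(cc));
 *		}
 *		return 1;
 *	}
 */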
4396
4397BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4398           int, level, int, optname, char *, optval, int, optlen)
4399{
4400        struct sock *sk = bpf_sock->sk;
4401
4402        if (!sk_fullsock(sk))
4403                goto err_clear;
4404#ifdef CONFIG_INET
4405        if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
4406                struct inet_connection_sock *icsk;
4407                struct tcp_sock *tp;
4408
4409                switch (optname) {
4410                case TCP_CONGESTION:
4411                        icsk = inet_csk(sk);
4412
4413                        if (!icsk->icsk_ca_ops || optlen <= 1)
4414                                goto err_clear;
4415                        strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4416                        optval[optlen - 1] = 0;
4417                        break;
4418                case TCP_SAVED_SYN:
4419                        tp = tcp_sk(sk);
4420
4421                        if (optlen <= 0 || !tp->saved_syn ||
4422                            optlen > tp->saved_syn[0])
4423                                goto err_clear;
4424                        memcpy(optval, tp->saved_syn + 1, optlen);
4425                        break;
4426                default:
4427                        goto err_clear;
4428                }
4429        } else if (level == SOL_IP) {
4430                struct inet_sock *inet = inet_sk(sk);
4431
4432                if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4433                        goto err_clear;
4434
4435                /* Only some options are supported */
4436                switch (optname) {
4437                case IP_TOS:
4438                        *((int *)optval) = (int)inet->tos;
4439                        break;
4440                default:
4441                        goto err_clear;
4442                }
4443#if IS_ENABLED(CONFIG_IPV6)
4444        } else if (level == SOL_IPV6) {
4445                struct ipv6_pinfo *np = inet6_sk(sk);
4446
4447                if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4448                        goto err_clear;
4449
4450                /* Only some options are supported */
4451                switch (optname) {
4452                case IPV6_TCLASS:
4453                        *((int *)optval) = (int)np->tclass;
4454                        break;
4455                default:
4456                        goto err_clear;
4457                }
4458#endif
4459        } else {
4460                goto err_clear;
4461        }
4462        return 0;
4463#endif
4464err_clear:
4465        memset(optval, 0, optlen);
4466        return -EINVAL;
4467}
4468
4469static const struct bpf_func_proto bpf_getsockopt_proto = {
4470        .func           = bpf_getsockopt,
4471        .gpl_only       = false,
4472        .ret_type       = RET_INTEGER,
4473        .arg1_type      = ARG_PTR_TO_CTX,
4474        .arg2_type      = ARG_ANYTHING,
4475        .arg3_type      = ARG_ANYTHING,
4476        .arg4_type      = ARG_PTR_TO_UNINIT_MEM,
4477        .arg5_type      = ARG_CONST_SIZE,
4478};
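
/* A matching sketch for the read side (illustrative only): from the same
 * sockops context, the current congestion control name can be read back;
 * the implementation above NUL-terminates it for us:
 *
 *	char cc[16] = {};
 *
 *	if (!bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, sizeof(cc)))
 *		bpf_printk("cc: %s", cc);
 */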
4479
4480BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
4481           int, argval)
4482{
4483        struct sock *sk = bpf_sock->sk;
4484        int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
4485
4486        if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
4487                return -EINVAL;
4488
4489        tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
4490
4491        return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
4492}
4493
4494static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
4495        .func           = bpf_sock_ops_cb_flags_set,
4496        .gpl_only       = false,
4497        .ret_type       = RET_INTEGER,
4498        .arg1_type      = ARG_PTR_TO_CTX,
4499        .arg2_type      = ARG_ANYTHING,
4500};
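
/* Usage sketch (illustrative): a sockops program opting in to RTO and
 * retransmit callbacks once the connection is established. Any requested
 * flag bits unknown to this kernel come back in the return value, as the
 * implementation above shows:
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_sock_ops_cb_flags_set(skops,
 *					  BPF_SOCK_OPS_RTO_CB_FLAG |
 *					  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 */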
4501
4502const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
4503EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
4504
4505BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
4506           int, addr_len)
4507{
4508#ifdef CONFIG_INET
4509        struct sock *sk = ctx->sk;
4510        int err;
4511
4512        /* Binding to a port can be expensive, so it's prohibited in this
4513         * helper; only binding to an IP address is supported.
4514         */
4515        err = -EINVAL;
4516        if (addr_len < offsetofend(struct sockaddr, sa_family))
4517                return err;
4518        if (addr->sa_family == AF_INET) {
4519                if (addr_len < sizeof(struct sockaddr_in))
4520                        return err;
4521                if (((struct sockaddr_in *)addr)->sin_port != htons(0))
4522                        return err;
4523                return __inet_bind(sk, addr, addr_len, true, false);
4524#if IS_ENABLED(CONFIG_IPV6)
4525        } else if (addr->sa_family == AF_INET6) {
4526                if (addr_len < SIN6_LEN_RFC2133)
4527                        return err;
4528                if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4529                        return err;
4530                /* ipv6_bpf_stub cannot be NULL, since this is only called
4531                 * from the bpf_cgroup_inet6_connect hook and ipv6 is loaded.
4532                 */
4533                return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4534#endif /* CONFIG_IPV6 */
4535        }
4536#endif /* CONFIG_INET */
4537
4538        return -EAFNOSUPPORT;
4539}
4540
4541static const struct bpf_func_proto bpf_bind_proto = {
4542        .func           = bpf_bind,
4543        .gpl_only       = false,
4544        .ret_type       = RET_INTEGER,
4545        .arg1_type      = ARG_PTR_TO_CTX,
4546        .arg2_type      = ARG_PTR_TO_MEM,
4547        .arg3_type      = ARG_CONST_SIZE,
4548};
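
/* Usage sketch (illustrative): a cgroup/connect4 program pinning the source
 * address but, per the comment in bpf_bind() above, never the port; assumes
 * libbpf's bpf_helpers.h and bpf_endian.h on the BPF side:
 *
 *	SEC("cgroup/connect4")
 *	int pick_src(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {
 *			.sin_family = AF_INET,
 *			.sin_addr.s_addr = bpf_htonl(0x0a000001), // 10.0.0.1
 *		};
 *
 *		bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
 *		return 1;
 *	}
 */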
4549
4550#ifdef CONFIG_XFRM
4551BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4552           struct bpf_xfrm_state *, to, u32, size, u64, flags)
4553{
4554        const struct sec_path *sp = skb_sec_path(skb);
4555        const struct xfrm_state *x;
4556
4557        if (!sp || unlikely(index >= sp->len || flags))
4558                goto err_clear;
4559
4560        x = sp->xvec[index];
4561
4562        if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4563                goto err_clear;
4564
4565        to->reqid = x->props.reqid;
4566        to->spi = x->id.spi;
4567        to->family = x->props.family;
4568        to->ext = 0;
4569
4570        if (to->family == AF_INET6) {
4571                memcpy(to->remote_ipv6, x->props.saddr.a6,
4572                       sizeof(to->remote_ipv6));
4573        } else {
4574                to->remote_ipv4 = x->props.saddr.a4;
4575                memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
4576        }
4577
4578        return 0;
4579err_clear:
4580        memset(to, 0, size);
4581        return -EINVAL;
4582}
4583
4584static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4585        .func           = bpf_skb_get_xfrm_state,
4586        .gpl_only       = false,
4587        .ret_type       = RET_INTEGER,
4588        .arg1_type      = ARG_PTR_TO_CTX,
4589        .arg2_type      = ARG_ANYTHING,
4590        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
4591        .arg4_type      = ARG_CONST_SIZE,
4592        .arg5_type      = ARG_ANYTHING,
4593};
4594#endif
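
/* Usage sketch (illustrative, CONFIG_XFRM only): a TC classifier peeking at
 * the first transform of a packet that went through the xfrm input path:
 *
 *	struct bpf_xfrm_state xs = {};
 *
 *	if (!bpf_skb_get_xfrm_state(skb, 0, &xs, sizeof(xs), 0))
 *		bpf_printk("reqid %u spi 0x%x", xs.reqid, bpf_ntohl(xs.spi));
 */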
4595
4596#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4597static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4598                                  const struct neighbour *neigh,
4599                                  const struct net_device *dev)
4600{
4601        memcpy(params->dmac, neigh->ha, ETH_ALEN);
4602        memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4603        params->h_vlan_TCI = 0;
4604        params->h_vlan_proto = 0;
4605        params->ifindex = dev->ifindex;
4606
4607        return 0;
4608}
4609#endif
4610
4611#if IS_ENABLED(CONFIG_INET)
4612static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4613                               u32 flags, bool check_mtu)
4614{
4615        struct fib_nh_common *nhc;
4616        struct in_device *in_dev;
4617        struct neighbour *neigh;
4618        struct net_device *dev;
4619        struct fib_result res;
4620        struct flowi4 fl4;
4621        int err;
4622        u32 mtu;
4623
4624        dev = dev_get_by_index_rcu(net, params->ifindex);
4625        if (unlikely(!dev))
4626                return -ENODEV;
4627
4628        /* verify forwarding is enabled on this interface */
4629        in_dev = __in_dev_get_rcu(dev);
4630        if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4631                return BPF_FIB_LKUP_RET_FWD_DISABLED;
4632
4633        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4634                fl4.flowi4_iif = 1;
4635                fl4.flowi4_oif = params->ifindex;
4636        } else {
4637                fl4.flowi4_iif = params->ifindex;
4638                fl4.flowi4_oif = 0;
4639        }
4640        fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4641        fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4642        fl4.flowi4_flags = 0;
4643
4644        fl4.flowi4_proto = params->l4_protocol;
4645        fl4.daddr = params->ipv4_dst;
4646        fl4.saddr = params->ipv4_src;
4647        fl4.fl4_sport = params->sport;
4648        fl4.fl4_dport = params->dport;
4649
4650        if (flags & BPF_FIB_LOOKUP_DIRECT) {
4651                u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4652                struct fib_table *tb;
4653
4654                tb = fib_get_table(net, tbid);
4655                if (unlikely(!tb))
4656                        return BPF_FIB_LKUP_RET_NOT_FWDED;
4657
4658                err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4659        } else {
4660                fl4.flowi4_mark = 0;
4661                fl4.flowi4_secid = 0;
4662                fl4.flowi4_tun_key.tun_id = 0;
4663                fl4.flowi4_uid = sock_net_uid(net, NULL);
4664
4665                err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4666        }
4667
4668        if (err) {
4669                /* map fib lookup errors to the matching BPF_FIB_LKUP_RET_* code */
4670                if (err == -EINVAL)
4671                        return BPF_FIB_LKUP_RET_BLACKHOLE;
4672                if (err == -EHOSTUNREACH)
4673                        return BPF_FIB_LKUP_RET_UNREACHABLE;
4674                if (err == -EACCES)
4675                        return BPF_FIB_LKUP_RET_PROHIBIT;
4676
4677                return BPF_FIB_LKUP_RET_NOT_FWDED;
4678        }
4679
4680        if (res.type != RTN_UNICAST)
4681                return BPF_FIB_LKUP_RET_NOT_FWDED;
4682
4683        if (fib_info_num_path(res.fi) > 1)
4684                fib_select_path(net, &res, &fl4, NULL);
4685
4686        if (check_mtu) {
4687                mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
4688                if (params->tot_len > mtu)
4689                        return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4690        }
4691
4692        nhc = res.nhc;
4693
4694        /* do not handle lwt encaps right now */
4695        if (nhc->nhc_lwtstate)
4696                return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4697
4698        dev = nhc->nhc_dev;
4699
4700        params->rt_metric = res.fi->fib_priority;
4701
4702        /* xdp and cls_bpf programs are run in RCU-bh so
4703         * rcu_read_lock_bh is not needed here
4704         */
4705        if (likely(nhc->nhc_gw_family != AF_INET6)) {
4706                if (nhc->nhc_gw_family)
4707                        params->ipv4_dst = nhc->nhc_gw.ipv4;
4708
4709                neigh = __ipv4_neigh_lookup_noref(dev,
4710                                                 (__force u32)params->ipv4_dst);
4711        } else {
4712                struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
4713
4714                params->family = AF_INET6;
4715                *dst = nhc->nhc_gw.ipv6;
4716                neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4717        }
4718
4719        if (!neigh)
4720                return BPF_FIB_LKUP_RET_NO_NEIGH;
4721
4722        return bpf_fib_set_fwd_params(params, neigh, dev);
4723}
4724#endif
4725
4726#if IS_ENABLED(CONFIG_IPV6)
4727static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4728                               u32 flags, bool check_mtu)
4729{
4730        struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4731        struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
4732        struct fib6_result res = {};
4733        struct neighbour *neigh;
4734        struct net_device *dev;
4735        struct inet6_dev *idev;
4736        struct flowi6 fl6;
4737        int strict = 0;
4738        int oif, err;
4739        u32 mtu;
4740
4741        /* link-local addresses are never forwarded */
4742        if (rt6_need_strict(dst) || rt6_need_strict(src))
4743                return BPF_FIB_LKUP_RET_NOT_FWDED;
4744
4745        dev = dev_get_by_index_rcu(net, params->ifindex);
4746        if (unlikely(!dev))
4747                return -ENODEV;
4748
4749        idev = __in6_dev_get_safely(dev);
4750        if (unlikely(!idev || !idev->cnf.forwarding))
4751                return BPF_FIB_LKUP_RET_FWD_DISABLED;
4752
4753        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4754                fl6.flowi6_iif = 1;
4755                oif = fl6.flowi6_oif = params->ifindex;
4756        } else {
4757                oif = fl6.flowi6_iif = params->ifindex;
4758                fl6.flowi6_oif = 0;
4759                strict = RT6_LOOKUP_F_HAS_SADDR;
4760        }
4761        fl6.flowlabel = params->flowinfo;
4762        fl6.flowi6_scope = 0;
4763        fl6.flowi6_flags = 0;
4764        fl6.mp_hash = 0;
4765
4766        fl6.flowi6_proto = params->l4_protocol;
4767        fl6.daddr = *dst;
4768        fl6.saddr = *src;
4769        fl6.fl6_sport = params->sport;
4770        fl6.fl6_dport = params->dport;
4771
4772        if (flags & BPF_FIB_LOOKUP_DIRECT) {
4773                u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4774                struct fib6_table *tb;
4775
4776                tb = ipv6_stub->fib6_get_table(net, tbid);
4777                if (unlikely(!tb))
4778                        return BPF_FIB_LKUP_RET_NOT_FWDED;
4779
4780                err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
4781                                                   strict);
4782        } else {
4783                fl6.flowi6_mark = 0;
4784                fl6.flowi6_secid = 0;
4785                fl6.flowi6_tun_key.tun_id = 0;
4786                fl6.flowi6_uid = sock_net_uid(net, NULL);
4787
4788                err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
4789        }
4790
4791        if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
4792                     res.f6i == net->ipv6.fib6_null_entry))
4793                return BPF_FIB_LKUP_RET_NOT_FWDED;
4794
4795        switch (res.fib6_type) {
4796        /* only unicast is forwarded */
4797        case RTN_UNICAST:
4798                break;
4799        case RTN_BLACKHOLE:
4800                return BPF_FIB_LKUP_RET_BLACKHOLE;
4801        case RTN_UNREACHABLE:
4802                return BPF_FIB_LKUP_RET_UNREACHABLE;
4803        case RTN_PROHIBIT:
4804                return BPF_FIB_LKUP_RET_PROHIBIT;
4805        default:
4806                return BPF_FIB_LKUP_RET_NOT_FWDED;
4807        }
4808
4809        ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
4810                                    fl6.flowi6_oif != 0, NULL, strict);
4811
4812        if (check_mtu) {
4813                mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
4814                if (params->tot_len > mtu)
4815                        return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4816        }
4817
4818        if (res.nh->fib_nh_lws)
4819                return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4820
4821        if (res.nh->fib_nh_gw_family)
4822                *dst = res.nh->fib_nh_gw6;
4823
4824        dev = res.nh->fib_nh_dev;
4825        params->rt_metric = res.f6i->fib6_metric;
4826
4827        /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
4828         * not needed here.
4829         */
4830        neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4831        if (!neigh)
4832                return BPF_FIB_LKUP_RET_NO_NEIGH;
4833
4834        return bpf_fib_set_fwd_params(params, neigh, dev);
4835}
4836#endif
4837
4838BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4839           struct bpf_fib_lookup *, params, int, plen, u32, flags)
4840{
4841        if (plen < sizeof(*params))
4842                return -EINVAL;
4843
4844        if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4845                return -EINVAL;
4846
4847        switch (params->family) {
4848#if IS_ENABLED(CONFIG_INET)
4849        case AF_INET:
4850                return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4851                                           flags, true);
4852#endif
4853#if IS_ENABLED(CONFIG_IPV6)
4854        case AF_INET6:
4855                return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4856                                           flags, true);
4857#endif
4858        }
4859        return -EAFNOSUPPORT;
4860}
4861
4862static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4863        .func           = bpf_xdp_fib_lookup,
4864        .gpl_only       = true,
4865        .ret_type       = RET_INTEGER,
4866        .arg1_type      = ARG_PTR_TO_CTX,
4867        .arg2_type      = ARG_PTR_TO_MEM,
4868        .arg3_type      = ARG_CONST_SIZE,
4869        .arg4_type      = ARG_ANYTHING,
4870};
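
/* Usage sketch: the XDP forwarding pattern this helper was added for,
 * condensed from samples/bpf/xdp_fwd_kern.c (IPv4 only; the TTL decrement
 * a real forwarder must also do is elided):
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		void *data_end = (void *)(long)ctx->data_end;
 *		void *data = (void *)(long)ctx->data;
 *		struct ethhdr *eth = data;
 *		struct iphdr *iph = data + sizeof(*eth);
 *		struct bpf_fib_lookup fib = {};
 *
 *		if ((void *)(iph + 1) > data_end ||
 *		    eth->h_proto != bpf_htons(ETH_P_IP))
 *			return XDP_PASS;
 *
 *		fib.family	= AF_INET;
 *		fib.tos		= iph->tos;
 *		fib.l4_protocol	= iph->protocol;
 *		fib.tot_len	= bpf_ntohs(iph->tot_len);
 *		fib.ipv4_src	= iph->saddr;
 *		fib.ipv4_dst	= iph->daddr;
 *		fib.ifindex	= ctx->ingress_ifindex;
 *
 *		if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) !=
 *		    BPF_FIB_LKUP_RET_SUCCESS)
 *			return XDP_PASS;
 *
 *		__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
 *		__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 */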
4871
4872BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4873           struct bpf_fib_lookup *, params, int, plen, u32, flags)
4874{
4875        struct net *net = dev_net(skb->dev);
4876        int rc = -EAFNOSUPPORT;
4877
4878        if (plen < sizeof(*params))
4879                return -EINVAL;
4880
4881        if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4882                return -EINVAL;
4883
4884        switch (params->family) {
4885#if IS_ENABLED(CONFIG_INET)
4886        case AF_INET:
4887                rc = bpf_ipv4_fib_lookup(net, params, flags, false);
4888                break;
4889#endif
4890#if IS_ENABLED(CONFIG_IPV6)
4891        case AF_INET6:
4892                rc = bpf_ipv6_fib_lookup(net, params, flags, false);
4893                break;
4894#endif
4895        }
4896
4897        if (!rc) {
4898                struct net_device *dev;
4899
4900                dev = dev_get_by_index_rcu(net, params->ifindex);
4901                if (!is_skb_forwardable(dev, skb))
4902                        rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
4903        }
4904
4905        return rc;
4906}
4907
4908static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4909        .func           = bpf_skb_fib_lookup,
4910        .gpl_only       = true,
4911        .ret_type       = RET_INTEGER,
4912        .arg1_type      = ARG_PTR_TO_CTX,
4913        .arg2_type      = ARG_PTR_TO_MEM,
4914        .arg3_type      = ARG_CONST_SIZE,
4915        .arg4_type      = ARG_ANYTHING,
4916};
4917
4918#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4919static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
4920{
4921        int err;
4922        struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
4923
4924        if (!seg6_validate_srh(srh, len))
4925                return -EINVAL;
4926
4927        switch (type) {
4928        case BPF_LWT_ENCAP_SEG6_INLINE:
4929                if (skb->protocol != htons(ETH_P_IPV6))
4930                        return -EBADMSG;
4931
4932                err = seg6_do_srh_inline(skb, srh);
4933                break;
4934        case BPF_LWT_ENCAP_SEG6:
4935                skb_reset_inner_headers(skb);
4936                skb->encapsulation = 1;
4937                err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
4938                break;
4939        default:
4940                return -EINVAL;
4941        }
4942
4943        bpf_compute_data_pointers(skb);
4944        if (err)
4945                return err;
4946
4947        ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4948        skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4949
4950        return seg6_lookup_nexthop(skb, NULL, 0);
4951}
4952#endif /* CONFIG_IPV6_SEG6_BPF */
4953
4954#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4955static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
4956                             bool ingress)
4957{
4958        return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
4959}
4960#endif
4961
4962BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
4963           u32, len)
4964{
4965        switch (type) {
4966#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4967        case BPF_LWT_ENCAP_SEG6:
4968        case BPF_LWT_ENCAP_SEG6_INLINE:
4969                return bpf_push_seg6_encap(skb, type, hdr, len);
4970#endif
4971#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4972        case BPF_LWT_ENCAP_IP:
4973                return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
4974#endif
4975        default:
4976                return -EINVAL;
4977        }
4978}
4979
4980BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
4981           void *, hdr, u32, len)
4982{
4983        switch (type) {
4984#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4985        case BPF_LWT_ENCAP_IP:
4986                return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
4987#endif
4988        default:
4989                return -EINVAL;
4990        }
4991}
4992
4993static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
4994        .func           = bpf_lwt_in_push_encap,
4995        .gpl_only       = false,
4996        .ret_type       = RET_INTEGER,
4997        .arg1_type      = ARG_PTR_TO_CTX,
4998        .arg2_type      = ARG_ANYTHING,
4999        .arg3_type      = ARG_PTR_TO_MEM,
5000        .arg4_type      = ARG_CONST_SIZE
5001};
5002
5003static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
5004        .func           = bpf_lwt_xmit_push_encap,
5005        .gpl_only       = false,
5006        .ret_type       = RET_INTEGER,
5007        .arg1_type      = ARG_PTR_TO_CTX,
5008        .arg2_type      = ARG_ANYTHING,
5009        .arg3_type      = ARG_PTR_TO_MEM,
5010        .arg4_type      = ARG_CONST_SIZE
5011};
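
/* Usage sketch (illustrative): an lwt_in program, attached with
 * "ip route add ... encap bpf in obj prog.o", prepending an inline SRH
 * carrying a single segment to IPv6 traffic:
 *
 *	char buf[sizeof(struct ipv6_sr_hdr) + sizeof(struct in6_addr)] = {};
 *	struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)buf;
 *
 *	srh->type = 4;			// IPV6_SRCRT_TYPE_4 (SRH)
 *	srh->hdrlen = 2;		// (sizeof(buf) >> 3) - 1
 *	// ...fill srh->segments[0] with the segment to insert...
 *
 *	return bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
 *				  srh, sizeof(buf)) ? BPF_DROP : BPF_OK;
 */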
5012
5013#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5014BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
5015           const void *, from, u32, len)
5016{
5017        struct seg6_bpf_srh_state *srh_state =
5018                this_cpu_ptr(&seg6_bpf_srh_states);
5019        struct ipv6_sr_hdr *srh = srh_state->srh;
5020        void *srh_tlvs, *srh_end, *ptr;
5021        int srhoff = 0;
5022
5023        if (srh == NULL)
5024                return -EINVAL;
5025
5026        srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
5027        srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
5028
5029        ptr = skb->data + offset;
5030        if (ptr >= srh_tlvs && ptr + len <= srh_end)
5031                srh_state->valid = false;
5032        else if (ptr < (void *)&srh->flags ||
5033                 ptr + len > (void *)&srh->segments)
5034                return -EFAULT;
5035
5036        if (unlikely(bpf_try_make_writable(skb, offset + len)))
5037                return -EFAULT;
5038        if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5039                return -EINVAL;
5040        srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5041
5042        memcpy(skb->data + offset, from, len);
5043        return 0;
5044}
5045
5046static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
5047        .func           = bpf_lwt_seg6_store_bytes,
5048        .gpl_only       = false,
5049        .ret_type       = RET_INTEGER,
5050        .arg1_type      = ARG_PTR_TO_CTX,
5051        .arg2_type      = ARG_ANYTHING,
5052        .arg3_type      = ARG_PTR_TO_MEM,
5053        .arg4_type      = ARG_CONST_SIZE
5054};
5055
5056static void bpf_update_srh_state(struct sk_buff *skb)
5057{
5058        struct seg6_bpf_srh_state *srh_state =
5059                this_cpu_ptr(&seg6_bpf_srh_states);
5060        int srhoff = 0;
5061
5062        if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
5063                srh_state->srh = NULL;
5064        } else {
5065                srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5066                srh_state->hdrlen = srh_state->srh->hdrlen << 3;
5067                srh_state->valid = true;
5068        }
5069}
5070
5071BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
5072           u32, action, void *, param, u32, param_len)
5073{
5074        struct seg6_bpf_srh_state *srh_state =
5075                this_cpu_ptr(&seg6_bpf_srh_states);
5076        int hdroff = 0;
5077        int err;
5078
5079        switch (action) {
5080        case SEG6_LOCAL_ACTION_END_X:
5081                if (!seg6_bpf_has_valid_srh(skb))
5082                        return -EBADMSG;
5083                if (param_len != sizeof(struct in6_addr))
5084                        return -EINVAL;
5085                return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
5086        case SEG6_LOCAL_ACTION_END_T:
5087                if (!seg6_bpf_has_valid_srh(skb))
5088                        return -EBADMSG;
5089                if (param_len != sizeof(int))
5090                        return -EINVAL;
5091                return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5092        case SEG6_LOCAL_ACTION_END_DT6:
5093                if (!seg6_bpf_has_valid_srh(skb))
5094                        return -EBADMSG;
5095                if (param_len != sizeof(int))
5096                        return -EINVAL;
5097
5098                if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
5099                        return -EBADMSG;
5100                if (!pskb_pull(skb, hdroff))
5101                        return -EBADMSG;
5102
5103                skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
5104                skb_reset_network_header(skb);
5105                skb_reset_transport_header(skb);
5106                skb->encapsulation = 0;
5107
5108                bpf_compute_data_pointers(skb);
5109                bpf_update_srh_state(skb);
5110                return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5111        case SEG6_LOCAL_ACTION_END_B6:
5112                if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5113                        return -EBADMSG;
5114                err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
5115                                          param, param_len);
5116                if (!err)
5117                        bpf_update_srh_state(skb);
5118
5119                return err;
5120        case SEG6_LOCAL_ACTION_END_B6_ENCAP:
5121                if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5122                        return -EBADMSG;
5123                err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
5124                                          param, param_len);
5125                if (!err)
5126                        bpf_update_srh_state(skb);
5127
5128                return err;
5129        default:
5130                return -EINVAL;
5131        }
5132}
5133
5134static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
5135        .func           = bpf_lwt_seg6_action,
5136        .gpl_only       = false,
5137        .ret_type       = RET_INTEGER,
5138        .arg1_type      = ARG_PTR_TO_CTX,
5139        .arg2_type      = ARG_ANYTHING,
5140        .arg3_type      = ARG_PTR_TO_MEM,
5141        .arg4_type      = ARG_CONST_SIZE
5142};
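
/* Usage sketch (illustrative): an End.X behaviour implemented in BPF,
 * attached with "ip -6 route add ... encap seg6local action End.BPF ...";
 * the next hop would typically come from a map:
 *
 *	struct in6_addr nh = {};
 *
 *	// ...fill nh, e.g. from a map lookup...
 *	if (bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X,
 *				&nh, sizeof(nh)))
 *		return BPF_DROP;
 *	return BPF_REDIRECT;
 */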
5143
5144BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
5145           s32, len)
5146{
5147        struct seg6_bpf_srh_state *srh_state =
5148                this_cpu_ptr(&seg6_bpf_srh_states);
5149        struct ipv6_sr_hdr *srh = srh_state->srh;
5150        void *srh_end, *srh_tlvs, *ptr;
5151        struct ipv6hdr *hdr;
5152        int srhoff = 0;
5153        int ret;
5154
5155        if (unlikely(srh == NULL))
5156                return -EINVAL;
5157
5158        srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
5159                        ((srh->first_segment + 1) << 4));
5160        srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
5161                        srh_state->hdrlen);
5162        ptr = skb->data + offset;
5163
5164        if (unlikely(ptr < srh_tlvs || ptr > srh_end))
5165                return -EFAULT;
5166        if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
5167                return -EFAULT;
5168
5169        if (len > 0) {
5170                ret = skb_cow_head(skb, len);
5171                if (unlikely(ret < 0))
5172                        return ret;
5173
5174                ret = bpf_skb_net_hdr_push(skb, offset, len);
5175        } else {
5176                ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
5177        }
5178
5179        bpf_compute_data_pointers(skb);
5180        if (unlikely(ret < 0))
5181                return ret;
5182
5183        hdr = (struct ipv6hdr *)skb->data;
5184        hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5185
5186        if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5187                return -EINVAL;
5188        srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5189        srh_state->hdrlen += len;
5190        srh_state->valid = false;
5191        return 0;
5192}
5193
5194static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
5195        .func           = bpf_lwt_seg6_adjust_srh,
5196        .gpl_only       = false,
5197        .ret_type       = RET_INTEGER,
5198        .arg1_type      = ARG_PTR_TO_CTX,
5199        .arg2_type      = ARG_ANYTHING,
5200        .arg3_type      = ARG_ANYTHING,
5201};
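
/* Usage sketch (illustrative): appending an 8-byte padding TLV right after
 * the last segment, in the spirit of the lwt_seg6local selftests; assumes
 * the SRH directly follows the IPv6 header and srh points at it:
 *
 *	__u8 pad[8] = { 4, 6, 0, 0, 0, 0, 0, 0 };	// Pad-N TLV
 *	__u32 off = sizeof(struct ipv6hdr) + sizeof(struct ipv6_sr_hdr) +
 *		    (srh->first_segment + 1) * sizeof(struct in6_addr);
 *
 *	if (bpf_lwt_seg6_adjust_srh(skb, off, sizeof(pad)) ||
 *	    bpf_lwt_seg6_store_bytes(skb, off, pad, sizeof(pad)))
 *		return BPF_DROP;
 */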
5202#endif /* CONFIG_IPV6_SEG6_BPF */
5203
5204#ifdef CONFIG_INET
5205static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
5206                              int dif, int sdif, u8 family, u8 proto)
5207{
5208        bool refcounted = false;
5209        struct sock *sk = NULL;
5210
5211        if (family == AF_INET) {
5212                __be32 src4 = tuple->ipv4.saddr;
5213                __be32 dst4 = tuple->ipv4.daddr;
5214
5215                if (proto == IPPROTO_TCP)
5216                        sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
5217                                           src4, tuple->ipv4.sport,
5218                                           dst4, tuple->ipv4.dport,
5219                                           dif, sdif, &refcounted);
5220                else
5221                        sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
5222                                               dst4, tuple->ipv4.dport,
5223                                               dif, sdif, &udp_table, NULL);
5224#if IS_ENABLED(CONFIG_IPV6)
5225        } else {
5226                struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
5227                struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
5228
5229                if (proto == IPPROTO_TCP)
5230                        sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
5231                                            src6, tuple->ipv6.sport,
5232                                            dst6, ntohs(tuple->ipv6.dport),
5233                                            dif, sdif, &refcounted);
5234                else if (likely(ipv6_bpf_stub))
5235                        sk = ipv6_bpf_stub->udp6_lib_lookup(net,
5236                                                            src6, tuple->ipv6.sport,
5237                                                            dst6, tuple->ipv6.dport,
5238                                                            dif, sdif,
5239                                                            &udp_table, NULL);
5240#endif
5241        }
5242
5243        if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
5244                WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
5245                sk = NULL;
5246        }
5247        return sk;
5248}
5249
5250/* __bpf_skc_lookup performs the core lookup for different types of sockets,
5251 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
5252 * The BPF_CALL wrappers below cast the returned socket to an 'unsigned long'
5253 * to satisfy their BPF_CALL declarations.
5254 */
5255static struct sock *
5256__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5257                 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5258                 u64 flags)
5259{
5260        struct sock *sk = NULL;
5261        u8 family = AF_UNSPEC;
5262        struct net *net;
5263        int sdif;
5264
5265        if (len == sizeof(tuple->ipv4))
5266                family = AF_INET;
5267        else if (len == sizeof(tuple->ipv6))
5268                family = AF_INET6;
5269        else
5270                return NULL;
5271
5272        if (unlikely(family == AF_UNSPEC || flags ||
5273                     !((s32)netns_id < 0 || netns_id <= S32_MAX)))
5274                goto out;
5275
5276        if (family == AF_INET)
5277                sdif = inet_sdif(skb);
5278        else
5279                sdif = inet6_sdif(skb);
5280
5281        if ((s32)netns_id < 0) {
5282                net = caller_net;
5283                sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5284        } else {
5285                net = get_net_ns_by_id(caller_net, netns_id);
5286                if (unlikely(!net))
5287                        goto out;
5288                sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5289                put_net(net);
5290        }
5291
5292out:
5293        return sk;
5294}
5295
5296static struct sock *
5297__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5298                struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5299                u64 flags)
5300{
5301        struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
5302                                           ifindex, proto, netns_id, flags);
5303
5304        if (sk) {
5305                sk = sk_to_full_sk(sk);
5306                if (!sk_fullsock(sk)) {
5307                        if (!sock_flag(sk, SOCK_RCU_FREE))
5308                                sock_gen_put(sk);
5309                        return NULL;
5310                }
5311        }
5312
5313        return sk;
5314}
5315
5316static struct sock *
5317bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5318               u8 proto, u64 netns_id, u64 flags)
5319{
5320        struct net *caller_net;
5321        int ifindex;
5322
5323        if (skb->dev) {
5324                caller_net = dev_net(skb->dev);
5325                ifindex = skb->dev->ifindex;
5326        } else {
5327                caller_net = sock_net(skb->sk);
5328                ifindex = 0;
5329        }
5330
5331        return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
5332                                netns_id, flags);
5333}
5334
5335static struct sock *
5336bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5337              u8 proto, u64 netns_id, u64 flags)
5338{
5339        struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
5340                                         flags);
5341
5342        if (sk) {
5343                sk = sk_to_full_sk(sk);
5344                if (!sk_fullsock(sk)) {
5345                        if (!sock_flag(sk, SOCK_RCU_FREE))
5346                                sock_gen_put(sk);
5347                        return NULL;
5348                }
5349        }
5350
5351        return sk;
5352}
5353
5354BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
5355           struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5356{
5357        return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
5358                                             netns_id, flags);
5359}
5360
5361static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
5362        .func           = bpf_skc_lookup_tcp,
5363        .gpl_only       = false,
5364        .pkt_access     = true,
5365        .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5366        .arg1_type      = ARG_PTR_TO_CTX,
5367        .arg2_type      = ARG_PTR_TO_MEM,
5368        .arg3_type      = ARG_CONST_SIZE,
5369        .arg4_type      = ARG_ANYTHING,
5370        .arg5_type      = ARG_ANYTHING,
5371};
5372
5373BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
5374           struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5375{
5376        return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
5377                                            netns_id, flags);
5378}
5379
5380static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
5381        .func           = bpf_sk_lookup_tcp,
5382        .gpl_only       = false,
5383        .pkt_access     = true,
5384        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5385        .arg1_type      = ARG_PTR_TO_CTX,
5386        .arg2_type      = ARG_PTR_TO_MEM,
5387        .arg3_type      = ARG_CONST_SIZE,
5388        .arg4_type      = ARG_ANYTHING,
5389        .arg5_type      = ARG_ANYTHING,
5390};
5391
5392BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
5393           struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5394{
5395        return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
5396                                            netns_id, flags);
5397}
5398
5399static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
5400        .func           = bpf_sk_lookup_udp,
5401        .gpl_only       = false,
5402        .pkt_access     = true,
5403        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5404        .arg1_type      = ARG_PTR_TO_CTX,
5405        .arg2_type      = ARG_PTR_TO_MEM,
5406        .arg3_type      = ARG_CONST_SIZE,
5407        .arg4_type      = ARG_ANYTHING,
5408        .arg5_type      = ARG_ANYTHING,
5409};
5410
5411BPF_CALL_1(bpf_sk_release, struct sock *, sk)
5412{
5413        if (!sock_flag(sk, SOCK_RCU_FREE))
5414                sock_gen_put(sk);
5415        return 0;
5416}
5417
5418static const struct bpf_func_proto bpf_sk_release_proto = {
5419        .func           = bpf_sk_release,
5420        .gpl_only       = false,
5421        .ret_type       = RET_INTEGER,
5422        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5423};
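
/* Usage sketch: the acquire/release pairing the verifier enforces on these
 * lookup helpers, from a TC classifier that has already validated an
 * IPv4/TCP header (iph/tcph are illustrative pointers into the packet):
 *
 *	struct bpf_sock_tuple tuple = {
 *		.ipv4.saddr = iph->saddr,
 *		.ipv4.daddr = iph->daddr,
 *		.ipv4.sport = tcph->source,
 *		.ipv4.dport = tcph->dest,
 *	};
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		// ...inspect sk->state, sk->bound_dev_if, ...
 *		bpf_sk_release(sk);
 *	}
 */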
5424
5425BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
5426           struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5427{
5428        struct net *caller_net = dev_net(ctx->rxq->dev);
5429        int ifindex = ctx->rxq->dev->ifindex;
5430
5431        return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5432                                              ifindex, IPPROTO_UDP, netns_id,
5433                                              flags);
5434}
5435
5436static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
5437        .func           = bpf_xdp_sk_lookup_udp,
5438        .gpl_only       = false,
5439        .pkt_access     = true,
5440        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5441        .arg1_type      = ARG_PTR_TO_CTX,
5442        .arg2_type      = ARG_PTR_TO_MEM,
5443        .arg3_type      = ARG_CONST_SIZE,
5444        .arg4_type      = ARG_ANYTHING,
5445        .arg5_type      = ARG_ANYTHING,
5446};
5447
5448BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
5449           struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5450{
5451        struct net *caller_net = dev_net(ctx->rxq->dev);
5452        int ifindex = ctx->rxq->dev->ifindex;
5453
5454        return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
5455                                               ifindex, IPPROTO_TCP, netns_id,
5456                                               flags);
5457}
5458
5459static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
5460        .func           = bpf_xdp_skc_lookup_tcp,
5461        .gpl_only       = false,
5462        .pkt_access     = true,
5463        .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5464        .arg1_type      = ARG_PTR_TO_CTX,
5465        .arg2_type      = ARG_PTR_TO_MEM,
5466        .arg3_type      = ARG_CONST_SIZE,
5467        .arg4_type      = ARG_ANYTHING,
5468        .arg5_type      = ARG_ANYTHING,
5469};
5470
5471BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
5472           struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5473{
5474        struct net *caller_net = dev_net(ctx->rxq->dev);
5475        int ifindex = ctx->rxq->dev->ifindex;
5476
5477        return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5478                                              ifindex, IPPROTO_TCP, netns_id,
5479                                              flags);
5480}
5481
5482static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
5483        .func           = bpf_xdp_sk_lookup_tcp,
5484        .gpl_only       = false,
5485        .pkt_access     = true,
5486        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5487        .arg1_type      = ARG_PTR_TO_CTX,
5488        .arg2_type      = ARG_PTR_TO_MEM,
5489        .arg3_type      = ARG_CONST_SIZE,
5490        .arg4_type      = ARG_ANYTHING,
5491        .arg5_type      = ARG_ANYTHING,
5492};
5493
5494BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5495           struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5496{
5497        return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
5498                                               sock_net(ctx->sk), 0,
5499                                               IPPROTO_TCP, netns_id, flags);
5500}
5501
5502static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
5503        .func           = bpf_sock_addr_skc_lookup_tcp,
5504        .gpl_only       = false,
5505        .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5506        .arg1_type      = ARG_PTR_TO_CTX,
5507        .arg2_type      = ARG_PTR_TO_MEM,
5508        .arg3_type      = ARG_CONST_SIZE,
5509        .arg4_type      = ARG_ANYTHING,
5510        .arg5_type      = ARG_ANYTHING,
5511};
5512
5513BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5514           struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5515{
5516        return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5517                                              sock_net(ctx->sk), 0, IPPROTO_TCP,
5518                                              netns_id, flags);
5519}
5520
5521static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
5522        .func           = bpf_sock_addr_sk_lookup_tcp,
5523        .gpl_only       = false,
5524        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5525        .arg1_type      = ARG_PTR_TO_CTX,
5526        .arg2_type      = ARG_PTR_TO_MEM,
5527        .arg3_type      = ARG_CONST_SIZE,
5528        .arg4_type      = ARG_ANYTHING,
5529        .arg5_type      = ARG_ANYTHING,
5530};
5531
5532BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
5533           struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5534{
5535        return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5536                                              sock_net(ctx->sk), 0, IPPROTO_UDP,
5537                                              netns_id, flags);
5538}
5539
5540static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
5541        .func           = bpf_sock_addr_sk_lookup_udp,
5542        .gpl_only       = false,
5543        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5544        .arg1_type      = ARG_PTR_TO_CTX,
5545        .arg2_type      = ARG_PTR_TO_MEM,
5546        .arg3_type      = ARG_CONST_SIZE,
5547        .arg4_type      = ARG_ANYTHING,
5548        .arg5_type      = ARG_ANYTHING,
5549};
5550
5551bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
5552                                  struct bpf_insn_access_aux *info)
5553{
5554        if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
5555                                          icsk_retransmits))
5556                return false;
5557
5558        if (off % size != 0)
5559                return false;
5560
5561        switch (off) {
5562        case offsetof(struct bpf_tcp_sock, bytes_received):
5563        case offsetof(struct bpf_tcp_sock, bytes_acked):
5564                return size == sizeof(__u64);
5565        default:
5566                return size == sizeof(__u32);
5567        }
5568}
5569
5570u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
5571                                    const struct bpf_insn *si,
5572                                    struct bpf_insn *insn_buf,
5573                                    struct bpf_prog *prog, u32 *target_size)
5574{
5575        struct bpf_insn *insn = insn_buf;
5576
5577#define BPF_TCP_SOCK_GET_COMMON(FIELD)                                  \
5578        do {                                                            \
5579                BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) >     \
5580                             FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
5581                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
5582                                      si->dst_reg, si->src_reg,         \
5583                                      offsetof(struct tcp_sock, FIELD)); \
5584        } while (0)
5585
5586#define BPF_INET_SOCK_GET_COMMON(FIELD)                                 \
5587        do {                                                            \
5588                BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock,  \
5589                                          FIELD) >                      \
5590                             FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
5591                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                 \
5592                                        struct inet_connection_sock,    \
5593                                        FIELD),                         \
5594                                      si->dst_reg, si->src_reg,         \
5595                                      offsetof(                         \
5596                                        struct inet_connection_sock,    \
5597                                        FIELD));                        \
5598        } while (0)
5599
5600        if (insn > insn_buf)
5601                return insn - insn_buf;
5602
5603        switch (si->off) {
5604        case offsetof(struct bpf_tcp_sock, rtt_min):
5605                BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
5606                             sizeof(struct minmax));
5607                BUILD_BUG_ON(sizeof(struct minmax) <
5608                             sizeof(struct minmax_sample));
5609
5610                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5611                                      offsetof(struct tcp_sock, rtt_min) +
5612                                      offsetof(struct minmax_sample, v));
5613                break;
5614        case offsetof(struct bpf_tcp_sock, snd_cwnd):
5615                BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
5616                break;
5617        case offsetof(struct bpf_tcp_sock, srtt_us):
5618                BPF_TCP_SOCK_GET_COMMON(srtt_us);
5619                break;
5620        case offsetof(struct bpf_tcp_sock, snd_ssthresh):
5621                BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
5622                break;
5623        case offsetof(struct bpf_tcp_sock, rcv_nxt):
5624                BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
5625                break;
5626        case offsetof(struct bpf_tcp_sock, snd_nxt):
5627                BPF_TCP_SOCK_GET_COMMON(snd_nxt);
5628                break;
5629        case offsetof(struct bpf_tcp_sock, snd_una):
5630                BPF_TCP_SOCK_GET_COMMON(snd_una);
5631                break;
5632        case offsetof(struct bpf_tcp_sock, mss_cache):
5633                BPF_TCP_SOCK_GET_COMMON(mss_cache);
5634                break;
5635        case offsetof(struct bpf_tcp_sock, ecn_flags):
5636                BPF_TCP_SOCK_GET_COMMON(ecn_flags);
5637                break;
5638        case offsetof(struct bpf_tcp_sock, rate_delivered):
5639                BPF_TCP_SOCK_GET_COMMON(rate_delivered);
5640                break;
5641        case offsetof(struct bpf_tcp_sock, rate_interval_us):
5642                BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
5643                break;
5644        case offsetof(struct bpf_tcp_sock, packets_out):
5645                BPF_TCP_SOCK_GET_COMMON(packets_out);
5646                break;
5647        case offsetof(struct bpf_tcp_sock, retrans_out):
5648                BPF_TCP_SOCK_GET_COMMON(retrans_out);
5649                break;
5650        case offsetof(struct bpf_tcp_sock, total_retrans):
5651                BPF_TCP_SOCK_GET_COMMON(total_retrans);
5652                break;
5653        case offsetof(struct bpf_tcp_sock, segs_in):
5654                BPF_TCP_SOCK_GET_COMMON(segs_in);
5655                break;
5656        case offsetof(struct bpf_tcp_sock, data_segs_in):
5657                BPF_TCP_SOCK_GET_COMMON(data_segs_in);
5658                break;
5659        case offsetof(struct bpf_tcp_sock, segs_out):
5660                BPF_TCP_SOCK_GET_COMMON(segs_out);
5661                break;
5662        case offsetof(struct bpf_tcp_sock, data_segs_out):
5663                BPF_TCP_SOCK_GET_COMMON(data_segs_out);
5664                break;
5665        case offsetof(struct bpf_tcp_sock, lost_out):
5666                BPF_TCP_SOCK_GET_COMMON(lost_out);
5667                break;
5668        case offsetof(struct bpf_tcp_sock, sacked_out):
5669                BPF_TCP_SOCK_GET_COMMON(sacked_out);
5670                break;
5671        case offsetof(struct bpf_tcp_sock, bytes_received):
5672                BPF_TCP_SOCK_GET_COMMON(bytes_received);
5673                break;
5674        case offsetof(struct bpf_tcp_sock, bytes_acked):
5675                BPF_TCP_SOCK_GET_COMMON(bytes_acked);
5676                break;
5677        case offsetof(struct bpf_tcp_sock, dsack_dups):
5678                BPF_TCP_SOCK_GET_COMMON(dsack_dups);
5679                break;
5680        case offsetof(struct bpf_tcp_sock, delivered):
5681                BPF_TCP_SOCK_GET_COMMON(delivered);
5682                break;
5683        case offsetof(struct bpf_tcp_sock, delivered_ce):
5684                BPF_TCP_SOCK_GET_COMMON(delivered_ce);
5685                break;
5686        case offsetof(struct bpf_tcp_sock, icsk_retransmits):
5687                BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
5688                break;
5689        }
5690
5691        return insn - insn_buf;
5692}
5693
5694BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
5695{
5696        if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
5697                return (unsigned long)sk;
5698
5699        return (unsigned long)NULL;
5700}
5701
5702const struct bpf_func_proto bpf_tcp_sock_proto = {
5703        .func           = bpf_tcp_sock,
5704        .gpl_only       = false,
5705        .ret_type       = RET_PTR_TO_TCP_SOCK_OR_NULL,
5706        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5707};
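
/* Usage sketch, after the pattern of the test_sock_fields selftest: on a
 * cgroup_skb egress hook, skb->sk is a sock_common pointer, so it has to
 * be narrowed via bpf_tcp_sock() before tcp_sock fields become readable:
 *
 *	SEC("cgroup_skb/egress")
 *	int snoop_cwnd(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock *sk = skb->sk;
 *		struct bpf_tcp_sock *tp;
 *
 *		if (sk) {
 *			tp = bpf_tcp_sock(sk);
 *			if (tp)
 *				bpf_printk("cwnd %u", tp->snd_cwnd);
 *		}
 *		return 1;
 *	}
 */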
5708
5709BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
5710{
5711        sk = sk_to_full_sk(sk);
5712
5713        if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
5714                return (unsigned long)sk;
5715
5716        return (unsigned long)NULL;
5717}
5718
5719static const struct bpf_func_proto bpf_get_listener_sock_proto = {
5720        .func           = bpf_get_listener_sock,
5721        .gpl_only       = false,
5722        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5723        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5724};
5725
5726BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
5727{
5728        unsigned int iphdr_len;
5729
5730        if (skb->protocol == cpu_to_be16(ETH_P_IP))
5731                iphdr_len = sizeof(struct iphdr);
5732        else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
5733                iphdr_len = sizeof(struct ipv6hdr);
5734        else
5735                return 0;
5736
5737        if (skb_headlen(skb) < iphdr_len)
5738                return 0;
5739
5740        if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
5741                return 0;
5742
5743        return INET_ECN_set_ce(skb);
5744}
5745
5746bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
5747                                  struct bpf_insn_access_aux *info)
5748{
5749        if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
5750                return false;
5751
5752        if (off % size != 0)
5753                return false;
5754
5755        switch (off) {
5756        default:
5757                return size == sizeof(__u32);
5758        }
5759}
5760
5761u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
5762                                    const struct bpf_insn *si,
5763                                    struct bpf_insn *insn_buf,
5764                                    struct bpf_prog *prog, u32 *target_size)
5765{
5766        struct bpf_insn *insn = insn_buf;
5767
5768#define BPF_XDP_SOCK_GET(FIELD)                                         \
5769        do {                                                            \
5770                BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) >     \
5771                             FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \
5772                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
5773                                      si->dst_reg, si->src_reg,         \
5774                                      offsetof(struct xdp_sock, FIELD)); \
5775        } while (0)
5776
5777        switch (si->off) {
5778        case offsetof(struct bpf_xdp_sock, queue_id):
5779                BPF_XDP_SOCK_GET(queue_id);
5780                break;
5781        }
5782
5783        return insn - insn_buf;
5784}
5785
5786static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
5787        .func           = bpf_skb_ecn_set_ce,
5788        .gpl_only       = false,
5789        .ret_type       = RET_INTEGER,
5790        .arg1_type      = ARG_PTR_TO_CTX,
5791};
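
/* Usage sketch (illustrative): a cgroup_skb egress policer marking instead
 * of dropping once a flow is over budget; over_budget() stands in for
 * whatever accounting the program keeps in a map:
 *
 *	if (over_budget(skb)) {
 *		if (!bpf_skb_ecn_set_ce(skb))
 *			return 0;	// not ECN capable: drop
 *	}
 *	return 1;			// allow
 */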
5792
5793BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
5794           struct tcphdr *, th, u32, th_len)
5795{
5796#ifdef CONFIG_SYN_COOKIES
5797        u32 cookie;
5798        int ret;
5799
5800        if (unlikely(th_len < sizeof(*th)))
5801                return -EINVAL;
5802
5803        /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
5804        if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
5805                return -EINVAL;
5806
5807        if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
5808                return -EINVAL;
5809
5810        if (!th->ack || th->rst || th->syn)
5811                return -ENOENT;
5812
5813        if (tcp_synq_no_recent_overflow(sk))
5814                return -ENOENT;
5815
5816        cookie = ntohl(th->ack_seq) - 1;
5817
5818        switch (sk->sk_family) {
5819        case AF_INET:
5820                if (unlikely(iph_len < sizeof(struct iphdr)))
5821                        return -EINVAL;
5822
5823                ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
5824                break;
5825
5826#if IS_BUILTIN(CONFIG_IPV6)
5827        case AF_INET6:
5828                if (unlikely(iph_len < sizeof(struct ipv6hdr)))
5829                        return -EINVAL;
5830
5831                ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
5832                break;
5833#endif /* CONFIG_IPV6 */
5834
5835        default:
5836                return -EPROTONOSUPPORT;
5837        }
5838
5839        if (ret > 0)
5840                return 0;
5841
5842        return -ENOENT;
5843#else
5844        return -ENOTSUPP;
5845#endif
5846}
5847
5848static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
5849        .func           = bpf_tcp_check_syncookie,
5850        .gpl_only       = true,
5851        .pkt_access     = true,
5852        .ret_type       = RET_INTEGER,
5853        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5854        .arg2_type      = ARG_PTR_TO_MEM,
5855        .arg3_type      = ARG_CONST_SIZE,
5856        .arg4_type      = ARG_PTR_TO_MEM,
5857        .arg5_type      = ARG_CONST_SIZE,
5858};
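
/* Usage sketch, condensed from the tcp_check_syncookie selftest: an XDP
 * program validating the cookie echoed in the final ACK (packet parsing
 * and bounds checks elided; iph/tcph/tuple come from the packet):
 *
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_skc_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
 *				BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		int ok = sk->state == BPF_TCP_LISTEN &&
 *			 !bpf_tcp_check_syncookie(sk, iph, sizeof(*iph),
 *						  tcph, tcph->doff * 4);
 *		// ok == 1: the ACK completes a cookie-validated handshake
 *		bpf_sk_release(sk);
 *	}
 */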
5859
5860BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
5861           struct tcphdr *, th, u32, th_len)
5862{
5863#ifdef CONFIG_SYN_COOKIES
5864        u32 cookie;
5865        u16 mss;
5866
5867        if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
5868                return -EINVAL;
5869
5870        if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
5871                return -EINVAL;
5872
5873        if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
5874                return -ENOENT;
5875
5876        if (!th->syn || th->ack || th->fin || th->rst)
5877                return -EINVAL;
5878
5879        if (unlikely(iph_len < sizeof(struct iphdr)))
5880                return -EINVAL;
5881
5882        /* Both struct iphdr and struct ipv6hdr have the version field at the
5883         * same offset so we can cast to the shorter header (struct iphdr).
5884         */
5885        switch (((struct iphdr *)iph)->version) {
5886        case 4:
5887                if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
5888                        return -EINVAL;
5889
5890                mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
5891                break;
5892
5893#if IS_BUILTIN(CONFIG_IPV6)
5894        case 6:
5895                if (unlikely(iph_len < sizeof(struct ipv6hdr)))
5896                        return -EINVAL;
5897
5898                if (sk->sk_family != AF_INET6)
5899                        return -EINVAL;
5900
5901                mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
5902                break;
5903#endif /* CONFIG_IPV6 */
5904
5905        default:
5906                return -EPROTONOSUPPORT;
5907        }
5908        if (mss == 0)
5909                return -ENOENT;
5910
5911        return cookie | ((u64)mss << 32);
5912#else
5913        return -EOPNOTSUPP;
5914#endif /* CONFIG_SYN_COOKIES */
5915}
5916
5917static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
5918        .func           = bpf_tcp_gen_syncookie,
5919        .gpl_only       = true, /* __cookie_v*_init_sequence() is GPL */
5920        .pkt_access     = true,
5921        .ret_type       = RET_INTEGER,
5922        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5923        .arg2_type      = ARG_PTR_TO_MEM,
5924        .arg3_type      = ARG_CONST_SIZE,
5925        .arg4_type      = ARG_PTR_TO_MEM,
5926        .arg5_type      = ARG_CONST_SIZE,
5927};
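
/* Sketch of how a caller consumes the result (illustrative): the cookie
 * sits in the lower 32 bits and the MSS in the upper 32 bits, with a
 * negative value signalling an error:
 *
 *	s64 ret = bpf_tcp_gen_syncookie(sk, iph, iph_len, th, th_len);
 *	if (ret >= 0) {
 *		__u32 cookie = (__u32)ret;
 *		__u16 mss = ret >> 32;
 *		... build the SYNACK: cookie becomes the sequence number,
 *		    mss goes into the TCP MSS option ...
 *	}
 */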
5928
5929#endif /* CONFIG_INET */
5930
5931bool bpf_helper_changes_pkt_data(void *func)
5932{
5933        if (func == bpf_skb_vlan_push ||
5934            func == bpf_skb_vlan_pop ||
5935            func == bpf_skb_store_bytes ||
5936            func == bpf_skb_change_proto ||
5937            func == bpf_skb_change_head ||
5938            func == sk_skb_change_head ||
5939            func == bpf_skb_change_tail ||
5940            func == sk_skb_change_tail ||
5941            func == bpf_skb_adjust_room ||
5942            func == bpf_skb_pull_data ||
5943            func == sk_skb_pull_data ||
5944            func == bpf_clone_redirect ||
5945            func == bpf_l3_csum_replace ||
5946            func == bpf_l4_csum_replace ||
5947            func == bpf_xdp_adjust_head ||
5948            func == bpf_xdp_adjust_meta ||
5949            func == bpf_msg_pull_data ||
5950            func == bpf_msg_push_data ||
5951            func == bpf_msg_pop_data ||
5952            func == bpf_xdp_adjust_tail ||
5953#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5954            func == bpf_lwt_seg6_store_bytes ||
5955            func == bpf_lwt_seg6_adjust_srh ||
5956            func == bpf_lwt_seg6_action ||
5957#endif
5958            func == bpf_lwt_in_push_encap ||
5959            func == bpf_lwt_xmit_push_encap)
5960                return true;
5961
5962        return false;
5963}
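
/* The verifier uses this list to invalidate all packet pointers held by a
 * program once one of these helpers has been called, because they may move
 * or reallocate the packet data.  Programs must therefore re-derive and
 * re-bound their pointers afterwards; a minimal sketch:
 *
 *	bpf_skb_pull_data(skb, 0);
 *	data     = (void *)(long)skb->data;	(reload after the helper)
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return TC_ACT_SHOT;
 */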
5964
5965static const struct bpf_func_proto *
5966bpf_base_func_proto(enum bpf_func_id func_id)
5967{
5968        switch (func_id) {
5969        case BPF_FUNC_map_lookup_elem:
5970                return &bpf_map_lookup_elem_proto;
5971        case BPF_FUNC_map_update_elem:
5972                return &bpf_map_update_elem_proto;
5973        case BPF_FUNC_map_delete_elem:
5974                return &bpf_map_delete_elem_proto;
5975        case BPF_FUNC_map_push_elem:
5976                return &bpf_map_push_elem_proto;
5977        case BPF_FUNC_map_pop_elem:
5978                return &bpf_map_pop_elem_proto;
5979        case BPF_FUNC_map_peek_elem:
5980                return &bpf_map_peek_elem_proto;
5981        case BPF_FUNC_get_prandom_u32:
5982                return &bpf_get_prandom_u32_proto;
5983        case BPF_FUNC_get_smp_processor_id:
5984                return &bpf_get_raw_smp_processor_id_proto;
5985        case BPF_FUNC_get_numa_node_id:
5986                return &bpf_get_numa_node_id_proto;
5987        case BPF_FUNC_tail_call:
5988                return &bpf_tail_call_proto;
5989        case BPF_FUNC_ktime_get_ns:
5990                return &bpf_ktime_get_ns_proto;
5991        default:
5992                break;
5993        }
5994
5995        if (!capable(CAP_SYS_ADMIN))
5996                return NULL;
5997
5998        switch (func_id) {
5999        case BPF_FUNC_spin_lock:
6000                return &bpf_spin_lock_proto;
6001        case BPF_FUNC_spin_unlock:
6002                return &bpf_spin_unlock_proto;
6003        case BPF_FUNC_trace_printk:
6004                return bpf_get_trace_printk_proto();
6005        default:
6006                return NULL;
6007        }
6008}
6009
6010static const struct bpf_func_proto *
6011sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6012{
6013        switch (func_id) {
6014        /* inet and inet6 sockets are created in a process
6015         * context so there is always a valid uid/gid
6016         */
6017        case BPF_FUNC_get_current_uid_gid:
6018                return &bpf_get_current_uid_gid_proto;
6019        case BPF_FUNC_get_local_storage:
6020                return &bpf_get_local_storage_proto;
6021        default:
6022                return bpf_base_func_proto(func_id);
6023        }
6024}
6025
6026static const struct bpf_func_proto *
6027sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6028{
6029        switch (func_id) {
6030        /* inet and inet6 sockets are created in a process
6031         * context so there is always a valid uid/gid
6032         */
6033        case BPF_FUNC_get_current_uid_gid:
6034                return &bpf_get_current_uid_gid_proto;
6035        case BPF_FUNC_bind:
6036                switch (prog->expected_attach_type) {
6037                case BPF_CGROUP_INET4_CONNECT:
6038                case BPF_CGROUP_INET6_CONNECT:
6039                        return &bpf_bind_proto;
6040                default:
6041                        return NULL;
6042                }
6043        case BPF_FUNC_get_socket_cookie:
6044                return &bpf_get_socket_cookie_sock_addr_proto;
6045        case BPF_FUNC_get_local_storage:
6046                return &bpf_get_local_storage_proto;
6047#ifdef CONFIG_INET
6048        case BPF_FUNC_sk_lookup_tcp:
6049                return &bpf_sock_addr_sk_lookup_tcp_proto;
6050        case BPF_FUNC_sk_lookup_udp:
6051                return &bpf_sock_addr_sk_lookup_udp_proto;
6052        case BPF_FUNC_sk_release:
6053                return &bpf_sk_release_proto;
6054        case BPF_FUNC_skc_lookup_tcp:
6055                return &bpf_sock_addr_skc_lookup_tcp_proto;
6056#endif /* CONFIG_INET */
6057        case BPF_FUNC_sk_storage_get:
6058                return &bpf_sk_storage_get_proto;
6059        case BPF_FUNC_sk_storage_delete:
6060                return &bpf_sk_storage_delete_proto;
6061        default:
6062                return bpf_base_func_proto(func_id);
6063        }
6064}
6065
6066static const struct bpf_func_proto *
6067sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6068{
6069        switch (func_id) {
6070        case BPF_FUNC_skb_load_bytes:
6071                return &bpf_skb_load_bytes_proto;
6072        case BPF_FUNC_skb_load_bytes_relative:
6073                return &bpf_skb_load_bytes_relative_proto;
6074        case BPF_FUNC_get_socket_cookie:
6075                return &bpf_get_socket_cookie_proto;
6076        case BPF_FUNC_get_socket_uid:
6077                return &bpf_get_socket_uid_proto;
6078        case BPF_FUNC_perf_event_output:
6079                return &bpf_skb_event_output_proto;
6080        default:
6081                return bpf_base_func_proto(func_id);
6082        }
6083}
6084
6085const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
6086const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
6087
6088static const struct bpf_func_proto *
6089cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6090{
6091        switch (func_id) {
6092        case BPF_FUNC_get_local_storage:
6093                return &bpf_get_local_storage_proto;
6094        case BPF_FUNC_sk_fullsock:
6095                return &bpf_sk_fullsock_proto;
6096        case BPF_FUNC_sk_storage_get:
6097                return &bpf_sk_storage_get_proto;
6098        case BPF_FUNC_sk_storage_delete:
6099                return &bpf_sk_storage_delete_proto;
6100        case BPF_FUNC_perf_event_output:
6101                return &bpf_skb_event_output_proto;
6102#ifdef CONFIG_SOCK_CGROUP_DATA
6103        case BPF_FUNC_skb_cgroup_id:
6104                return &bpf_skb_cgroup_id_proto;
6105#endif
6106#ifdef CONFIG_INET
6107        case BPF_FUNC_tcp_sock:
6108                return &bpf_tcp_sock_proto;
6109        case BPF_FUNC_get_listener_sock:
6110                return &bpf_get_listener_sock_proto;
6111        case BPF_FUNC_skb_ecn_set_ce:
6112                return &bpf_skb_ecn_set_ce_proto;
6113#endif
6114        default:
6115                return sk_filter_func_proto(func_id, prog);
6116        }
6117}
6118
6119static const struct bpf_func_proto *
6120tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6121{
6122        switch (func_id) {
6123        case BPF_FUNC_skb_store_bytes:
6124                return &bpf_skb_store_bytes_proto;
6125        case BPF_FUNC_skb_load_bytes:
6126                return &bpf_skb_load_bytes_proto;
6127        case BPF_FUNC_skb_load_bytes_relative:
6128                return &bpf_skb_load_bytes_relative_proto;
6129        case BPF_FUNC_skb_pull_data:
6130                return &bpf_skb_pull_data_proto;
6131        case BPF_FUNC_csum_diff:
6132                return &bpf_csum_diff_proto;
6133        case BPF_FUNC_csum_update:
6134                return &bpf_csum_update_proto;
6135        case BPF_FUNC_l3_csum_replace:
6136                return &bpf_l3_csum_replace_proto;
6137        case BPF_FUNC_l4_csum_replace:
6138                return &bpf_l4_csum_replace_proto;
6139        case BPF_FUNC_clone_redirect:
6140                return &bpf_clone_redirect_proto;
6141        case BPF_FUNC_get_cgroup_classid:
6142                return &bpf_get_cgroup_classid_proto;
6143        case BPF_FUNC_skb_vlan_push:
6144                return &bpf_skb_vlan_push_proto;
6145        case BPF_FUNC_skb_vlan_pop:
6146                return &bpf_skb_vlan_pop_proto;
6147        case BPF_FUNC_skb_change_proto:
6148                return &bpf_skb_change_proto_proto;
6149        case BPF_FUNC_skb_change_type:
6150                return &bpf_skb_change_type_proto;
6151        case BPF_FUNC_skb_adjust_room:
6152                return &bpf_skb_adjust_room_proto;
6153        case BPF_FUNC_skb_change_tail:
6154                return &bpf_skb_change_tail_proto;
6155        case BPF_FUNC_skb_get_tunnel_key:
6156                return &bpf_skb_get_tunnel_key_proto;
6157        case BPF_FUNC_skb_set_tunnel_key:
6158                return bpf_get_skb_set_tunnel_proto(func_id);
6159        case BPF_FUNC_skb_get_tunnel_opt:
6160                return &bpf_skb_get_tunnel_opt_proto;
6161        case BPF_FUNC_skb_set_tunnel_opt:
6162                return bpf_get_skb_set_tunnel_proto(func_id);
6163        case BPF_FUNC_redirect:
6164                return &bpf_redirect_proto;
6165        case BPF_FUNC_get_route_realm:
6166                return &bpf_get_route_realm_proto;
6167        case BPF_FUNC_get_hash_recalc:
6168                return &bpf_get_hash_recalc_proto;
6169        case BPF_FUNC_set_hash_invalid:
6170                return &bpf_set_hash_invalid_proto;
6171        case BPF_FUNC_set_hash:
6172                return &bpf_set_hash_proto;
6173        case BPF_FUNC_perf_event_output:
6174                return &bpf_skb_event_output_proto;
6175        case BPF_FUNC_get_smp_processor_id:
6176                return &bpf_get_smp_processor_id_proto;
6177        case BPF_FUNC_skb_under_cgroup:
6178                return &bpf_skb_under_cgroup_proto;
6179        case BPF_FUNC_get_socket_cookie:
6180                return &bpf_get_socket_cookie_proto;
6181        case BPF_FUNC_get_socket_uid:
6182                return &bpf_get_socket_uid_proto;
6183        case BPF_FUNC_fib_lookup:
6184                return &bpf_skb_fib_lookup_proto;
6185        case BPF_FUNC_sk_fullsock:
6186                return &bpf_sk_fullsock_proto;
6187        case BPF_FUNC_sk_storage_get:
6188                return &bpf_sk_storage_get_proto;
6189        case BPF_FUNC_sk_storage_delete:
6190                return &bpf_sk_storage_delete_proto;
6191#ifdef CONFIG_XFRM
6192        case BPF_FUNC_skb_get_xfrm_state:
6193                return &bpf_skb_get_xfrm_state_proto;
6194#endif
6195#ifdef CONFIG_SOCK_CGROUP_DATA
6196        case BPF_FUNC_skb_cgroup_id:
6197                return &bpf_skb_cgroup_id_proto;
6198        case BPF_FUNC_skb_ancestor_cgroup_id:
6199                return &bpf_skb_ancestor_cgroup_id_proto;
6200#endif
6201#ifdef CONFIG_INET
6202        case BPF_FUNC_sk_lookup_tcp:
6203                return &bpf_sk_lookup_tcp_proto;
6204        case BPF_FUNC_sk_lookup_udp:
6205                return &bpf_sk_lookup_udp_proto;
6206        case BPF_FUNC_sk_release:
6207                return &bpf_sk_release_proto;
6208        case BPF_FUNC_tcp_sock:
6209                return &bpf_tcp_sock_proto;
6210        case BPF_FUNC_get_listener_sock:
6211                return &bpf_get_listener_sock_proto;
6212        case BPF_FUNC_skc_lookup_tcp:
6213                return &bpf_skc_lookup_tcp_proto;
6214        case BPF_FUNC_tcp_check_syncookie:
6215                return &bpf_tcp_check_syncookie_proto;
6216        case BPF_FUNC_skb_ecn_set_ce:
6217                return &bpf_skb_ecn_set_ce_proto;
6218        case BPF_FUNC_tcp_gen_syncookie:
6219                return &bpf_tcp_gen_syncookie_proto;
6220#endif
6221        default:
6222                return bpf_base_func_proto(func_id);
6223        }
6224}
6225
6226static const struct bpf_func_proto *
6227xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6228{
6229        switch (func_id) {
6230        case BPF_FUNC_perf_event_output:
6231                return &bpf_xdp_event_output_proto;
6232        case BPF_FUNC_get_smp_processor_id:
6233                return &bpf_get_smp_processor_id_proto;
6234        case BPF_FUNC_csum_diff:
6235                return &bpf_csum_diff_proto;
6236        case BPF_FUNC_xdp_adjust_head:
6237                return &bpf_xdp_adjust_head_proto;
6238        case BPF_FUNC_xdp_adjust_meta:
6239                return &bpf_xdp_adjust_meta_proto;
6240        case BPF_FUNC_redirect:
6241                return &bpf_xdp_redirect_proto;
6242        case BPF_FUNC_redirect_map:
6243                return &bpf_xdp_redirect_map_proto;
6244        case BPF_FUNC_xdp_adjust_tail:
6245                return &bpf_xdp_adjust_tail_proto;
6246        case BPF_FUNC_fib_lookup:
6247                return &bpf_xdp_fib_lookup_proto;
6248#ifdef CONFIG_INET
6249        case BPF_FUNC_sk_lookup_udp:
6250                return &bpf_xdp_sk_lookup_udp_proto;
6251        case BPF_FUNC_sk_lookup_tcp:
6252                return &bpf_xdp_sk_lookup_tcp_proto;
6253        case BPF_FUNC_sk_release:
6254                return &bpf_sk_release_proto;
6255        case BPF_FUNC_skc_lookup_tcp:
6256                return &bpf_xdp_skc_lookup_tcp_proto;
6257        case BPF_FUNC_tcp_check_syncookie:
6258                return &bpf_tcp_check_syncookie_proto;
6259        case BPF_FUNC_tcp_gen_syncookie:
6260                return &bpf_tcp_gen_syncookie_proto;
6261#endif
6262        default:
6263                return bpf_base_func_proto(func_id);
6264        }
6265}
6266
6267const struct bpf_func_proto bpf_sock_map_update_proto __weak;
6268const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
6269
6270static const struct bpf_func_proto *
6271sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6272{
6273        switch (func_id) {
6274        case BPF_FUNC_setsockopt:
6275                return &bpf_setsockopt_proto;
6276        case BPF_FUNC_getsockopt:
6277                return &bpf_getsockopt_proto;
6278        case BPF_FUNC_sock_ops_cb_flags_set:
6279                return &bpf_sock_ops_cb_flags_set_proto;
6280        case BPF_FUNC_sock_map_update:
6281                return &bpf_sock_map_update_proto;
6282        case BPF_FUNC_sock_hash_update:
6283                return &bpf_sock_hash_update_proto;
6284        case BPF_FUNC_get_socket_cookie:
6285                return &bpf_get_socket_cookie_sock_ops_proto;
6286        case BPF_FUNC_get_local_storage:
6287                return &bpf_get_local_storage_proto;
6288        case BPF_FUNC_perf_event_output:
6289                return &bpf_sockopt_event_output_proto;
6290        case BPF_FUNC_sk_storage_get:
6291                return &bpf_sk_storage_get_proto;
6292        case BPF_FUNC_sk_storage_delete:
6293                return &bpf_sk_storage_delete_proto;
6294#ifdef CONFIG_INET
6295        case BPF_FUNC_tcp_sock:
6296                return &bpf_tcp_sock_proto;
6297#endif /* CONFIG_INET */
6298        default:
6299                return bpf_base_func_proto(func_id);
6300        }
6301}
6302
6303const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
6304const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
6305
6306static const struct bpf_func_proto *
6307sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6308{
6309        switch (func_id) {
6310        case BPF_FUNC_msg_redirect_map:
6311                return &bpf_msg_redirect_map_proto;
6312        case BPF_FUNC_msg_redirect_hash:
6313                return &bpf_msg_redirect_hash_proto;
6314        case BPF_FUNC_msg_apply_bytes:
6315                return &bpf_msg_apply_bytes_proto;
6316        case BPF_FUNC_msg_cork_bytes:
6317                return &bpf_msg_cork_bytes_proto;
6318        case BPF_FUNC_msg_pull_data:
6319                return &bpf_msg_pull_data_proto;
6320        case BPF_FUNC_msg_push_data:
6321                return &bpf_msg_push_data_proto;
6322        case BPF_FUNC_msg_pop_data:
6323                return &bpf_msg_pop_data_proto;
6324        default:
6325                return bpf_base_func_proto(func_id);
6326        }
6327}
6328
6329const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
6330const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
6331
6332static const struct bpf_func_proto *
6333sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6334{
6335        switch (func_id) {
6336        case BPF_FUNC_skb_store_bytes:
6337                return &bpf_skb_store_bytes_proto;
6338        case BPF_FUNC_skb_load_bytes:
6339                return &bpf_skb_load_bytes_proto;
6340        case BPF_FUNC_skb_pull_data:
6341                return &sk_skb_pull_data_proto;
6342        case BPF_FUNC_skb_change_tail:
6343                return &sk_skb_change_tail_proto;
6344        case BPF_FUNC_skb_change_head:
6345                return &sk_skb_change_head_proto;
6346        case BPF_FUNC_get_socket_cookie:
6347                return &bpf_get_socket_cookie_proto;
6348        case BPF_FUNC_get_socket_uid:
6349                return &bpf_get_socket_uid_proto;
6350        case BPF_FUNC_sk_redirect_map:
6351                return &bpf_sk_redirect_map_proto;
6352        case BPF_FUNC_sk_redirect_hash:
6353                return &bpf_sk_redirect_hash_proto;
6354        case BPF_FUNC_perf_event_output:
6355                return &bpf_skb_event_output_proto;
6356#ifdef CONFIG_INET
6357        case BPF_FUNC_sk_lookup_tcp:
6358                return &bpf_sk_lookup_tcp_proto;
6359        case BPF_FUNC_sk_lookup_udp:
6360                return &bpf_sk_lookup_udp_proto;
6361        case BPF_FUNC_sk_release:
6362                return &bpf_sk_release_proto;
6363        case BPF_FUNC_skc_lookup_tcp:
6364                return &bpf_skc_lookup_tcp_proto;
6365#endif
6366        default:
6367                return bpf_base_func_proto(func_id);
6368        }
6369}
6370
6371static const struct bpf_func_proto *
6372flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6373{
6374        switch (func_id) {
6375        case BPF_FUNC_skb_load_bytes:
6376                return &bpf_flow_dissector_load_bytes_proto;
6377        default:
6378                return bpf_base_func_proto(func_id);
6379        }
6380}
6381
6382static const struct bpf_func_proto *
6383lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6384{
6385        switch (func_id) {
6386        case BPF_FUNC_skb_load_bytes:
6387                return &bpf_skb_load_bytes_proto;
6388        case BPF_FUNC_skb_pull_data:
6389                return &bpf_skb_pull_data_proto;
6390        case BPF_FUNC_csum_diff:
6391                return &bpf_csum_diff_proto;
6392        case BPF_FUNC_get_cgroup_classid:
6393                return &bpf_get_cgroup_classid_proto;
6394        case BPF_FUNC_get_route_realm:
6395                return &bpf_get_route_realm_proto;
6396        case BPF_FUNC_get_hash_recalc:
6397                return &bpf_get_hash_recalc_proto;
6398        case BPF_FUNC_perf_event_output:
6399                return &bpf_skb_event_output_proto;
6400        case BPF_FUNC_get_smp_processor_id:
6401                return &bpf_get_smp_processor_id_proto;
6402        case BPF_FUNC_skb_under_cgroup:
6403                return &bpf_skb_under_cgroup_proto;
6404        default:
6405                return bpf_base_func_proto(func_id);
6406        }
6407}
6408
6409static const struct bpf_func_proto *
6410lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6411{
6412        switch (func_id) {
6413        case BPF_FUNC_lwt_push_encap:
6414                return &bpf_lwt_in_push_encap_proto;
6415        default:
6416                return lwt_out_func_proto(func_id, prog);
6417        }
6418}
6419
6420static const struct bpf_func_proto *
6421lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6422{
6423        switch (func_id) {
6424        case BPF_FUNC_skb_get_tunnel_key:
6425                return &bpf_skb_get_tunnel_key_proto;
6426        case BPF_FUNC_skb_set_tunnel_key:
6427                return bpf_get_skb_set_tunnel_proto(func_id);
6428        case BPF_FUNC_skb_get_tunnel_opt:
6429                return &bpf_skb_get_tunnel_opt_proto;
6430        case BPF_FUNC_skb_set_tunnel_opt:
6431                return bpf_get_skb_set_tunnel_proto(func_id);
6432        case BPF_FUNC_redirect:
6433                return &bpf_redirect_proto;
6434        case BPF_FUNC_clone_redirect:
6435                return &bpf_clone_redirect_proto;
6436        case BPF_FUNC_skb_change_tail:
6437                return &bpf_skb_change_tail_proto;
6438        case BPF_FUNC_skb_change_head:
6439                return &bpf_skb_change_head_proto;
6440        case BPF_FUNC_skb_store_bytes:
6441                return &bpf_skb_store_bytes_proto;
6442        case BPF_FUNC_csum_update:
6443                return &bpf_csum_update_proto;
6444        case BPF_FUNC_l3_csum_replace:
6445                return &bpf_l3_csum_replace_proto;
6446        case BPF_FUNC_l4_csum_replace:
6447                return &bpf_l4_csum_replace_proto;
6448        case BPF_FUNC_set_hash_invalid:
6449                return &bpf_set_hash_invalid_proto;
6450        case BPF_FUNC_lwt_push_encap:
6451                return &bpf_lwt_xmit_push_encap_proto;
6452        default:
6453                return lwt_out_func_proto(func_id, prog);
6454        }
6455}
6456
6457static const struct bpf_func_proto *
6458lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6459{
6460        switch (func_id) {
6461#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
6462        case BPF_FUNC_lwt_seg6_store_bytes:
6463                return &bpf_lwt_seg6_store_bytes_proto;
6464        case BPF_FUNC_lwt_seg6_action:
6465                return &bpf_lwt_seg6_action_proto;
6466        case BPF_FUNC_lwt_seg6_adjust_srh:
6467                return &bpf_lwt_seg6_adjust_srh_proto;
6468#endif
6469        default:
6470                return lwt_out_func_proto(func_id, prog);
6471        }
6472}
6473
6474static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
6475                                    const struct bpf_prog *prog,
6476                                    struct bpf_insn_access_aux *info)
6477{
6478        const int size_default = sizeof(__u32);
6479
6480        if (off < 0 || off >= sizeof(struct __sk_buff))
6481                return false;
6482
6483        /* The verifier guarantees that size > 0. */
6484        if (off % size != 0)
6485                return false;
6486
6487        switch (off) {
6488        case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6489                if (off + size > offsetofend(struct __sk_buff, cb[4]))
6490                        return false;
6491                break;
6492        case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
6493        case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
6494        case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
6495        case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
6496        case bpf_ctx_range(struct __sk_buff, data):
6497        case bpf_ctx_range(struct __sk_buff, data_meta):
6498        case bpf_ctx_range(struct __sk_buff, data_end):
6499                if (size != size_default)
6500                        return false;
6501                break;
6502        case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
6503                return false;
6504        case bpf_ctx_range(struct __sk_buff, tstamp):
6505                if (size != sizeof(__u64))
6506                        return false;
6507                break;
6508        case offsetof(struct __sk_buff, sk):
6509                if (type == BPF_WRITE || size != sizeof(__u64))
6510                        return false;
6511                info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
6512                break;
6513        default:
6514                /* Only narrow read access allowed for now. */
6515                if (type == BPF_WRITE) {
6516                        if (size != size_default)
6517                                return false;
6518                } else {
6519                        bpf_ctx_record_field_size(info, size_default);
6520                        if (!bpf_ctx_narrow_access_ok(off, size, size_default))
6521                                return false;
6522                }
6523        }
6524
6525        return true;
6526}
6527
6528static bool sk_filter_is_valid_access(int off, int size,
6529                                      enum bpf_access_type type,
6530                                      const struct bpf_prog *prog,
6531                                      struct bpf_insn_access_aux *info)
6532{
6533        switch (off) {
6534        case bpf_ctx_range(struct __sk_buff, tc_classid):
6535        case bpf_ctx_range(struct __sk_buff, data):
6536        case bpf_ctx_range(struct __sk_buff, data_meta):
6537        case bpf_ctx_range(struct __sk_buff, data_end):
6538        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6539        case bpf_ctx_range(struct __sk_buff, tstamp):
6540        case bpf_ctx_range(struct __sk_buff, wire_len):
6541                return false;
6542        }
6543
6544        if (type == BPF_WRITE) {
6545                switch (off) {
6546                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6547                        break;
6548                default:
6549                        return false;
6550                }
6551        }
6552
6553        return bpf_skb_is_valid_access(off, size, type, prog, info);
6554}
6555
6556static bool cg_skb_is_valid_access(int off, int size,
6557                                   enum bpf_access_type type,
6558                                   const struct bpf_prog *prog,
6559                                   struct bpf_insn_access_aux *info)
6560{
6561        switch (off) {
6562        case bpf_ctx_range(struct __sk_buff, tc_classid):
6563        case bpf_ctx_range(struct __sk_buff, data_meta):
6564        case bpf_ctx_range(struct __sk_buff, wire_len):
6565                return false;
6566        case bpf_ctx_range(struct __sk_buff, data):
6567        case bpf_ctx_range(struct __sk_buff, data_end):
6568                if (!capable(CAP_SYS_ADMIN))
6569                        return false;
6570                break;
6571        }
6572
6573        if (type == BPF_WRITE) {
6574                switch (off) {
6575                case bpf_ctx_range(struct __sk_buff, mark):
6576                case bpf_ctx_range(struct __sk_buff, priority):
6577                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6578                        break;
6579                case bpf_ctx_range(struct __sk_buff, tstamp):
6580                        if (!capable(CAP_SYS_ADMIN))
6581                                return false;
6582                        break;
6583                default:
6584                        return false;
6585                }
6586        }
6587
6588        switch (off) {
6589        case bpf_ctx_range(struct __sk_buff, data):
6590                info->reg_type = PTR_TO_PACKET;
6591                break;
6592        case bpf_ctx_range(struct __sk_buff, data_end):
6593                info->reg_type = PTR_TO_PACKET_END;
6594                break;
6595        }
6596
6597        return bpf_skb_is_valid_access(off, size, type, prog, info);
6598}
6599
6600static bool lwt_is_valid_access(int off, int size,
6601                                enum bpf_access_type type,
6602                                const struct bpf_prog *prog,
6603                                struct bpf_insn_access_aux *info)
6604{
6605        switch (off) {
6606        case bpf_ctx_range(struct __sk_buff, tc_classid):
6607        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6608        case bpf_ctx_range(struct __sk_buff, data_meta):
6609        case bpf_ctx_range(struct __sk_buff, tstamp):
6610        case bpf_ctx_range(struct __sk_buff, wire_len):
6611                return false;
6612        }
6613
6614        if (type == BPF_WRITE) {
6615                switch (off) {
6616                case bpf_ctx_range(struct __sk_buff, mark):
6617                case bpf_ctx_range(struct __sk_buff, priority):
6618                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6619                        break;
6620                default:
6621                        return false;
6622                }
6623        }
6624
6625        switch (off) {
6626        case bpf_ctx_range(struct __sk_buff, data):
6627                info->reg_type = PTR_TO_PACKET;
6628                break;
6629        case bpf_ctx_range(struct __sk_buff, data_end):
6630                info->reg_type = PTR_TO_PACKET_END;
6631                break;
6632        }
6633
6634        return bpf_skb_is_valid_access(off, size, type, prog, info);
6635}
6636
6637/* Attach type specific accesses */
6638static bool __sock_filter_check_attach_type(int off,
6639                                            enum bpf_access_type access_type,
6640                                            enum bpf_attach_type attach_type)
6641{
6642        switch (off) {
6643        case offsetof(struct bpf_sock, bound_dev_if):
6644        case offsetof(struct bpf_sock, mark):
6645        case offsetof(struct bpf_sock, priority):
6646                switch (attach_type) {
6647                case BPF_CGROUP_INET_SOCK_CREATE:
6648                        goto full_access;
6649                default:
6650                        return false;
6651                }
6652        case bpf_ctx_range(struct bpf_sock, src_ip4):
6653                switch (attach_type) {
6654                case BPF_CGROUP_INET4_POST_BIND:
6655                        goto read_only;
6656                default:
6657                        return false;
6658                }
6659        case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6660                switch (attach_type) {
6661                case BPF_CGROUP_INET6_POST_BIND:
6662                        goto read_only;
6663                default:
6664                        return false;
6665                }
6666        case bpf_ctx_range(struct bpf_sock, src_port):
6667                switch (attach_type) {
6668                case BPF_CGROUP_INET4_POST_BIND:
6669                case BPF_CGROUP_INET6_POST_BIND:
6670                        goto read_only;
6671                default:
6672                        return false;
6673                }
6674        }
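        /* Fields not matched above fall through to here and are thus
         * readable under any attach type.
         */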
6675read_only:
6676        return access_type == BPF_READ;
6677full_access:
6678        return true;
6679}
6680
6681bool bpf_sock_common_is_valid_access(int off, int size,
6682                                     enum bpf_access_type type,
6683                                     struct bpf_insn_access_aux *info)
6684{
6685        switch (off) {
6686        case bpf_ctx_range_till(struct bpf_sock, type, priority):
6687                return false;
6688        default:
6689                return bpf_sock_is_valid_access(off, size, type, info);
6690        }
6691}
6692
6693bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6694                              struct bpf_insn_access_aux *info)
6695{
6696        const int size_default = sizeof(__u32);
6697
6698        if (off < 0 || off >= sizeof(struct bpf_sock))
6699                return false;
6700        if (off % size != 0)
6701                return false;
6702
6703        switch (off) {
6704        case offsetof(struct bpf_sock, state):
6705        case offsetof(struct bpf_sock, family):
6706        case offsetof(struct bpf_sock, type):
6707        case offsetof(struct bpf_sock, protocol):
6708        case offsetof(struct bpf_sock, dst_port):
6709        case offsetof(struct bpf_sock, src_port):
6710        case bpf_ctx_range(struct bpf_sock, src_ip4):
6711        case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6712        case bpf_ctx_range(struct bpf_sock, dst_ip4):
6713        case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
6714                bpf_ctx_record_field_size(info, size_default);
6715                return bpf_ctx_narrow_access_ok(off, size, size_default);
6716        }
6717
6718        return size == size_default;
6719}
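
/* The ranges listed above additionally permit narrow loads, i.e. reads
 * smaller than the 4-byte field itself; a sketch (assuming a
 * struct bpf_sock *sk context):
 *
 *	__u8 byte = *(__u8 *)&sk->src_ip4;	(narrow 1-byte load)
 *
 * Everything else must be accessed at the default 4-byte width.
 */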
6720
6721static bool sock_filter_is_valid_access(int off, int size,
6722                                        enum bpf_access_type type,
6723                                        const struct bpf_prog *prog,
6724                                        struct bpf_insn_access_aux *info)
6725{
6726        if (!bpf_sock_is_valid_access(off, size, type, info))
6727                return false;
6728        return __sock_filter_check_attach_type(off, type,
6729                                               prog->expected_attach_type);
6730}
6731
6732static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
6733                             const struct bpf_prog *prog)
6734{
6735        /* Neither direct read nor direct write requires any preliminary
6736         * action.
6737         */
6738        return 0;
6739}
6740
6741static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
6742                                const struct bpf_prog *prog, int drop_verdict)
6743{
6744        struct bpf_insn *insn = insn_buf;
6745
6746        if (!direct_write)
6747                return 0;
6748
6749        /* if (!skb->cloned)
6750         *       goto start;
6751         *
6752         * (Fast path. Otherwise we conservatively assume the skb
6753         *  may be a clone and leave the uncloning to the helper below.)
6754         */
6755        *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
6756        *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
6757        *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
6758
6759        /* ret = bpf_skb_pull_data(skb, 0); */
6760        *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
6761        *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
6762        *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6763                               BPF_FUNC_skb_pull_data);
6764        /* if (!ret)
6765         *      goto restore;
6766         * return drop_verdict;  (TC_ACT_SHOT or SK_DROP)
6767         */
6768        *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
6769        *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
6770        *insn++ = BPF_EXIT_INSN();
6771
6772        /* restore: */
6773        *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
6774        /* start: */
6775        *insn++ = prog->insnsi[0];
6776
6777        return insn - insn_buf;
6778}
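
/* Direct packet writes are only safe on a private skb: a clone shares its
 * data with other skbs, so the prologue above calls bpf_skb_pull_data(skb, 0)
 * to unclone the linear header before the program's first instruction runs,
 * and drops the packet if that fails.
 */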
6779
6780static int bpf_gen_ld_abs(const struct bpf_insn *orig,
6781                          struct bpf_insn *insn_buf)
6782{
6783        bool indirect = BPF_MODE(orig->code) == BPF_IND;
6784        struct bpf_insn *insn = insn_buf;
6785
6786        /* We're guaranteed here that CTX is in R6. */
6787        *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
6788        if (!indirect) {
6789                *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
6790        } else {
6791                *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
6792                if (orig->imm)
6793                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
6794        }
6795
6796        switch (BPF_SIZE(orig->code)) {
6797        case BPF_B:
6798                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
6799                break;
6800        case BPF_H:
6801                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
6802                break;
6803        case BPF_W:
6804                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
6805                break;
6806        }
6807
6808        *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
6809        *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
6810        *insn++ = BPF_EXIT_INSN();
6811
6812        return insn - insn_buf;
6813}
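
/* Worked example (illustrative): the classic BPF instruction "ldh [12]",
 * which loads the Ethernet type field, reaches this point as
 * BPF_LD | BPF_ABS | BPF_H with imm == 12 and is expanded to roughly:
 *
 *	r1 = r6				(ctx, i.e. the skb)
 *	r2 = 12				(absolute offset)
 *	call bpf_skb_load_helper_16_no_cache
 *	if r0 s>= 0 goto +2		(success, value is in r0)
 *	r0 = 0				(classic BPF failure: return 0)
 *	exit
 */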
6814
6815static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
6816                               const struct bpf_prog *prog)
6817{
6818        return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
6819}
6820
6821static bool tc_cls_act_is_valid_access(int off, int size,
6822                                       enum bpf_access_type type,
6823                                       const struct bpf_prog *prog,
6824                                       struct bpf_insn_access_aux *info)
6825{
6826        if (type == BPF_WRITE) {
6827                switch (off) {
6828                case bpf_ctx_range(struct __sk_buff, mark):
6829                case bpf_ctx_range(struct __sk_buff, tc_index):
6830                case bpf_ctx_range(struct __sk_buff, priority):
6831                case bpf_ctx_range(struct __sk_buff, tc_classid):
6832                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6833                case bpf_ctx_range(struct __sk_buff, tstamp):
6834                case bpf_ctx_range(struct __sk_buff, queue_mapping):
6835                        break;
6836                default:
6837                        return false;
6838                }
6839        }
6840
6841        switch (off) {
6842        case bpf_ctx_range(struct __sk_buff, data):
6843                info->reg_type = PTR_TO_PACKET;
6844                break;
6845        case bpf_ctx_range(struct __sk_buff, data_meta):
6846                info->reg_type = PTR_TO_PACKET_META;
6847                break;
6848        case bpf_ctx_range(struct __sk_buff, data_end):
6849                info->reg_type = PTR_TO_PACKET_END;
6850                break;
6851        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6852                return false;
6853        }
6854
6855        return bpf_skb_is_valid_access(off, size, type, prog, info);
6856}
6857
6858static bool __is_valid_xdp_access(int off, int size)
6859{
6860        if (off < 0 || off >= sizeof(struct xdp_md))
6861                return false;
6862        if (off % size != 0)
6863                return false;
6864        if (size != sizeof(__u32))
6865                return false;
6866
6867        return true;
6868}
6869
6870static bool xdp_is_valid_access(int off, int size,
6871                                enum bpf_access_type type,
6872                                const struct bpf_prog *prog,
6873                                struct bpf_insn_access_aux *info)
6874{
6875        if (type == BPF_WRITE) {
6876                if (bpf_prog_is_dev_bound(prog->aux)) {
6877                        switch (off) {
6878                        case offsetof(struct xdp_md, rx_queue_index):
6879                                return __is_valid_xdp_access(off, size);
6880                        }
6881                }
6882                return false;
6883        }
6884
6885        switch (off) {
6886        case offsetof(struct xdp_md, data):
6887                info->reg_type = PTR_TO_PACKET;
6888                break;
6889        case offsetof(struct xdp_md, data_meta):
6890                info->reg_type = PTR_TO_PACKET_META;
6891                break;
6892        case offsetof(struct xdp_md, data_end):
6893                info->reg_type = PTR_TO_PACKET_END;
6894                break;
6895        }
6896
6897        return __is_valid_xdp_access(off, size);
6898}
6899
6900void bpf_warn_invalid_xdp_action(u32 act)
6901{
6902        const u32 act_max = XDP_REDIRECT;
6903
6904        WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
6905                  act > act_max ? "Illegal" : "Driver unsupported",
6906                  act);
6907}
6908EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
6909
6910static bool sock_addr_is_valid_access(int off, int size,
6911                                      enum bpf_access_type type,
6912                                      const struct bpf_prog *prog,
6913                                      struct bpf_insn_access_aux *info)
6914{
6915        const int size_default = sizeof(__u32);
6916
6917        if (off < 0 || off >= sizeof(struct bpf_sock_addr))
6918                return false;
6919        if (off % size != 0)
6920                return false;
6921
6922        /* Disallow access to IPv6 fields from IPv4 context and vice
6923         * versa.
6924         */
6925        switch (off) {
6926        case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6927                switch (prog->expected_attach_type) {
6928                case BPF_CGROUP_INET4_BIND:
6929                case BPF_CGROUP_INET4_CONNECT:
6930                case BPF_CGROUP_UDP4_SENDMSG:
6931                case BPF_CGROUP_UDP4_RECVMSG:
6932                        break;
6933                default:
6934                        return false;
6935                }
6936                break;
6937        case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6938                switch (prog->expected_attach_type) {
6939                case BPF_CGROUP_INET6_BIND:
6940                case BPF_CGROUP_INET6_CONNECT:
6941                case BPF_CGROUP_UDP6_SENDMSG:
6942                case BPF_CGROUP_UDP6_RECVMSG:
6943                        break;
6944                default:
6945                        return false;
6946                }
6947                break;
6948        case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6949                switch (prog->expected_attach_type) {
6950                case BPF_CGROUP_UDP4_SENDMSG:
6951                        break;
6952                default:
6953                        return false;
6954                }
6955                break;
6956        case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6957                                msg_src_ip6[3]):
6958                switch (prog->expected_attach_type) {
6959                case BPF_CGROUP_UDP6_SENDMSG:
6960                        break;
6961                default:
6962                        return false;
6963                }
6964                break;
6965        }
6966
6967        switch (off) {
6968        case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6969        case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6970        case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6971        case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6972                                msg_src_ip6[3]):
6973                if (type == BPF_READ) {
6974                        bpf_ctx_record_field_size(info, size_default);
6975
6976                        if (bpf_ctx_wide_access_ok(off, size,
6977                                                   struct bpf_sock_addr,
6978                                                   user_ip6))
6979                                return true;
6980
6981                        if (bpf_ctx_wide_access_ok(off, size,
6982                                                   struct bpf_sock_addr,
6983                                                   msg_src_ip6))
6984                                return true;
6985
6986                        if (!bpf_ctx_narrow_access_ok(off, size, size_default))
6987                                return false;
6988                } else {
6989                        if (bpf_ctx_wide_access_ok(off, size,
6990                                                   struct bpf_sock_addr,
6991                                                   user_ip6))
6992                                return true;
6993
6994                        if (bpf_ctx_wide_access_ok(off, size,
6995                                                   struct bpf_sock_addr,
6996                                                   msg_src_ip6))
6997                                return true;
6998
6999                        if (size != size_default)
7000                                return false;
7001                }
7002                break;
7003        case bpf_ctx_range(struct bpf_sock_addr, user_port):
7004                if (size != size_default)
7005                        return false;
7006                break;
7007        case offsetof(struct bpf_sock_addr, sk):
7008                if (type != BPF_READ)
7009                        return false;
7010                if (size != sizeof(__u64))
7011                        return false;
7012                info->reg_type = PTR_TO_SOCKET;
7013                break;
7014        default:
7015                if (type == BPF_READ) {
7016                        if (size != size_default)
7017                                return false;
7018                } else {
7019                        return false;
7020                }
7021        }
7022
7023        return true;
7024}
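
/* Besides 4-byte and narrow accesses, the IPv6 address arrays above allow
 * "wide" 8-byte accesses on their two aligned halves, so an address can be
 * copied in two loads instead of four (sketch, names illustrative):
 *
 *	__u64 hi = *(__u64 *)&ctx->user_ip6[0];
 *	__u64 lo = *(__u64 *)&ctx->user_ip6[2];
 */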
7025
7026static bool sock_ops_is_valid_access(int off, int size,
7027                                     enum bpf_access_type type,
7028                                     const struct bpf_prog *prog,
7029                                     struct bpf_insn_access_aux *info)
7030{
7031        const int size_default = sizeof(__u32);
7032
7033        if (off < 0 || off >= sizeof(struct bpf_sock_ops))
7034                return false;
7035
7036        /* The verifier guarantees that size > 0. */
7037        if (off % size != 0)
7038                return false;
7039
7040        if (type == BPF_WRITE) {
7041                switch (off) {
7042                case offsetof(struct bpf_sock_ops, reply):
7043                case offsetof(struct bpf_sock_ops, sk_txhash):
7044                        if (size != size_default)
7045                                return false;
7046                        break;
7047                default:
7048                        return false;
7049                }
7050        } else {
7051                switch (off) {
7052                case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
7053                                        bytes_acked):
7054                        if (size != sizeof(__u64))
7055                                return false;
7056                        break;
7057                case offsetof(struct bpf_sock_ops, sk):
7058                        if (size != sizeof(__u64))
7059                                return false;
7060                        info->reg_type = PTR_TO_SOCKET_OR_NULL;
7061                        break;
7062                default:
7063                        if (size != size_default)
7064                                return false;
7065                        break;
7066                }
7067        }
7068
7069        return true;
7070}
7071
7072static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
7073                           const struct bpf_prog *prog)
7074{
7075        return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
7076}
7077
7078static bool sk_skb_is_valid_access(int off, int size,
7079                                   enum bpf_access_type type,
7080                                   const struct bpf_prog *prog,
7081                                   struct bpf_insn_access_aux *info)
7082{
7083        switch (off) {
7084        case bpf_ctx_range(struct __sk_buff, tc_classid):
7085        case bpf_ctx_range(struct __sk_buff, data_meta):
7086        case bpf_ctx_range(struct __sk_buff, tstamp):
7087        case bpf_ctx_range(struct __sk_buff, wire_len):
7088                return false;
7089        }
7090
7091        if (type == BPF_WRITE) {
7092                switch (off) {
7093                case bpf_ctx_range(struct __sk_buff, tc_index):
7094                case bpf_ctx_range(struct __sk_buff, priority):
7095                        break;
7096                default:
7097                        return false;
7098                }
7099        }
7100
7101        switch (off) {
7102        case bpf_ctx_range(struct __sk_buff, mark):
7103                return false;
7104        case bpf_ctx_range(struct __sk_buff, data):
7105                info->reg_type = PTR_TO_PACKET;
7106                break;
7107        case bpf_ctx_range(struct __sk_buff, data_end):
7108                info->reg_type = PTR_TO_PACKET_END;
7109                break;
7110        }
7111
7112        return bpf_skb_is_valid_access(off, size, type, prog, info);
7113}
7114
7115static bool sk_msg_is_valid_access(int off, int size,
7116                                   enum bpf_access_type type,
7117                                   const struct bpf_prog *prog,
7118                                   struct bpf_insn_access_aux *info)
7119{
7120        if (type == BPF_WRITE)
7121                return false;
7122
7123        if (off % size != 0)
7124                return false;
7125
7126        switch (off) {
7127        case offsetof(struct sk_msg_md, data):
7128                info->reg_type = PTR_TO_PACKET;
7129                if (size != sizeof(__u64))
7130                        return false;
7131                break;
7132        case offsetof(struct sk_msg_md, data_end):
7133                info->reg_type = PTR_TO_PACKET_END;
7134                if (size != sizeof(__u64))
7135                        return false;
7136                break;
7137        case bpf_ctx_range(struct sk_msg_md, family):
7138        case bpf_ctx_range(struct sk_msg_md, remote_ip4):
7139        case bpf_ctx_range(struct sk_msg_md, local_ip4):
7140        case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
7141        case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
7142        case bpf_ctx_range(struct sk_msg_md, remote_port):
7143        case bpf_ctx_range(struct sk_msg_md, local_port):
7144        case bpf_ctx_range(struct sk_msg_md, size):
7145                if (size != sizeof(__u32))
7146                        return false;
7147                break;
7148        default:
7149                return false;
7150        }
7151        return true;
7152}
7153
7154static bool flow_dissector_is_valid_access(int off, int size,
7155                                           enum bpf_access_type type,
7156                                           const struct bpf_prog *prog,
7157                                           struct bpf_insn_access_aux *info)
7158{
7159        const int size_default = sizeof(__u32);
7160
7161        if (off < 0 || off >= sizeof(struct __sk_buff))
7162                return false;
7163
7164        if (type == BPF_WRITE)
7165                return false;
7166
7167        switch (off) {
7168        case bpf_ctx_range(struct __sk_buff, data):
7169                if (size != size_default)
7170                        return false;
7171                info->reg_type = PTR_TO_PACKET;
7172                return true;
7173        case bpf_ctx_range(struct __sk_buff, data_end):
7174                if (size != size_default)
7175                        return false;
7176                info->reg_type = PTR_TO_PACKET_END;
7177                return true;
7178        case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
7179                if (size != sizeof(__u64))
7180                        return false;
7181                info->reg_type = PTR_TO_FLOW_KEYS;
7182                return true;
7183        default:
7184                return false;
7185        }
7186}
7187
7188static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
7189                                             const struct bpf_insn *si,
7190                                             struct bpf_insn *insn_buf,
7191                                             struct bpf_prog *prog,
7192                                             u32 *target_size)
7194{
7195        struct bpf_insn *insn = insn_buf;
7196
7197        switch (si->off) {
7198        case offsetof(struct __sk_buff, data):
7199                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
7200                                      si->dst_reg, si->src_reg,
7201                                      offsetof(struct bpf_flow_dissector, data));
7202                break;
7203
7204        case offsetof(struct __sk_buff, data_end):
7205                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
7206                                      si->dst_reg, si->src_reg,
7207                                      offsetof(struct bpf_flow_dissector, data_end));
7208                break;
7209
7210        case offsetof(struct __sk_buff, flow_keys):
7211                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
7212                                      si->dst_reg, si->src_reg,
7213                                      offsetof(struct bpf_flow_dissector, flow_keys));
7214                break;
7215        }
7216
7217        return insn - insn_buf;
7218}
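
/* Flow dissector programs are written against struct __sk_buff, but run
 * with a struct bpf_flow_dissector as their context; the rewrites above
 * turn e.g. a load of ctx->data into a load of bpf_flow_dissector::data.
 */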
7219
7220static u32 bpf_convert_ctx_access(enum bpf_access_type type,
7221                                  const struct bpf_insn *si,
7222                                  struct bpf_insn *insn_buf,
7223                                  struct bpf_prog *prog, u32 *target_size)
7224{
7225        struct bpf_insn *insn = insn_buf;
7226        int off;
7227
7228        switch (si->off) {
7229        case offsetof(struct __sk_buff, len):
7230                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7231                                      bpf_target_off(struct sk_buff, len, 4,
7232                                                     target_size));
7233                break;
7234
7235        case offsetof(struct __sk_buff, protocol):
7236                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7237                                      bpf_target_off(struct sk_buff, protocol, 2,
7238                                                     target_size));
7239                break;
7240
7241        case offsetof(struct __sk_buff, vlan_proto):
7242                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7243                                      bpf_target_off(struct sk_buff, vlan_proto, 2,
7244                                                     target_size));
7245                break;
7246
7247        case offsetof(struct __sk_buff, priority):
7248                if (type == BPF_WRITE)
7249                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7250                                              bpf_target_off(struct sk_buff, priority, 4,
7251                                                             target_size));
7252                else
7253                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7254                                              bpf_target_off(struct sk_buff, priority, 4,
7255                                                             target_size));
7256                break;
7257
7258        case offsetof(struct __sk_buff, ingress_ifindex):
7259                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7260                                      bpf_target_off(struct sk_buff, skb_iif, 4,
7261                                                     target_size));
7262                break;
7263
7264        case offsetof(struct __sk_buff, ifindex):
7265                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
7266                                      si->dst_reg, si->src_reg,
7267                                      offsetof(struct sk_buff, dev));
7268                *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
7269                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7270                                      bpf_target_off(struct net_device, ifindex, 4,
7271                                                     target_size));
7272                break;
7273
7274        case offsetof(struct __sk_buff, hash):
7275                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7276                                      bpf_target_off(struct sk_buff, hash, 4,
7277                                                     target_size));
7278                break;
7279
7280        case offsetof(struct __sk_buff, mark):
7281                if (type == BPF_WRITE)
7282                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7283                                              bpf_target_off(struct sk_buff, mark, 4,
7284                                                             target_size));
7285                else
7286                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7287                                              bpf_target_off(struct sk_buff, mark, 4,
7288                                                             target_size));
7289                break;
7290
7291        case offsetof(struct __sk_buff, pkt_type):
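                /* pkt_type is a 3-bit bitfield, so mask off the neighbouring
                 * bits; on big-endian bitfield layouts it occupies the high
                 * bits of the byte, hence the extra right shift.
                 */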
7292                *target_size = 1;
7293                *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
7294                                      PKT_TYPE_OFFSET());
7295                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
7296#ifdef __BIG_ENDIAN_BITFIELD
7297                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
7298#endif
7299                break;
7300
7301        case offsetof(struct __sk_buff, queue_mapping):
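                /* On writes, the JGE below jumps over the store for values
                 * >= NO_QUEUE_MAPPING, so out-of-range queue numbers are
                 * silently discarded.
                 */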
7302                if (type == BPF_WRITE) {
7303                        *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
7304                        *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
7305                                              bpf_target_off(struct sk_buff,
7306                                                             queue_mapping,
7307                                                             2, target_size));
7308                } else {
7309                        *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7310                                              bpf_target_off(struct sk_buff,
7311                                                             queue_mapping,
7312                                                             2, target_size));
7313                }
7314                break;
7315
7316        case offsetof(struct __sk_buff, vlan_present):
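                /* vlan_present is a single bit: shift it down to bit 0 if
                 * needed and mask everything else off, so the program sees
                 * exactly 0 or 1.
                 */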
7317                *target_size = 1;
7318                *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
7319                                      PKT_VLAN_PRESENT_OFFSET());
7320                if (PKT_VLAN_PRESENT_BIT)
7321                        *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
7322                if (PKT_VLAN_PRESENT_BIT < 7)
7323                        *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
7324                break;
7325
7326        case offsetof(struct __sk_buff, vlan_tci):
7327                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7328                                      bpf_target_off(struct sk_buff, vlan_tci, 2,
7329                                                     target_size));
7330                break;
7331
7332        case offsetof(struct __sk_buff, cb[0]) ...
7333             offsetofend(struct __sk_buff, cb[4]) - 1:
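                /* __sk_buff.cb[] lives in qdisc_skb_cb.data inside skb->cb;
                 * the arithmetic below preserves the offset within cb[], so
                 * e.g. an access to cb[2] lands at
                 * skb->cb + offsetof(struct qdisc_skb_cb, data) + 8.
                 */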
7334                BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
7335                BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
7336                              offsetof(struct qdisc_skb_cb, data)) %
7337                             sizeof(__u64));
7338
7339                prog->cb_access = 1;
7340                off  = si->off;
7341                off -= offsetof(struct __sk_buff, cb[0]);
7342                off += offsetof(struct sk_buff, cb);
7343                off += offsetof(struct qdisc_skb_cb, data);
7344                if (type == BPF_WRITE)
7345                        *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
7346                                              si->src_reg, off);
7347                else
7348                        *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
7349                                              si->src_reg, off);
7350                break;
7351
7352        case offsetof(struct __sk_buff, tc_classid):
7353                BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
7354
7355                off  = si->off;
7356                off -= offsetof(struct __sk_buff, tc_classid);
7357                off += offsetof(struct sk_buff, cb);
7358                off += offsetof(struct qdisc_skb_cb, tc_classid);
7359                *target_size = 2;
7360                if (type == BPF_WRITE)
7361                        *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
7362                                              si->src_reg, off);
7363                else
7364                        *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
7365                                              si->src_reg, off);
7366                break;
7367
7368        case offsetof(struct __sk_buff, data):
7369                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
7370                                      si->dst_reg, si->src_reg,
7371                                      offsetof(struct sk_buff, data));
7372                break;
7373
7374        case offsetof(struct __sk_buff, data_meta):
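                /* data_meta (and data_end below) are not fields of struct
                 * sk_buff; they are cached in skb->cb as a struct
                 * bpf_skb_data_end before the program runs.
                 */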
7375                off  = si->off;
7376                off -= offsetof(struct __sk_buff, data_meta);
7377                off += offsetof(struct sk_buff, cb);
7378                off += offsetof(struct bpf_skb_data_end, data_meta);
7379                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7380                                      si->src_reg, off);
7381                break;
7382
7383        case offsetof(struct __sk_buff, data_end):
7384                off  = si->off;
7385                off -= offsetof(struct __sk_buff, data_end);
7386                off += offsetof(struct sk_buff, cb);
7387                off += offsetof(struct bpf_skb_data_end, data_end);
7388                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7389                                      si->src_reg, off);
7390                break;
7391
7392        case offsetof(struct __sk_buff, tc_index):
7393#ifdef CONFIG_NET_SCHED
7394                if (type == BPF_WRITE)
7395                        *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
7396                                              bpf_target_off(struct sk_buff, tc_index, 2,
7397                                                             target_size));
7398                else
7399                        *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7400                                              bpf_target_off(struct sk_buff, tc_index, 2,
7401                                                             target_size));
7402#else
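                /* Without CONFIG_NET_SCHED there is no tc_index: writes
                 * degrade to a no-op register move and reads yield 0.
                 */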
7403                *target_size = 2;
7404                if (type == BPF_WRITE)
7405                        *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
7406                else
7407                        *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7408#endif
7409                break;
7410
7411        case offsetof(struct __sk_buff, napi_id):
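                /* Values below MIN_NAPI_ID are not valid NAPI IDs, so the
                 * JGE/MOV pair below makes them read back as 0.
                 */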
7412#if defined(CONFIG_NET_RX_BUSY_POLL)
7413                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7414                                      bpf_target_off(struct sk_buff, napi_id, 4,
7415                                                     target_size));
7416                *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
7417                *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7418#else
7419                *target_size = 4;
7420                *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7421#endif
7422                break;
7423        case offsetof(struct __sk_buff, family):
7424                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
7425
7426                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7427                                      si->dst_reg, si->src_reg,
7428                                      offsetof(struct sk_buff, sk));
7429                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7430                                      bpf_target_off(struct sock_common,
7431                                                     skc_family,
7432                                                     2, target_size));
7433                break;
7434        case offsetof(struct __sk_buff, remote_ip4):
7435                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
7436
7437                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7438                                      si->dst_reg, si->src_reg,
7439                                      offsetof(struct sk_buff, sk));
7440                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7441                                      bpf_target_off(struct sock_common,
7442                                                     skc_daddr,
7443                                                     4, target_size));
7444                break;
7445        case offsetof(struct __sk_buff, local_ip4):
7446                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7447                                          skc_rcv_saddr) != 4);
7448
7449                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7450                                      si->dst_reg, si->src_reg,
7451                                      offsetof(struct sk_buff, sk));
7452                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7453                                      bpf_target_off(struct sock_common,
7454                                                     skc_rcv_saddr,
7455                                                     4, target_size));
7456                break;
7457        case offsetof(struct __sk_buff, remote_ip6[0]) ...
7458             offsetof(struct __sk_buff, remote_ip6[3]):
7459#if IS_ENABLED(CONFIG_IPV6)
7460                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7461                                          skc_v6_daddr.s6_addr32[0]) != 4);
7462
7463                off = si->off;
7464                off -= offsetof(struct __sk_buff, remote_ip6[0]);
7465
7466                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7467                                      si->dst_reg, si->src_reg,
7468                                      offsetof(struct sk_buff, sk));
7469                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7470                                      offsetof(struct sock_common,
7471                                               skc_v6_daddr.s6_addr32[0]) +
7472                                      off);
7473#else
7474                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7475#endif
7476                break;
7477        case offsetof(struct __sk_buff, local_ip6[0]) ...
7478             offsetof(struct __sk_buff, local_ip6[3]):
7479#if IS_ENABLED(CONFIG_IPV6)
7480                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7481                                          skc_v6_rcv_saddr.s6_addr32[0]) != 4);
7482
7483                off = si->off;
7484                off -= offsetof(struct __sk_buff, local_ip6[0]);
7485
7486                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7487                                      si->dst_reg, si->src_reg,
7488                                      offsetof(struct sk_buff, sk));
7489                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7490                                      offsetof(struct sock_common,
7491                                               skc_v6_rcv_saddr.s6_addr32[0]) +
7492                                      off);
7493#else
7494                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7495#endif
7496                break;
7497
7498        case offsetof(struct __sk_buff, remote_port):
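                /* skc_dport is stored in network byte order. On little-endian
                 * hosts the 16-bit load is shifted up so that the resulting
                 * 32-bit value, interpreted as big-endian, equals the port on
                 * either host endianness.
                 */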
7499                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
7500
7501                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7502                                      si->dst_reg, si->src_reg,
7503                                      offsetof(struct sk_buff, sk));
7504                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7505                                      bpf_target_off(struct sock_common,
7506                                                     skc_dport,
7507                                                     2, target_size));
7508#ifndef __BIG_ENDIAN_BITFIELD
7509                *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
7510#endif
7511                break;
7512
7513        case offsetof(struct __sk_buff, local_port):
7514                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
7515
7516                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7517                                      si->dst_reg, si->src_reg,
7518                                      offsetof(struct sk_buff, sk));
7519                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7520                                      bpf_target_off(struct sock_common,
7521                                                     skc_num, 2, target_size));
7522                break;
7523
7524        case offsetof(struct __sk_buff, tstamp):
7525                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
7526
7527                if (type == BPF_WRITE)
7528                        *insn++ = BPF_STX_MEM(BPF_DW,
7529                                              si->dst_reg, si->src_reg,
7530                                              bpf_target_off(struct sk_buff,
7531                                                             tstamp, 8,
7532                                                             target_size));
7533                else
7534                        *insn++ = BPF_LDX_MEM(BPF_DW,
7535                                              si->dst_reg, si->src_reg,
7536                                              bpf_target_off(struct sk_buff,
7537                                                             tstamp, 8,
7538                                                             target_size));
7539                break;
7540
7541        case offsetof(struct __sk_buff, gso_segs):
7542                /* si->dst_reg = skb_shinfo(SKB); */
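                /* With NET_SKBUFF_DATA_USES_OFFSET, skb->end is an offset
                 * from skb->head, so skb_shinfo() is head + end; otherwise
                 * skb->end is already the pointer.
                 */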
7543#ifdef NET_SKBUFF_DATA_USES_OFFSET
7544                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7545                                      BPF_REG_AX, si->src_reg,
7546                                      offsetof(struct sk_buff, end));
7547                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7548                                      si->dst_reg, si->src_reg,
7549                                      offsetof(struct sk_buff, head));
7550                *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
7551#else
7552                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7553                                      si->dst_reg, si->src_reg,
7554                                      offsetof(struct sk_buff, end));
7555#endif
7556                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
7557                                      si->dst_reg, si->dst_reg,
7558                                      bpf_target_off(struct skb_shared_info,
7559                                                     gso_segs, 2,
7560                                                     target_size));
7561                break;
7562        case offsetof(struct __sk_buff, wire_len):
7563                BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
7564
7565                off = si->off;
7566                off -= offsetof(struct __sk_buff, wire_len);
7567                off += offsetof(struct sk_buff, cb);
7568                off += offsetof(struct qdisc_skb_cb, pkt_len);
7569                *target_size = 4;
7570                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
7571                break;
7572
7573        case offsetof(struct __sk_buff, sk):
7574                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7575                                      si->dst_reg, si->src_reg,
7576                                      offsetof(struct sk_buff, sk));
7577                break;
7578        }
7579
7580        return insn - insn_buf;
7581}
7582
7583u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
7584                                const struct bpf_insn *si,
7585                                struct bpf_insn *insn_buf,
7586                                struct bpf_prog *prog, u32 *target_size)
7587{
7588        struct bpf_insn *insn = insn_buf;
7589        int off;
7590
7591        switch (si->off) {
7592        case offsetof(struct bpf_sock, bound_dev_if):
7593                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
7594
7595                if (type == BPF_WRITE)
7596                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7597                                        offsetof(struct sock, sk_bound_dev_if));
7598                else
7599                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7600                                      offsetof(struct sock, sk_bound_dev_if));
7601                break;
7602
7603        case offsetof(struct bpf_sock, mark):
7604                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
7605
7606                if (type == BPF_WRITE)
7607                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7608                                        offsetof(struct sock, sk_mark));
7609                else
7610                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7611                                      offsetof(struct sock, sk_mark));
7612                break;
7613
7614        case offsetof(struct bpf_sock, priority):
7615                BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
7616
7617                if (type == BPF_WRITE)
7618                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7619                                        offsetof(struct sock, sk_priority));
7620                else
7621                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7622                                      offsetof(struct sock, sk_priority));
7623                break;
7624
7625        case offsetof(struct bpf_sock, family):
7626                *insn++ = BPF_LDX_MEM(
7627                        BPF_FIELD_SIZEOF(struct sock_common, skc_family),
7628                        si->dst_reg, si->src_reg,
7629                        bpf_target_off(struct sock_common,
7630                                       skc_family,
7631                                       FIELD_SIZEOF(struct sock_common,
7632                                                    skc_family),
7633                                       target_size));
7634                break;
7635
7636        case offsetof(struct bpf_sock, type):
7637                BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
7638                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7639                                      offsetof(struct sock, __sk_flags_offset));
7640                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7641                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
7642                *target_size = 2;
7643                break;
7644
7645        case offsetof(struct bpf_sock, protocol):
7646                BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
7647                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7648                                      offsetof(struct sock, __sk_flags_offset));
7649                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7650                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
7651                *target_size = 1;
7652                break;
7653
7654        case offsetof(struct bpf_sock, src_ip4):
7655                *insn++ = BPF_LDX_MEM(
7656                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7657                        bpf_target_off(struct sock_common, skc_rcv_saddr,
7658                                       FIELD_SIZEOF(struct sock_common,
7659                                                    skc_rcv_saddr),
7660                                       target_size));
7661                break;
7662
7663        case offsetof(struct bpf_sock, dst_ip4):
7664                *insn++ = BPF_LDX_MEM(
7665                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7666                        bpf_target_off(struct sock_common, skc_daddr,
7667                                       FIELD_SIZEOF(struct sock_common,
7668                                                    skc_daddr),
7669                                       target_size));
7670                break;
7671
7672        case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7673#if IS_ENABLED(CONFIG_IPV6)
7674                off = si->off;
7675                off -= offsetof(struct bpf_sock, src_ip6[0]);
7676                *insn++ = BPF_LDX_MEM(
7677                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7678                        bpf_target_off(
7679                                struct sock_common,
7680                                skc_v6_rcv_saddr.s6_addr32[0],
7681                                FIELD_SIZEOF(struct sock_common,
7682                                             skc_v6_rcv_saddr.s6_addr32[0]),
7683                                target_size) + off);
7684#else
7685                (void)off;
7686                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7687#endif
7688                break;
7689
7690        case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
7691#if IS_ENABLED(CONFIG_IPV6)
7692                off = si->off;
7693                off -= offsetof(struct bpf_sock, dst_ip6[0]);
7694                *insn++ = BPF_LDX_MEM(
7695                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7696                        bpf_target_off(struct sock_common,
7697                                       skc_v6_daddr.s6_addr32[0],
7698                                       FIELD_SIZEOF(struct sock_common,
7699                                                    skc_v6_daddr.s6_addr32[0]),
7700                                       target_size) + off);
7701#else
7702                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7703                *target_size = 4;
7704#endif
7705                break;
7706
7707        case offsetof(struct bpf_sock, src_port):
7708                *insn++ = BPF_LDX_MEM(
7709                        BPF_FIELD_SIZEOF(struct sock_common, skc_num),
7710                        si->dst_reg, si->src_reg,
7711                        bpf_target_off(struct sock_common, skc_num,
7712                                       FIELD_SIZEOF(struct sock_common,
7713                                                    skc_num),
7714                                       target_size));
7715                break;
7716
7717        case offsetof(struct bpf_sock, dst_port):
7718                *insn++ = BPF_LDX_MEM(
7719                        BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
7720                        si->dst_reg, si->src_reg,
7721                        bpf_target_off(struct sock_common, skc_dport,
7722                                       FIELD_SIZEOF(struct sock_common,
7723                                                    skc_dport),
7724                                       target_size));
7725                break;
7726
7727        case offsetof(struct bpf_sock, state):
7728                *insn++ = BPF_LDX_MEM(
7729                        BPF_FIELD_SIZEOF(struct sock_common, skc_state),
7730                        si->dst_reg, si->src_reg,
7731                        bpf_target_off(struct sock_common, skc_state,
7732                                       FIELD_SIZEOF(struct sock_common,
7733                                                    skc_state),
7734                                       target_size));
7735                break;
7736        }
7737
7738        return insn - insn_buf;
7739}
7740
7741static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
7742                                         const struct bpf_insn *si,
7743                                         struct bpf_insn *insn_buf,
7744                                         struct bpf_prog *prog, u32 *target_size)
7745{
7746        struct bpf_insn *insn = insn_buf;
7747
7748        switch (si->off) {
7749        case offsetof(struct __sk_buff, ifindex):
7750                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
7751                                      si->dst_reg, si->src_reg,
7752                                      offsetof(struct sk_buff, dev));
7753                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7754                                      bpf_target_off(struct net_device, ifindex, 4,
7755                                                     target_size));
7756                break;
7757        default:
7758                return bpf_convert_ctx_access(type, si, insn_buf, prog,
7759                                              target_size);
7760        }
7761
7762        return insn - insn_buf;
7763}
7764
7765static u32 xdp_convert_ctx_access(enum bpf_access_type type,
7766                                  const struct bpf_insn *si,
7767                                  struct bpf_insn *insn_buf,
7768                                  struct bpf_prog *prog, u32 *target_size)
7769{
7770        struct bpf_insn *insn = insn_buf;
7771
7772        switch (si->off) {
7773        case offsetof(struct xdp_md, data):
7774                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
7775                                      si->dst_reg, si->src_reg,
7776                                      offsetof(struct xdp_buff, data));
7777                break;
7778        case offsetof(struct xdp_md, data_meta):
7779                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
7780                                      si->dst_reg, si->src_reg,
7781                                      offsetof(struct xdp_buff, data_meta));
7782                break;
7783        case offsetof(struct xdp_md, data_end):
7784                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
7785                                      si->dst_reg, si->src_reg,
7786                                      offsetof(struct xdp_buff, data_end));
7787                break;
7788        case offsetof(struct xdp_md, ingress_ifindex):
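                /* dst_reg = xdp->rxq->dev->ifindex; unlike the skb->dev case
                 * above, no NULL checks are emitted, since rxq and rxq->dev
                 * are set up before an XDP program can run on the queue.
                 */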
7789                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7790                                      si->dst_reg, si->src_reg,
7791                                      offsetof(struct xdp_buff, rxq));
7792                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
7793                                      si->dst_reg, si->dst_reg,
7794                                      offsetof(struct xdp_rxq_info, dev));
7795                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7796                                      offsetof(struct net_device, ifindex));
7797                break;
7798        case offsetof(struct xdp_md, rx_queue_index):
7799                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7800                                      si->dst_reg, si->src_reg,
7801                                      offsetof(struct xdp_buff, rxq));
7802                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7803                                      offsetof(struct xdp_rxq_info,
7804                                               queue_index));
7805                break;
7806        }
7807
7808        return insn - insn_buf;
7809}
7810
7811/* SOCK_ADDR_LOAD_NESTED_FIELD() loads the nested field S.F.NF, where S is
7812 * the type of the context structure, F is the field in it that contains a
7813 * pointer to the nested structure of type NS, and NF is the field of NS.
7814 *
7815 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to
7816 * make sure that SIZE is not greater than the actual size of S.F.NF.
7817 *
7818 * If offset OFF is provided, the load happens from that offset relative to
7819 * the offset of NF.
7820 */
7821#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)          \
7822        do {                                                                   \
7823                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg,     \
7824                                      si->src_reg, offsetof(S, F));            \
7825                *insn++ = BPF_LDX_MEM(                                         \
7826                        SIZE, si->dst_reg, si->dst_reg,                        \
7827                        bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
7828                                       target_size)                            \
7829                                + OFF);                                        \
7830        } while (0)
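/* A rough sketch of the loads this expands to, e.g. for
 * SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, struct sockaddr,
 *			       uaddr, sa_family):
 *
 *	dst_reg = ((struct bpf_sock_addr_kern *)src_reg)->uaddr;
 *	dst_reg = ((struct sockaddr *)dst_reg)->sa_family;
 */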
7831
7832#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)                              \
7833        SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,                     \
7834                                             BPF_FIELD_SIZEOF(NS, NF), 0)
7835
7836/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
7837 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operations.
7838 *
7839 * In addition it uses the temporary field TF (a member of struct S) as a 3rd
7840 * "register", since the two registers available in convert_ctx_access are
7841 * not enough: we can override neither SRC, since it contains the value to
7842 * store, nor DST, since it contains the pointer to the context that may be
7843 * used by later instructions. So we need a temporary place to save the
7844 * pointer to the nested structure whose field we want to store to.
7845 */
7846#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF)          \
7847        do {                                                                   \
7848                int tmp_reg = BPF_REG_9;                                       \
7849                if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
7850                        --tmp_reg;                                             \
7851                if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
7852                        --tmp_reg;                                             \
7853                *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,            \
7854                                      offsetof(S, TF));                        \
7855                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,         \
7856                                      si->dst_reg, offsetof(S, F));            \
7857                *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,              \
7858                        bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
7859                                       target_size)                            \
7860                                + OFF);                                        \
7861                *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,            \
7862                                      offsetof(S, TF));                        \
7863        } while (0)
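/* Roughly, with tmp_reg chosen to collide with neither src_reg nor dst_reg:
 *
 *	ctx->TF = tmp_reg;			// spill the borrowed register
 *	tmp_reg = ((S *)ctx)->F;		// pointer to nested struct NS
 *	*(SIZE *)((void *)tmp_reg + off(NF) + OFF) = src_reg;
 *	tmp_reg = ctx->TF;			// restore its old value
 */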
7864
7865#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
7866                                                      TF)                      \
7867        do {                                                                   \
7868                if (type == BPF_WRITE) {                                       \
7869                        SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE,   \
7870                                                         OFF, TF);             \
7871                } else {                                                       \
7872                        SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(                  \
7873                                S, NS, F, NF, SIZE, OFF);  \
7874                }                                                              \
7875        } while (0)
7876
7877#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)                 \
7878        SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(                         \
7879                S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
7880
7881static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
7882                                        const struct bpf_insn *si,
7883                                        struct bpf_insn *insn_buf,
7884                                        struct bpf_prog *prog, u32 *target_size)
7885{
7886        struct bpf_insn *insn = insn_buf;
7887        int off;
7888
7889        switch (si->off) {
7890        case offsetof(struct bpf_sock_addr, user_family):
7891                SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7892                                            struct sockaddr, uaddr, sa_family);
7893                break;
7894
7895        case offsetof(struct bpf_sock_addr, user_ip4):
7896                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7897                        struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
7898                        sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
7899                break;
7900
7901        case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
7902                off = si->off;
7903                off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
7904                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7905                        struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
7906                        sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
7907                        tmp_reg);
7908                break;
7909
7910        case offsetof(struct bpf_sock_addr, user_port):
7911                /* To get the port we would need to know sa_family first
7912                 * and then treat sockaddr as either sockaddr_in or
7913                 * sockaddr_in6. We can simplify, though, since the port
7914                 * field has the same offset and size in both structures.
7915                 * The BUILD_BUG_ON()s below check this invariant, so a
7916                 * single structure works for both address families.
7917                 */
7918                BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
7919                             offsetof(struct sockaddr_in6, sin6_port));
7920                BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
7921                             FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
7922                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
7923                                                     struct sockaddr_in6, uaddr,
7924                                                     sin6_port, tmp_reg);
7925                break;
7926
7927        case offsetof(struct bpf_sock_addr, family):
7928                SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7929                                            struct sock, sk, sk_family);
7930                break;
7931
7932        case offsetof(struct bpf_sock_addr, type):
7933                SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7934                        struct bpf_sock_addr_kern, struct sock, sk,
7935                        __sk_flags_offset, BPF_W, 0);
7936                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7937                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
7938                break;
7939
7940        case offsetof(struct bpf_sock_addr, protocol):
7941                SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7942                        struct bpf_sock_addr_kern, struct sock, sk,
7943                        __sk_flags_offset, BPF_W, 0);
7944                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7945                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
7946                                        SK_FL_PROTO_SHIFT);
7947                break;
7948
7949        case offsetof(struct bpf_sock_addr, msg_src_ip4):
7950                /* Treat t_ctx as struct in_addr for msg_src_ip4. */
7951                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7952                        struct bpf_sock_addr_kern, struct in_addr, t_ctx,
7953                        s_addr, BPF_SIZE(si->code), 0, tmp_reg);
7954                break;
7955
7956        case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
7957                                msg_src_ip6[3]):
7958                off = si->off;
7959                off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
7960                /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
7961                SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7962                        struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
7963                        s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
7964                break;
7965        case offsetof(struct bpf_sock_addr, sk):
7966                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
7967                                      si->dst_reg, si->src_reg,
7968                                      offsetof(struct bpf_sock_addr_kern, sk));
7969                break;
7970        }
7971
7972        return insn - insn_buf;
7973}
7974
7975static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
7976                                       const struct bpf_insn *si,
7977                                       struct bpf_insn *insn_buf,
7978                                       struct bpf_prog *prog,
7979                                       u32 *target_size)
7980{
7981        struct bpf_insn *insn = insn_buf;
7982        int off;
7983
7984/* Helper macro for adding read access to tcp_sock or sock fields. */
7985#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
7986        do {                                                                  \
7987                BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
7988                             FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
7989                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
7990                                                struct bpf_sock_ops_kern,     \
7991                                                is_fullsock),                 \
7992                                      si->dst_reg, si->src_reg,               \
7993                                      offsetof(struct bpf_sock_ops_kern,      \
7994                                               is_fullsock));                 \
7995                *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);            \
7996                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
7997                                                struct bpf_sock_ops_kern, sk),\
7998                                      si->dst_reg, si->src_reg,               \
7999                                      offsetof(struct bpf_sock_ops_kern, sk));\
8000                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,                   \
8001                                                       OBJ_FIELD),            \
8002                                      si->dst_reg, si->dst_reg,               \
8003                                      offsetof(OBJ, OBJ_FIELD));              \
8004        } while (0)
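/* Roughly:
 *
 *	dst_reg = bpf_sock_ops_kern->is_fullsock;
 *	if (dst_reg) {
 *		dst_reg = bpf_sock_ops_kern->sk;
 *		dst_reg = ((OBJ *)dst_reg)->OBJ_FIELD;
 *	}
 *
 * so on a non-full socket the field reads as 0.
 */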
8005
8006#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
8007                SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
8008
8009/* Helper macro for adding write access to tcp_sock or sock fields.
8010 * The macro is called with two registers: dst_reg, which contains a pointer
8011 * to ctx (the context), and src_reg, which contains the value that should be
8012 * stored. However, we need an additional register since we cannot overwrite
8013 * dst_reg, as it may be used later in the program.
8014 * Instead we "borrow" one of the other registers: we first save its value
8015 * into the (temp) field in bpf_sock_ops_kern, use it, and then restore
8016 * it at the end of the macro.
8017 */
8018#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
8019        do {                                                                  \
8020                int reg = BPF_REG_9;                                          \
8021                BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
8022                             FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
8023                if (si->dst_reg == reg || si->src_reg == reg)                 \
8024                        reg--;                                                \
8025                if (si->dst_reg == reg || si->src_reg == reg)                 \
8026                        reg--;                                                \
8027                *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,               \
8028                                      offsetof(struct bpf_sock_ops_kern,      \
8029                                               temp));                        \
8030                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
8031                                                struct bpf_sock_ops_kern,     \
8032                                                is_fullsock),                 \
8033                                      reg, si->dst_reg,                       \
8034                                      offsetof(struct bpf_sock_ops_kern,      \
8035                                               is_fullsock));                 \
8036                *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
8037                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
8038                                                struct bpf_sock_ops_kern, sk),\
8039                                      reg, si->dst_reg,                       \
8040                                      offsetof(struct bpf_sock_ops_kern, sk));\
8041                *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),       \
8042                                      reg, si->src_reg,                       \
8043                                      offsetof(OBJ, OBJ_FIELD));              \
8044                *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,               \
8045                                      offsetof(struct bpf_sock_ops_kern,      \
8046                                               temp));                        \
8047        } while (0)
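/* Roughly, mirroring SOCK_OPS_GET_FIELD() with the borrowed register spilled
 * into bpf_sock_ops_kern.temp around the access:
 *
 *	kern->temp = reg;
 *	reg = kern->is_fullsock;
 *	if (reg) {
 *		reg = kern->sk;
 *		((OBJ *)reg)->OBJ_FIELD = src_reg;
 *	}
 *	reg = kern->temp;
 */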
8048
8049#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)            \
8050        do {                                                                  \
8051                if (TYPE == BPF_WRITE)                                        \
8052                        SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
8053                else                                                          \
8054                        SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
8055        } while (0)
8056
8057        if (insn > insn_buf)
8058                return insn - insn_buf;
8059
8060        switch (si->off) {
8061        case offsetof(struct bpf_sock_ops, op) ...
8062             offsetof(struct bpf_sock_ops, replylong[3]):
8063                BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
8064                             FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
8065                BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
8066                             FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
8067                BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
8068                             FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
8069                off = si->off;
8070                off -= offsetof(struct bpf_sock_ops, op);
8071                off += offsetof(struct bpf_sock_ops_kern, op);
8072                if (type == BPF_WRITE)
8073                        *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8074                                              off);
8075                else
8076                        *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8077                                              off);
8078                break;
8079
8080        case offsetof(struct bpf_sock_ops, family):
8081                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
8082
8083                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8084                                              struct bpf_sock_ops_kern, sk),
8085                                      si->dst_reg, si->src_reg,
8086                                      offsetof(struct bpf_sock_ops_kern, sk));
8087                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8088                                      offsetof(struct sock_common, skc_family));
8089                break;
8090
8091        case offsetof(struct bpf_sock_ops, remote_ip4):
8092                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
8093
8094                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8095                                                struct bpf_sock_ops_kern, sk),
8096                                      si->dst_reg, si->src_reg,
8097                                      offsetof(struct bpf_sock_ops_kern, sk));
8098                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8099                                      offsetof(struct sock_common, skc_daddr));
8100                break;
8101
8102        case offsetof(struct bpf_sock_ops, local_ip4):
8103                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8104                                          skc_rcv_saddr) != 4);
8105
8106                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8107                                              struct bpf_sock_ops_kern, sk),
8108                                      si->dst_reg, si->src_reg,
8109                                      offsetof(struct bpf_sock_ops_kern, sk));
8110                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8111                                      offsetof(struct sock_common,
8112                                               skc_rcv_saddr));
8113                break;
8114
8115        case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
8116             offsetof(struct bpf_sock_ops, remote_ip6[3]):
8117#if IS_ENABLED(CONFIG_IPV6)
8118                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8119                                          skc_v6_daddr.s6_addr32[0]) != 4);
8120
8121                off = si->off;
8122                off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
8123                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8124                                                struct bpf_sock_ops_kern, sk),
8125                                      si->dst_reg, si->src_reg,
8126                                      offsetof(struct bpf_sock_ops_kern, sk));
8127                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8128                                      offsetof(struct sock_common,
8129                                               skc_v6_daddr.s6_addr32[0]) +
8130                                      off);
8131#else
8132                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8133#endif
8134                break;
8135
8136        case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
8137             offsetof(struct bpf_sock_ops, local_ip6[3]):
8138#if IS_ENABLED(CONFIG_IPV6)
8139                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8140                                          skc_v6_rcv_saddr.s6_addr32[0]) != 4);
8141
8142                off = si->off;
8143                off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
8144                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8145                                                struct bpf_sock_ops_kern, sk),
8146                                      si->dst_reg, si->src_reg,
8147                                      offsetof(struct bpf_sock_ops_kern, sk));
8148                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8149                                      offsetof(struct sock_common,
8150                                               skc_v6_rcv_saddr.s6_addr32[0]) +
8151                                      off);
8152#else
8153                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8154#endif
8155                break;
8156
8157        case offsetof(struct bpf_sock_ops, remote_port):
8158                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
8159
8160                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8161                                                struct bpf_sock_ops_kern, sk),
8162                                      si->dst_reg, si->src_reg,
8163                                      offsetof(struct bpf_sock_ops_kern, sk));
8164                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8165                                      offsetof(struct sock_common, skc_dport));
8166#ifndef __BIG_ENDIAN_BITFIELD
8167                *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8168#endif
8169                break;
8170
8171        case offsetof(struct bpf_sock_ops, local_port):
8172                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
8173
8174                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8175                                                struct bpf_sock_ops_kern, sk),
8176                                      si->dst_reg, si->src_reg,
8177                                      offsetof(struct bpf_sock_ops_kern, sk));
8178                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8179                                      offsetof(struct sock_common, skc_num));
8180                break;
8181
8182        case offsetof(struct bpf_sock_ops, is_fullsock):
8183                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8184                                                struct bpf_sock_ops_kern,
8185                                                is_fullsock),
8186                                      si->dst_reg, si->src_reg,
8187                                      offsetof(struct bpf_sock_ops_kern,
8188                                               is_fullsock));
8189                break;
8190
8191        case offsetof(struct bpf_sock_ops, state):
8192                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
8193
8194                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8195                                                struct bpf_sock_ops_kern, sk),
8196                                      si->dst_reg, si->src_reg,
8197                                      offsetof(struct bpf_sock_ops_kern, sk));
8198                *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
8199                                      offsetof(struct sock_common, skc_state));
8200                break;
8201
8202        case offsetof(struct bpf_sock_ops, rtt_min):
8203                BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
8204                             sizeof(struct minmax));
8205                BUILD_BUG_ON(sizeof(struct minmax) <
8206                             sizeof(struct minmax_sample));
8207
8208                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8209                                                struct bpf_sock_ops_kern, sk),
8210                                      si->dst_reg, si->src_reg,
8211                                      offsetof(struct bpf_sock_ops_kern, sk));
8212                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8213                                      offsetof(struct tcp_sock, rtt_min) +
8214                                      FIELD_SIZEOF(struct minmax_sample, t));
8215                break;
8216
8217        case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
8218                SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
8219                                   struct tcp_sock);
8220                break;
8221
8222        case offsetof(struct bpf_sock_ops, sk_txhash):
8223                SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
8224                                          struct sock, type);
8225                break;
8226        case offsetof(struct bpf_sock_ops, snd_cwnd):
8227                SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
8228                break;
8229        case offsetof(struct bpf_sock_ops, srtt_us):
8230                SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
8231                break;
8232        case offsetof(struct bpf_sock_ops, snd_ssthresh):
8233                SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
8234                break;
8235        case offsetof(struct bpf_sock_ops, rcv_nxt):
8236                SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
8237                break;
8238        case offsetof(struct bpf_sock_ops, snd_nxt):
8239                SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
8240                break;
8241        case offsetof(struct bpf_sock_ops, snd_una):
8242                SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
8243                break;
8244        case offsetof(struct bpf_sock_ops, mss_cache):
8245                SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
8246                break;
8247        case offsetof(struct bpf_sock_ops, ecn_flags):
8248                SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
8249                break;
8250        case offsetof(struct bpf_sock_ops, rate_delivered):
8251                SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
8252                break;
8253        case offsetof(struct bpf_sock_ops, rate_interval_us):
8254                SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
8255                break;
8256        case offsetof(struct bpf_sock_ops, packets_out):
8257                SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
8258                break;
8259        case offsetof(struct bpf_sock_ops, retrans_out):
8260                SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
8261                break;
8262        case offsetof(struct bpf_sock_ops, total_retrans):
8263                SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
8264                break;
8265        case offsetof(struct bpf_sock_ops, segs_in):
8266                SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
8267                break;
8268        case offsetof(struct bpf_sock_ops, data_segs_in):
8269                SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
8270                break;
8271        case offsetof(struct bpf_sock_ops, segs_out):
8272                SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
8273                break;
8274        case offsetof(struct bpf_sock_ops, data_segs_out):
8275                SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
8276                break;
8277        case offsetof(struct bpf_sock_ops, lost_out):
8278                SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
8279                break;
8280        case offsetof(struct bpf_sock_ops, sacked_out):
8281                SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
8282                break;
8283        case offsetof(struct bpf_sock_ops, bytes_received):
8284                SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
8285                break;
8286        case offsetof(struct bpf_sock_ops, bytes_acked):
8287                SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
8288                break;
8289        case offsetof(struct bpf_sock_ops, sk):
8290                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8291                                                struct bpf_sock_ops_kern,
8292                                                is_fullsock),
8293                                      si->dst_reg, si->src_reg,
8294                                      offsetof(struct bpf_sock_ops_kern,
8295                                               is_fullsock));
8296                *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
8297                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8298                                                struct bpf_sock_ops_kern, sk),
8299                                      si->dst_reg, si->src_reg,
8300                                      offsetof(struct bpf_sock_ops_kern, sk));
8301                break;
8302        }
8303        return insn - insn_buf;
8304}
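/* Usage sketch (illustrative only; section and function names are
 * hypothetical): a sock_ops BPF-C program reads TCP state through the
 * stable struct bpf_sock_ops view, and at load time the verifier
 * replaces each such context load with the sequence emitted by
 * sock_ops_convert_ctx_access() above (for the TCP fields: check
 * is_fullsock, follow the sk pointer, load the struct tcp_sock member).
 *
 *	SEC("sockops")
 *	int watch_cwnd(struct bpf_sock_ops *skops)
 *	{
 *		__u32 cwnd = skops->snd_cwnd;	// rewritten via
 *						// SOCK_OPS_GET_TCP_SOCK_FIELD
 *		return 1;
 *	}
 */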
8305
8306static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
8307                                     const struct bpf_insn *si,
8308                                     struct bpf_insn *insn_buf,
8309                                     struct bpf_prog *prog, u32 *target_size)
8310{
8311        struct bpf_insn *insn = insn_buf;
8312        int off;
8313
8314        switch (si->off) {
8315        case offsetof(struct __sk_buff, data_end):
8316                off  = si->off;
8317                off -= offsetof(struct __sk_buff, data_end);
8318                off += offsetof(struct sk_buff, cb);
8319                off += offsetof(struct tcp_skb_cb, bpf.data_end);
8320                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
8321                                      si->src_reg, off);
8322                break;
8323        default:
8324                return bpf_convert_ctx_access(type, si, insn_buf, prog,
8325                                              target_size);
8326        }
8327
8328        return insn - insn_buf;
8329}
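/* Unlike the generic __sk_buff conversion, data_end for SK_SKB programs
 * is loaded above from the skb control block (tcp_skb_cb.bpf.data_end),
 * where it is stashed before the program runs. Consumer sketch
 * (illustrative only; names are hypothetical):
 *
 *	SEC("sk_skb")
 *	int parse(struct __sk_buff *skb)
 *	{
 *		void *data     = (void *)(long)skb->data;
 *		void *data_end = (void *)(long)skb->data_end; // patched above
 *
 *		if (data + 2 > data_end)	// verifier-visible bounds check
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */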
8330
8331static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
8332                                     const struct bpf_insn *si,
8333                                     struct bpf_insn *insn_buf,
8334                                     struct bpf_prog *prog, u32 *target_size)
8335{
8336        struct bpf_insn *insn = insn_buf;
8337#if IS_ENABLED(CONFIG_IPV6)
8338        int off;
8339#endif
8340
8341        /* ctx conversion relies on the sg element being first in the struct */
8342        BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
8343
8344        switch (si->off) {
8345        case offsetof(struct sk_msg_md, data):
8346                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
8347                                      si->dst_reg, si->src_reg,
8348                                      offsetof(struct sk_msg, data));
8349                break;
8350        case offsetof(struct sk_msg_md, data_end):
8351                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
8352                                      si->dst_reg, si->src_reg,
8353                                      offsetof(struct sk_msg, data_end));
8354                break;
8355        case offsetof(struct sk_msg_md, family):
8356                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
8357
8358                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8359                                              struct sk_msg, sk),
8360                                      si->dst_reg, si->src_reg,
8361                                      offsetof(struct sk_msg, sk));
8362                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8363                                      offsetof(struct sock_common, skc_family));
8364                break;
8365
8366        case offsetof(struct sk_msg_md, remote_ip4):
8367                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
8368
8369                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8370                                                struct sk_msg, sk),
8371                                      si->dst_reg, si->src_reg,
8372                                      offsetof(struct sk_msg, sk));
8373                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8374                                      offsetof(struct sock_common, skc_daddr));
8375                break;
8376
8377        case offsetof(struct sk_msg_md, local_ip4):
8378                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8379                                          skc_rcv_saddr) != 4);
8380
8381                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8382                                              struct sk_msg, sk),
8383                                      si->dst_reg, si->src_reg,
8384                                      offsetof(struct sk_msg, sk));
8385                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8386                                      offsetof(struct sock_common,
8387                                               skc_rcv_saddr));
8388                break;
8389
8390        case offsetof(struct sk_msg_md, remote_ip6[0]) ...
8391             offsetof(struct sk_msg_md, remote_ip6[3]):
8392#if IS_ENABLED(CONFIG_IPV6)
8393                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8394                                          skc_v6_daddr.s6_addr32[0]) != 4);
8395
8396                off = si->off;
8397                off -= offsetof(struct sk_msg_md, remote_ip6[0]);
8398                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8399                                                struct sk_msg, sk),
8400                                      si->dst_reg, si->src_reg,
8401                                      offsetof(struct sk_msg, sk));
8402                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8403                                      offsetof(struct sock_common,
8404                                               skc_v6_daddr.s6_addr32[0]) +
8405                                      off);
8406#else
8407                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8408#endif
8409                break;
8410
8411        case offsetof(struct sk_msg_md, local_ip6[0]) ...
8412             offsetof(struct sk_msg_md, local_ip6[3]):
8413#if IS_ENABLED(CONFIG_IPV6)
8414                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8415                                          skc_v6_rcv_saddr.s6_addr32[0]) != 4);
8416
8417                off = si->off;
8418                off -= offsetof(struct sk_msg_md, local_ip6[0]);
8419                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8420                                                struct sk_msg, sk),
8421                                      si->dst_reg, si->src_reg,
8422                                      offsetof(struct sk_msg, sk));
8423                *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8424                                      offsetof(struct sock_common,
8425                                               skc_v6_rcv_saddr.s6_addr32[0]) +
8426                                      off);
8427#else
8428                *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8429#endif
8430                break;
8431
8432        case offsetof(struct sk_msg_md, remote_port):
8433                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
8434
8435                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8436                                                struct sk_msg, sk),
8437                                      si->dst_reg, si->src_reg,
8438                                      offsetof(struct sk_msg, sk));
8439                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8440                                      offsetof(struct sock_common, skc_dport));
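                /* Assumed rationale: skc_dport is stored in network byte
                 * order, so on little-endian hosts it is shifted into the
                 * upper 16 bits to keep the 32-bit remote_port value seen
                 * by the program in network byte order as well.
                 */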
8441#ifndef __BIG_ENDIAN_BITFIELD
8442                *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8443#endif
8444                break;
8445
8446        case offsetof(struct sk_msg_md, local_port):
8447                BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
8448
8449                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8450                                                struct sk_msg, sk),
8451                                      si->dst_reg, si->src_reg,
8452                                      offsetof(struct sk_msg, sk));
8453                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8454                                      offsetof(struct sock_common, skc_num));
8455                break;
8456
8457        case offsetof(struct sk_msg_md, size):
8458                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
8459                                      si->dst_reg, si->src_reg,
8460                                      offsetof(struct sk_msg_sg, size));
8461                break;
8462        }
8463
8464        return insn - insn_buf;
8465}
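/* Consumer-side sketch of the mapping above (illustrative only; names
 * are hypothetical): data/data_end come straight from struct sk_msg,
 * while the address/port fields are reached through msg->sk.
 *
 *	SEC("sk_msg")
 *	int msg_verdict(struct sk_msg_md *msg)
 *	{
 *		void *data     = (void *)(long)msg->data;
 *		void *data_end = (void *)(long)msg->data_end;
 *
 *		if (data + 4 > data_end)
 *			return SK_PASS;
 *		// first four payload bytes may be inspected here
 *		return SK_PASS;
 *	}
 */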
8466
8467const struct bpf_verifier_ops sk_filter_verifier_ops = {
8468        .get_func_proto         = sk_filter_func_proto,
8469        .is_valid_access        = sk_filter_is_valid_access,
8470        .convert_ctx_access     = bpf_convert_ctx_access,
8471        .gen_ld_abs             = bpf_gen_ld_abs,
8472};
8473
8474const struct bpf_prog_ops sk_filter_prog_ops = {
8475        .test_run               = bpf_prog_test_run_skb,
8476};
8477
8478const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
8479        .get_func_proto         = tc_cls_act_func_proto,
8480        .is_valid_access        = tc_cls_act_is_valid_access,
8481        .convert_ctx_access     = tc_cls_act_convert_ctx_access,
8482        .gen_prologue           = tc_cls_act_prologue,
8483        .gen_ld_abs             = bpf_gen_ld_abs,
8484};
8485
8486const struct bpf_prog_ops tc_cls_act_prog_ops = {
8487        .test_run               = bpf_prog_test_run_skb,
8488};
8489
8490const struct bpf_verifier_ops xdp_verifier_ops = {
8491        .get_func_proto         = xdp_func_proto,
8492        .is_valid_access        = xdp_is_valid_access,
8493        .convert_ctx_access     = xdp_convert_ctx_access,
8494        .gen_prologue           = bpf_noop_prologue,
8495};
8496
8497const struct bpf_prog_ops xdp_prog_ops = {
8498        .test_run               = bpf_prog_test_run_xdp,
8499};
8500
8501const struct bpf_verifier_ops cg_skb_verifier_ops = {
8502        .get_func_proto         = cg_skb_func_proto,
8503        .is_valid_access        = cg_skb_is_valid_access,
8504        .convert_ctx_access     = bpf_convert_ctx_access,
8505};
8506
8507const struct bpf_prog_ops cg_skb_prog_ops = {
8508        .test_run               = bpf_prog_test_run_skb,
8509};
8510
8511const struct bpf_verifier_ops lwt_in_verifier_ops = {
8512        .get_func_proto         = lwt_in_func_proto,
8513        .is_valid_access        = lwt_is_valid_access,
8514        .convert_ctx_access     = bpf_convert_ctx_access,
8515};
8516
8517const struct bpf_prog_ops lwt_in_prog_ops = {
8518        .test_run               = bpf_prog_test_run_skb,
8519};
8520
8521const struct bpf_verifier_ops lwt_out_verifier_ops = {
8522        .get_func_proto         = lwt_out_func_proto,
8523        .is_valid_access        = lwt_is_valid_access,
8524        .convert_ctx_access     = bpf_convert_ctx_access,
8525};
8526
8527const struct bpf_prog_ops lwt_out_prog_ops = {
8528        .test_run               = bpf_prog_test_run_skb,
8529};
8530
8531const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
8532        .get_func_proto         = lwt_xmit_func_proto,
8533        .is_valid_access        = lwt_is_valid_access,
8534        .convert_ctx_access     = bpf_convert_ctx_access,
8535        .gen_prologue           = tc_cls_act_prologue,
8536};
8537
8538const struct bpf_prog_ops lwt_xmit_prog_ops = {
8539        .test_run               = bpf_prog_test_run_skb,
8540};
8541
8542const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
8543        .get_func_proto         = lwt_seg6local_func_proto,
8544        .is_valid_access        = lwt_is_valid_access,
8545        .convert_ctx_access     = bpf_convert_ctx_access,
8546};
8547
8548const struct bpf_prog_ops lwt_seg6local_prog_ops = {
8549        .test_run               = bpf_prog_test_run_skb,
8550};
8551
8552const struct bpf_verifier_ops cg_sock_verifier_ops = {
8553        .get_func_proto         = sock_filter_func_proto,
8554        .is_valid_access        = sock_filter_is_valid_access,
8555        .convert_ctx_access     = bpf_sock_convert_ctx_access,
8556};
8557
8558const struct bpf_prog_ops cg_sock_prog_ops = {
8559};
8560
8561const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
8562        .get_func_proto         = sock_addr_func_proto,
8563        .is_valid_access        = sock_addr_is_valid_access,
8564        .convert_ctx_access     = sock_addr_convert_ctx_access,
8565};
8566
8567const struct bpf_prog_ops cg_sock_addr_prog_ops = {
8568};
8569
8570const struct bpf_verifier_ops sock_ops_verifier_ops = {
8571        .get_func_proto         = sock_ops_func_proto,
8572        .is_valid_access        = sock_ops_is_valid_access,
8573        .convert_ctx_access     = sock_ops_convert_ctx_access,
8574};
8575
8576const struct bpf_prog_ops sock_ops_prog_ops = {
8577};
8578
8579const struct bpf_verifier_ops sk_skb_verifier_ops = {
8580        .get_func_proto         = sk_skb_func_proto,
8581        .is_valid_access        = sk_skb_is_valid_access,
8582        .convert_ctx_access     = sk_skb_convert_ctx_access,
8583        .gen_prologue           = sk_skb_prologue,
8584};
8585
8586const struct bpf_prog_ops sk_skb_prog_ops = {
8587};
8588
8589const struct bpf_verifier_ops sk_msg_verifier_ops = {
8590        .get_func_proto         = sk_msg_func_proto,
8591        .is_valid_access        = sk_msg_is_valid_access,
8592        .convert_ctx_access     = sk_msg_convert_ctx_access,
8593        .gen_prologue           = bpf_noop_prologue,
8594};
8595
8596const struct bpf_prog_ops sk_msg_prog_ops = {
8597};
8598
8599const struct bpf_verifier_ops flow_dissector_verifier_ops = {
8600        .get_func_proto         = flow_dissector_func_proto,
8601        .is_valid_access        = flow_dissector_is_valid_access,
8602        .convert_ctx_access     = flow_dissector_convert_ctx_access,
8603};
8604
8605const struct bpf_prog_ops flow_dissector_prog_ops = {
8606        .test_run               = bpf_prog_test_run_flow_dissector,
8607};
8608
8609int sk_detach_filter(struct sock *sk)
8610{
8611        int ret = -ENOENT;
8612        struct sk_filter *filter;
8613
8614        if (sock_flag(sk, SOCK_FILTER_LOCKED))
8615                return -EPERM;
8616
8617        filter = rcu_dereference_protected(sk->sk_filter,
8618                                           lockdep_sock_is_held(sk));
8619        if (filter) {
8620                RCU_INIT_POINTER(sk->sk_filter, NULL);
8621                sk_filter_uncharge(sk, filter);
8622                ret = 0;
8623        }
8624
8625        return ret;
8626}
8627EXPORT_SYMBOL_GPL(sk_detach_filter);
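/* sk_detach_filter() backs the classic SO_DETACH_FILTER socket option
 * (the option value is ignored). Userspace sketch (illustrative only):
 *
 *	int dummy = 0;
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *		       &dummy, sizeof(dummy)) < 0)
 *		perror("SO_DETACH_FILTER");	// ENOENT: no filter attached
 *						// EPERM: SO_LOCK_FILTER is set
 */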
8628
8629int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
8630                  unsigned int len)
8631{
8632        struct sock_fprog_kern *fprog;
8633        struct sk_filter *filter;
8634        int ret = 0;
8635
8636        lock_sock(sk);
8637        filter = rcu_dereference_protected(sk->sk_filter,
8638                                           lockdep_sock_is_held(sk));
8639        if (!filter)
8640                goto out;
8641
8642        /* We're copying the filter that was originally attached,
8643         * so no conversion/decoding is needed anymore. eBPF programs
8644         * that have no original program cannot be dumped through this.
8645         */
8646        ret = -EACCES;
8647        fprog = filter->prog->orig_prog;
8648        if (!fprog)
8649                goto out;
8650
8651        ret = fprog->len;
8652        if (!len)
8653                /* User space is only asking for the number of filter blocks. */
8654                goto out;
8655
8656        ret = -EINVAL;
8657        if (len < fprog->len)
8658                goto out;
8659
8660        ret = -EFAULT;
8661        if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
8662                goto out;
8663
8664        /* The API requires the number of filter blocks to be
8665         * returned, not the number of bytes copied.
8666         */
8667        ret = fprog->len;
8668out:
8669        release_sock(sk);
8670        return ret;
8671}
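/* This is the kernel side of getsockopt(SO_GET_FILTER). The return-value
 * contract above (block count when len == 0, -EINVAL on a short buffer)
 * suggests the usual two-step retrieval; sketch with error handling
 * elided:
 *
 *	socklen_t len = 0;
 *	struct sock_filter *insns;
 *
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len);
 *	// len now holds the number of filter blocks, not bytes
 *	insns = calloc(len, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &len);
 */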
8672
8673#ifdef CONFIG_INET
8674struct sk_reuseport_kern {
8675        struct sk_buff *skb;
8676        struct sock *sk;
8677        struct sock *selected_sk;
8678        void *data_end;
8679        u32 hash;
8680        u32 reuseport_id;
8681        bool bind_inany;
8682};
8683
8684static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
8685                                    struct sock_reuseport *reuse,
8686                                    struct sock *sk, struct sk_buff *skb,
8687                                    u32 hash)
8688{
8689        reuse_kern->skb = skb;
8690        reuse_kern->sk = sk;
8691        reuse_kern->selected_sk = NULL;
8692        reuse_kern->data_end = skb->data + skb_headlen(skb);
8693        reuse_kern->hash = hash;
8694        reuse_kern->reuseport_id = reuse->reuseport_id;
8695        reuse_kern->bind_inany = reuse->bind_inany;
8696}
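/* Note: data_end above is skb->data + skb_headlen(), so the direct
 * data/data_end window of an SK_REUSEPORT program covers only the
 * linear head of the skb; bytes beyond that must be fetched with the
 * bpf_skb_load_bytes() helpers wired up below.
 */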
8697
8698struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
8699                                  struct bpf_prog *prog, struct sk_buff *skb,
8700                                  u32 hash)
8701{
8702        struct sk_reuseport_kern reuse_kern;
8703        enum sk_action action;
8704
8705        bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
8706        action = BPF_PROG_RUN(prog, &reuse_kern);
8707
8708        if (action == SK_PASS)
8709                return reuse_kern.selected_sk;
8710        else
8711                return ERR_PTR(-ECONNREFUSED);
8712}
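/* Contract of the above: SK_PASS with a NULL selected_sk (the program
 * never called bpf_sk_select_reuseport()) is treated by
 * reuseport_select_sock() as "fall back to hash-based selection",
 * while any other verdict surfaces as ERR_PTR(-ECONNREFUSED).
 */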
8713
8714BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
8715           struct bpf_map *, map, void *, key, u32, flags)
8716{
8717        struct sock_reuseport *reuse;
8718        struct sock *selected_sk;
8719
8720        selected_sk = map->ops->map_lookup_elem(map, key);
8721        if (!selected_sk)
8722                return -ENOENT;
8723
8724        reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
8725        if (!reuse)
8726                /* selected_sk is unhashed (e.g. by close()) after the
8727                 * above map_lookup_elem().  Treat it as if selected_sk
8728                 * has already been removed from the map.
8729                 */
8730                return -ENOENT;
8731
8732        if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
8733                struct sock *sk;
8734
8735                if (unlikely(!reuse_kern->reuseport_id))
8736                        /* There is a small race between adding the
8737                         * sk to the map and setting
8738                         * reuse_kern->reuseport_id.
8739                         * Treat it as if the sk has not been added
8740                         * to the bpf map yet.
8741                         */
8742                        return -ENOENT;
8743
8744                sk = reuse_kern->sk;
8745                if (sk->sk_protocol != selected_sk->sk_protocol)
8746                        return -EPROTOTYPE;
8747                else if (sk->sk_family != selected_sk->sk_family)
8748                        return -EAFNOSUPPORT;
8749
8750                /* Catch all. Likely bound to a different sockaddr. */
8751                return -EBADFD;
8752        }
8753
8754        reuse_kern->selected_sk = selected_sk;
8755
8756        return 0;
8757}
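/* Exposed to SK_REUSEPORT programs as bpf_sk_select_reuseport(). Usage
 * sketch, assuming a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map named
 * socket_map whose entries belong to this reuseport group (names and
 * sizes are hypothetical):
 *
 *	SEC("sk_reuseport")
 *	int select_sock(struct sk_reuseport_md *md)
 *	{
 *		__u32 key = md->hash % 16;	// 16: assumed map size
 *
 *		if (bpf_sk_select_reuseport(md, &socket_map, &key, 0))
 *			return SK_DROP;		// one of the error paths above
 *		return SK_PASS;
 *	}
 */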
8758
8759static const struct bpf_func_proto sk_select_reuseport_proto = {
8760        .func           = sk_select_reuseport,
8761        .gpl_only       = false,
8762        .ret_type       = RET_INTEGER,
8763        .arg1_type      = ARG_PTR_TO_CTX,
8764        .arg2_type      = ARG_CONST_MAP_PTR,
8765        .arg3_type      = ARG_PTR_TO_MAP_KEY,
8766        .arg4_type      = ARG_ANYTHING,
8767};
8768
8769BPF_CALL_4(sk_reuseport_load_bytes,
8770           const struct sk_reuseport_kern *, reuse_kern, u32, offset,
8771           void *, to, u32, len)
8772{
8773        return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
8774}
8775
8776static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
8777        .func           = sk_reuseport_load_bytes,
8778        .gpl_only       = false,
8779        .ret_type       = RET_INTEGER,
8780        .arg1_type      = ARG_PTR_TO_CTX,
8781        .arg2_type      = ARG_ANYTHING,
8782        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
8783        .arg4_type      = ARG_CONST_SIZE,
8784};
8785
8786BPF_CALL_5(sk_reuseport_load_bytes_relative,
8787           const struct sk_reuseport_kern *, reuse_kern, u32, offset,
8788           void *, to, u32, len, u32, start_header)
8789{
8790        return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
8791                                               len, start_header);
8792}
8793
8794static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
8795        .func           = sk_reuseport_load_bytes_relative,
8796        .gpl_only       = false,
8797        .ret_type       = RET_INTEGER,
8798        .arg1_type      = ARG_PTR_TO_CTX,
8799        .arg2_type      = ARG_ANYTHING,
8800        .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
8801        .arg4_type      = ARG_CONST_SIZE,
8802        .arg5_type      = ARG_ANYTHING,
8803};
8804
8805static const struct bpf_func_proto *
8806sk_reuseport_func_proto(enum bpf_func_id func_id,
8807                        const struct bpf_prog *prog)
8808{
8809        switch (func_id) {
8810        case BPF_FUNC_sk_select_reuseport:
8811                return &sk_select_reuseport_proto;
8812        case BPF_FUNC_skb_load_bytes:
8813                return &sk_reuseport_load_bytes_proto;
8814        case BPF_FUNC_skb_load_bytes_relative:
8815                return &sk_reuseport_load_bytes_relative_proto;
8816        default:
8817                return bpf_base_func_proto(func_id);
8818        }
8819}
8820
8821static bool
8822sk_reuseport_is_valid_access(int off, int size,
8823                             enum bpf_access_type type,
8824                             const struct bpf_prog *prog,
8825                             struct bpf_insn_access_aux *info)
8826{
8827        const u32 size_default = sizeof(__u32);
8828
8829        if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
8830            off % size || type != BPF_READ)
8831                return false;
8832
8833        switch (off) {
8834        case offsetof(struct sk_reuseport_md, data):
8835                info->reg_type = PTR_TO_PACKET;
8836                return size == sizeof(__u64);
8837
8838        case offsetof(struct sk_reuseport_md, data_end):
8839                info->reg_type = PTR_TO_PACKET_END;
8840                return size == sizeof(__u64);
8841
8842        case offsetof(struct sk_reuseport_md, hash):
8843                return size == size_default;
8844
8845        /* Fields that allow narrowing */
8846        case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
8847                if (size < FIELD_SIZEOF(struct sk_buff, protocol))
8848                        return false;
8849                /* fall through */
8850        case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
8851        case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
8852        case bpf_ctx_range(struct sk_reuseport_md, len):
8853                bpf_ctx_record_field_size(info, size_default);
8854                return bpf_ctx_narrow_access_ok(off, size, size_default);
8855
8856        default:
8857                return false;
8858        }
8859}
8860
8861#define SK_REUSEPORT_LOAD_FIELD(F) ({                                   \
8862        *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
8863                              si->dst_reg, si->src_reg,                 \
8864                              bpf_target_off(struct sk_reuseport_kern, F, \
8865                                             FIELD_SIZEOF(struct sk_reuseport_kern, F), \
8866                                             target_size));             \
8867        })
8868
8869#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD)                          \
8870        SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,           \
8871                                    struct sk_buff,                     \
8872                                    skb,                                \
8873                                    SKB_FIELD)
8874
8875#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
8876        SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,  \
8877                                             struct sock,               \
8878                                             sk,                        \
8879                                             SK_FIELD, BPF_SIZE, EXTRA_OFF)
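/* The three load macros above differ only in indirection:
 * SK_REUSEPORT_LOAD_FIELD reads a member of struct sk_reuseport_kern
 * itself, ..._LOAD_SKB_FIELD follows the embedded skb pointer first, and
 * ..._LOAD_SK_FIELD_SIZE_OFF follows the sk pointer with an explicit
 * size and extra offset (used for the bitfield-packed protocol below).
 */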
8880
8881static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
8882                                           const struct bpf_insn *si,
8883                                           struct bpf_insn *insn_buf,
8884                                           struct bpf_prog *prog,
8885                                           u32 *target_size)
8886{
8887        struct bpf_insn *insn = insn_buf;
8888
8889        switch (si->off) {
8890        case offsetof(struct sk_reuseport_md, data):
8891                SK_REUSEPORT_LOAD_SKB_FIELD(data);
8892                break;
8893
8894        case offsetof(struct sk_reuseport_md, len):
8895                SK_REUSEPORT_LOAD_SKB_FIELD(len);
8896                break;
8897
8898        case offsetof(struct sk_reuseport_md, eth_protocol):
8899                SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
8900                break;
8901
8902        case offsetof(struct sk_reuseport_md, ip_protocol):
8903                BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
8904                SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
8905                                                    BPF_W, 0);
8906                *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
8907                *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
8908                                        SK_FL_PROTO_SHIFT);
8909                /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
8910                 * aware.  No further narrowing or masking is needed.
8911                 */
8912                *target_size = 1;
8913                break;
8914
8915        case offsetof(struct sk_reuseport_md, data_end):
8916                SK_REUSEPORT_LOAD_FIELD(data_end);
8917                break;
8918
8919        case offsetof(struct sk_reuseport_md, hash):
8920                SK_REUSEPORT_LOAD_FIELD(hash);
8921                break;
8922
8923        case offsetof(struct sk_reuseport_md, bind_inany):
8924                SK_REUSEPORT_LOAD_FIELD(bind_inany);
8925                break;
8926        }
8927
8928        return insn - insn_buf;
8929}
8930
8931const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
8932        .get_func_proto         = sk_reuseport_func_proto,
8933        .is_valid_access        = sk_reuseport_is_valid_access,
8934        .convert_ctx_access     = sk_reuseport_convert_ctx_access,
8935};
8936
8937const struct bpf_prog_ops sk_reuseport_prog_ops = {
8938};
8939#endif /* CONFIG_INET */
8940