// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>
#include <net/tls.h>

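/* Copy up to @len bytes from the psock ingress list into @msg. Walks the
 * scatterlist of each queued sk_msg; fully consumed elements drop their
 * page reference and socket memory charge unless the page is owned by an
 * skb, in which case consume_skb() releases it once the whole message is
 * read. With MSG_PEEK the queue is left intact. Called with the socket
 * lock held.
 */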
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
                      struct msghdr *msg, int len, int flags)
{
        struct iov_iter *iter = &msg->msg_iter;
        int peek = flags & MSG_PEEK;
        struct sk_msg *msg_rx;
        int i, copied = 0;

        msg_rx = list_first_entry_or_null(&psock->ingress_msg,
                                          struct sk_msg, list);

        while (copied != len) {
                struct scatterlist *sge;

                if (unlikely(!msg_rx))
                        break;

                i = msg_rx->sg.start;
                do {
                        struct page *page;
                        int copy;

                        sge = sk_msg_elem(msg_rx, i);
                        copy = sge->length;
                        page = sg_page(sge);
                        if (copied + copy > len)
                                copy = len - copied;
                        copy = copy_page_to_iter(page, sge->offset, copy, iter);
                        if (!copy)
                                return copied ? copied : -EFAULT;

                        copied += copy;
                        if (likely(!peek)) {
                                sge->offset += copy;
                                sge->length -= copy;
                                if (!msg_rx->skb)
                                        sk_mem_uncharge(sk, copy);
                                msg_rx->sg.size -= copy;

                                if (!sge->length) {
                                        sk_msg_iter_var_next(i);
                                        if (!msg_rx->skb)
                                                put_page(page);
                                }
                        } else {
                                /* Don't bother optimizing the peek case:
                                 * if copy_page_to_iter() didn't copy the
                                 * entire length, just break out.
                                 */
                                if (copy != sge->length)
                                        return copied;
                                sk_msg_iter_var_next(i);
                        }

                        if (copied == len)
                                break;
                } while (i != msg_rx->sg.end);

                if (unlikely(peek)) {
                        if (msg_rx == list_last_entry(&psock->ingress_msg,
                                                      struct sk_msg, list))
                                break;
                        msg_rx = list_next_entry(msg_rx, list);
                        continue;
                }

                msg_rx->sg.start = i;
                if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
                        list_del(&msg_rx->list);
                        if (msg_rx->skb)
                                consume_skb(msg_rx->skb);
                        kfree(msg_rx);
                }
                msg_rx = list_first_entry_or_null(&psock->ingress_msg,
                                                  struct sk_msg, list);
        }

        return copied;
}
EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg);

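/* Redirect up to @apply_bytes from @msg onto the ingress queue of @sk.
 * Scatterlist elements are transferred into a freshly allocated sk_msg,
 * the receiving socket is charged for the memory, and the new message is
 * queued on the psock so a later recvmsg() on @sk can read it.
 */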
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
                           struct sk_msg *msg, u32 apply_bytes, int flags)
{
        bool apply = apply_bytes;
        struct scatterlist *sge;
        u32 size, copied = 0;
        struct sk_msg *tmp;
        int i, ret = 0;

        tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
        if (unlikely(!tmp))
                return -ENOMEM;

        lock_sock(sk);
        tmp->sg.start = msg->sg.start;
        i = msg->sg.start;
        do {
                sge = sk_msg_elem(msg, i);
                size = (apply && apply_bytes < sge->length) ?
                        apply_bytes : sge->length;
                if (!sk_wmem_schedule(sk, size)) {
                        if (!copied)
                                ret = -ENOMEM;
                        break;
                }

                sk_mem_charge(sk, size);
                sk_msg_xfer(tmp, msg, i, size);
                copied += size;
                if (sge->length)
                        get_page(sk_msg_page(tmp, i));
                sk_msg_iter_var_next(i);
                tmp->sg.end = i;
                if (apply) {
                        apply_bytes -= size;
                        if (!apply_bytes)
                                break;
                }
        } while (i != msg->sg.end);

        if (!ret) {
                msg->sg.start = i;
                sk_psock_queue_msg(psock, tmp);
                sk_psock_data_ready(sk, psock);
        } else {
                sk_msg_free(sk, tmp);
                kfree(tmp);
        }

        release_sock(sk);
        return ret;
}

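/* Transmit up to @apply_bytes from @msg on @sk, starting at sg.start.
 * Pages go out via do_tcp_sendpages(), or via kernel_sendpage_locked()
 * with MSG_SENDPAGE_NOPOLICY when a TLS TX context is present so the TLS
 * layer does not re-apply the BPF policy. @uncharge controls whether
 * socket memory accounting is released as bytes are sent. Called with
 * the socket lock held.
 */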
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
                        int flags, bool uncharge)
{
        bool apply = apply_bytes;
        struct scatterlist *sge;
        struct page *page;
        int size, ret = 0;
        u32 off;

        while (1) {
                bool has_tx_ulp;

                sge = sk_msg_elem(msg, msg->sg.start);
                size = (apply && apply_bytes < sge->length) ?
                        apply_bytes : sge->length;
                off  = sge->offset;
                page = sg_page(sge);

                tcp_rate_check_app_limited(sk);
retry:
                has_tx_ulp = tls_sw_has_ctx_tx(sk);
                if (has_tx_ulp) {
                        flags |= MSG_SENDPAGE_NOPOLICY;
                        ret = kernel_sendpage_locked(sk,
                                                     page, off, size, flags);
                } else {
                        ret = do_tcp_sendpages(sk, page, off, size, flags);
                }

                if (ret <= 0)
                        return ret;
                if (apply)
                        apply_bytes -= ret;
                msg->sg.size -= ret;
                sge->offset += ret;
                sge->length -= ret;
                if (uncharge)
                        sk_mem_uncharge(sk, ret);
                if (ret != size) {
                        size -= ret;
                        off  += ret;
                        goto retry;
                }
                if (!sge->length) {
                        put_page(page);
                        sk_msg_iter_next(msg, start);
                        sg_init_table(sge, 1);
                        if (msg->sg.start == msg->sg.end)
                                break;
                }
                if (apply && !apply_bytes)
                        break;
        }

        return 0;
}

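/* Same as tcp_bpf_push(), but takes and releases the socket lock around
 * the send. Used when pushing to a socket we do not already hold.
 */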
static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
                               u32 apply_bytes, int flags, bool uncharge)
{
        int ret;

        lock_sock(sk);
        ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
        release_sock(sk);
        return ret;
}

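/* Deliver @bytes of @msg to the redirect target @sk: either onto its
 * ingress queue or out through its transmit path, depending on the
 * BPF_F_INGRESS flag recorded in the msg by the verdict program.
 */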
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
                          u32 bytes, int flags)
{
        bool ingress = sk_msg_to_ingress(msg);
        struct sk_psock *psock = sk_psock_get(sk);
        int ret;

        if (unlikely(!psock)) {
                sk_msg_free(sk, msg);
                return 0;
        }
        ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
                        tcp_bpf_push_locked(sk, msg, bytes, flags, false);
        sk_psock_put(sk, psock);
        return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

#ifdef CONFIG_BPF_STREAM_PARSER
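/* ->stream_memory_read() hook: report data sitting on the psock ingress
 * queue, which is invisible to the regular receive-queue checks used by
 * poll().
 */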
static bool tcp_bpf_stream_read(const struct sock *sk)
{
        struct sk_psock *psock;
        bool empty = true;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock))
                empty = list_empty(&psock->ingress_msg);
        rcu_read_unlock();
        return !empty;
}

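/* Sleep until data shows up on the psock ingress list or the socket
 * receive queue, the timeout expires, or the peer has shut down the
 * receive side. Returns nonzero when data may be available.
 */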
static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
                             int flags, long timeo, int *err)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int ret = 0;

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                return 1;

        if (!timeo)
                return ret;

        add_wait_queue(sk_sleep(sk), &wait);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        ret = sk_wait_event(sk, &timeo,
                            !list_empty(&psock->ingress_msg) ||
                            !skb_queue_empty(&sk->sk_receive_queue), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);
        return ret;
}

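/* ->recvmsg() replacement installed while a psock is attached. Prefers
 * data queued on the psock ingress list and falls back to tcp_recvmsg()
 * when the psock is gone or only the regular receive queue holds data.
 */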
static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                           int nonblock, int flags, int *addr_len)
{
        struct sk_psock *psock;
        int copied, ret;

        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
        if (!skb_queue_empty(&sk->sk_receive_queue) &&
            sk_psock_queue_empty(psock)) {
                sk_psock_put(sk, psock);
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
        }
        lock_sock(sk);
msg_bytes_ready:
        copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
        if (!copied) {
                int data, err = 0;
                long timeo;

                timeo = sock_rcvtimeo(sk, nonblock);
                data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
                if (data) {
                        if (!sk_psock_queue_empty(psock))
                                goto msg_bytes_ready;
                        release_sock(sk);
                        sk_psock_put(sk, psock);
                        return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
                }
                if (err) {
                        ret = err;
                        goto out;
                }
                copied = -EAGAIN;
        }
        ret = copied;
out:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return ret;
}

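/* Run the attached msg verdict program over @msg, honoring cork and
 * apply_bytes state, and carry out its decision: send on @sk (__SK_PASS),
 * redirect to another socket (__SK_REDIRECT), or drop. Loops until the
 * message is exhausted or an error occurs; *copied is adjusted so the
 * caller reports only bytes actually accepted.
 */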
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
                                struct sk_msg *msg, int *copied, int flags)
{
        bool cork = false, enospc = sk_msg_full(msg);
        struct sock *sk_redir;
        u32 tosend, delta = 0;
        int ret;

more_data:
        if (psock->eval == __SK_NONE) {
                /* Track the delta in msg size and subtract it from the
                 * copied count returned to the user on SK_DROP. This
                 * ensures the user doesn't get a positive return code
                 * when msg_cut_data ran and the verdict was SK_DROP.
                 */
                delta = msg->sg.size;
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);
                delta -= msg->sg.size;
        }

        if (msg->cork_bytes &&
            msg->cork_bytes > msg->sg.size && !enospc) {
                psock->cork_bytes = msg->cork_bytes - msg->sg.size;
                if (!psock->cork) {
                        psock->cork = kzalloc(sizeof(*psock->cork),
                                              GFP_ATOMIC | __GFP_NOWARN);
                        if (!psock->cork)
                                return -ENOMEM;
                }
                memcpy(psock->cork, msg, sizeof(*msg));
                return 0;
        }

        tosend = msg->sg.size;
        if (psock->apply_bytes && psock->apply_bytes < tosend)
                tosend = psock->apply_bytes;

        switch (psock->eval) {
        case __SK_PASS:
                ret = tcp_bpf_push(sk, msg, tosend, flags, true);
                if (unlikely(ret)) {
                        *copied -= sk_msg_free(sk, msg);
                        break;
                }
                sk_msg_apply_bytes(psock, tosend);
                break;
        case __SK_REDIRECT:
                sk_redir = psock->sk_redir;
                sk_msg_apply_bytes(psock, tosend);
                if (psock->cork) {
                        cork = true;
                        psock->cork = NULL;
                }
                sk_msg_return(sk, msg, tosend);
                release_sock(sk);
                ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
                lock_sock(sk);
                if (unlikely(ret < 0)) {
                        int free = sk_msg_free_nocharge(sk, msg);

                        if (!cork)
                                *copied -= free;
                }
                if (cork) {
                        sk_msg_free(sk, msg);
                        kfree(msg);
                        msg = NULL;
                        ret = 0;
                }
                break;
        case __SK_DROP:
        default:
                sk_msg_free_partial(sk, msg, tosend);
                sk_msg_apply_bytes(psock, tosend);
                *copied -= (tosend + delta);
                return -EACCES;
        }

        if (likely(!ret)) {
                if (!psock->apply_bytes) {
                        psock->eval = __SK_NONE;
                        if (psock->sk_redir) {
                                sock_put(psock->sk_redir);
                                psock->sk_redir = NULL;
                        }
                }
                if (msg &&
                    msg->sg.data[msg->sg.start].page_link &&
                    msg->sg.data[msg->sg.start].length)
                        goto more_data;
        }
        return ret;
}

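/* ->sendmsg() replacement: copy user data into an sk_msg scatterlist,
 * appending to an open cork buffer if one exists, then hand it to
 * tcp_bpf_send_verdict() once any outstanding cork_bytes are satisfied.
 */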
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        struct sk_msg tmp, *msg_tx = NULL;
        int copied = 0, err = 0;
        struct sk_psock *psock;
        long timeo;
        int flags;

        /* Don't let internal do_tcp_sendpages() flags through */
        flags = (msg->msg_flags & ~MSG_SENDPAGE_DECRYPTED);
        flags |= MSG_NO_SHARED_FRAGS;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_sendmsg(sk, msg, size);

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        while (msg_data_left(msg)) {
                bool enospc = false;
                u32 copy, osize;

                if (sk->sk_err) {
                        err = -sk->sk_err;
                        goto out_err;
                }

                copy = msg_data_left(msg);
                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
                if (psock->cork) {
                        msg_tx = psock->cork;
                } else {
                        msg_tx = &tmp;
                        sk_msg_init(msg_tx);
                }

                osize = msg_tx->sg.size;
                err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
                if (err) {
                        if (err != -ENOSPC)
                                goto wait_for_memory;
                        enospc = true;
                        copy = msg_tx->sg.size - osize;
                }

                err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
                                               copy);
                if (err < 0) {
                        sk_msg_trim(sk, msg_tx, osize);
                        goto out_err;
                }

                copied += copy;
                if (psock->cork_bytes) {
                        if (size > psock->cork_bytes)
                                psock->cork_bytes = 0;
                        else
                                psock->cork_bytes -= size;
                        if (psock->cork_bytes && !enospc)
                                goto out_err;
                        /* All cork bytes are accounted for; rerun the prog. */
                        psock->eval = __SK_NONE;
                        psock->cork_bytes = 0;
                }

                err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
                if (unlikely(err < 0))
                        goto out_err;
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (msg_tx && msg_tx != psock->cork)
                                sk_msg_free(sk, msg_tx);
                        goto out_err;
                }
        }
out_err:
        if (err < 0)
                err = sk_stream_error(sk, msg->msg_flags, err);
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied ? copied : err;
}

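/* ->sendpage() replacement: add the page to an sk_msg (or to the open
 * cork buffer) without copying, then run the verdict program via
 * tcp_bpf_send_verdict() once any outstanding cork_bytes are satisfied.
 */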
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
                            size_t size, int flags)
{
        struct sk_msg tmp, *msg = NULL;
        int err = 0, copied = 0;
        struct sk_psock *psock;
        bool enospc = false;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_sendpage(sk, page, offset, size, flags);

        lock_sock(sk);
        if (psock->cork) {
                msg = psock->cork;
        } else {
                msg = &tmp;
                sk_msg_init(msg);
        }

        /* Catch case where ring is full and sendpage is stalled. */
        if (unlikely(sk_msg_full(msg)))
                goto out_err;

        sk_msg_page_add(msg, page, size, offset);
        sk_mem_charge(sk, size);
        copied = size;
        if (sk_msg_full(msg))
                enospc = true;
        if (psock->cork_bytes) {
                if (size > psock->cork_bytes)
                        psock->cork_bytes = 0;
                else
                        psock->cork_bytes -= size;
                if (psock->cork_bytes && !enospc)
                        goto out_err;
                /* All cork bytes are accounted for; rerun the prog. */
                psock->eval = __SK_NONE;
                psock->cork_bytes = 0;
        }

        err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied ? copied : err;
}

enum {
        TCP_BPF_IPV4,
        TCP_BPF_IPV6,
        TCP_BPF_NUM_PROTS,
};

enum {
        TCP_BPF_BASE,
        TCP_BPF_TX,
        TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

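/* Build the proto variants for one address family from @base:
 * TCP_BPF_BASE overrides the receive/unhash/close paths, TCP_BPF_TX
 * additionally overrides the transmit paths for sockets that have a msg
 * parser program attached.
 */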
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
                                   struct proto *base)
{
        prot[TCP_BPF_BASE]                      = *base;
        prot[TCP_BPF_BASE].unhash               = sock_map_unhash;
        prot[TCP_BPF_BASE].close                = sock_map_close;
        prot[TCP_BPF_BASE].recvmsg              = tcp_bpf_recvmsg;
        prot[TCP_BPF_BASE].stream_memory_read   = tcp_bpf_stream_read;

        prot[TCP_BPF_TX]                        = prot[TCP_BPF_BASE];
        prot[TCP_BPF_TX].sendmsg                = tcp_bpf_sendmsg;
        prot[TCP_BPF_TX].sendpage               = tcp_bpf_sendpage;
}

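/* tcpv6_prot is provided by a module and cannot be captured at init
 * time; rebuild the IPv6 proto variants whenever a different ops pointer
 * is observed.
 */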
static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
        if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
                spin_lock_bh(&tcpv6_prot_lock);
                if (likely(ops != tcpv6_prot_saved)) {
                        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
                        smp_store_release(&tcpv6_prot_saved, ops);
                }
                spin_unlock_bh(&tcpv6_prot_lock);
        }
}

static int __init tcp_bpf_v4_build_proto(void)
{
        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
        return 0;
}
core_initcall(tcp_bpf_v4_build_proto);

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
        /* In order to avoid retpoline, we make assumptions when we call
         * into ops if e.g. a psock is not present. Make sure they are
         * indeed valid assumptions.
         */
        return ops->recvmsg  == tcp_recvmsg &&
               ops->sendmsg  == tcp_sendmsg &&
               ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

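/* Pick the proto variant matching the socket's address family and the
 * attached programs. For IPv6, verify the saved callbacks are the
 * defaults we assume elsewhere and rebuild the variants if tcpv6_prot
 * has changed.
 */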
struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
{
        int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
        int config = psock->progs.msg_parser   ? TCP_BPF_TX   : TCP_BPF_BASE;

        if (sk->sk_family == AF_INET6) {
                if (tcp_bpf_assert_proto_ops(psock->sk_proto))
                        return ERR_PTR(-EINVAL);

                tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
        }

        return &tcp_bpf_prots[family][config];
}

/* If a child got cloned from a listening socket that had tcp_bpf
 * protocol callbacks installed, we need to restore the callbacks to
 * the default ones because the child does not inherit the psock state
 * that tcp_bpf callbacks expect.
 */
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk)
{
        int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
        struct proto *prot = newsk->sk_prot;

        if (prot == &tcp_bpf_prots[family][TCP_BPF_BASE])
                newsk->sk_prot = sk->sk_prot_creator;
}
#endif /* CONFIG_BPF_STREAM_PARSER */