linux/net/core/skmsg.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

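/* The sg ring wraps, so msg->sg.end may be numerically smaller than
 * msg->sg.start. Coalescing into the tail element is only OK while
 * elem_first_coalesce still lies inside the used region of the ring;
 * e.g. with start = 5 and end = 2 the used region wraps and covers
 * indices greater than start or smaller than end.
 */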
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
        if (msg->sg.end > msg->sg.start &&
            elem_first_coalesce < msg->sg.end)
                return true;

        if (msg->sg.end < msg->sg.start &&
            (elem_first_coalesce > msg->sg.start ||
             elem_first_coalesce < msg->sg.end))
                return true;

        return false;
}

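/* Charge and map memory so that @msg holds @len bytes in total, using
 * the socket's page frag allocator. The tail element is extended in
 * place when the new frag is contiguous with it; otherwise a fresh sg
 * element is appended. Returns 0 on success, -ENOMEM when memory cannot
 * be scheduled, or -ENOSPC when the sg ring is full.
 */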
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce)
{
        struct page_frag *pfrag = sk_page_frag(sk);
        int ret = 0;

        len -= msg->sg.size;
        while (len > 0) {
                struct scatterlist *sge;
                u32 orig_offset;
                int use, i;

                if (!sk_page_frag_refill(sk, pfrag))
                        return -ENOMEM;

                orig_offset = pfrag->offset;
                use = min_t(int, len, pfrag->size - orig_offset);
                if (!sk_wmem_schedule(sk, use))
                        return -ENOMEM;

                i = msg->sg.end;
                sk_msg_iter_var_prev(i);
                sge = &msg->sg.data[i];

                if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
                    sg_page(sge) == pfrag->page &&
                    sge->offset + sge->length == orig_offset) {
                        sge->length += use;
                } else {
                        if (sk_msg_full(msg)) {
                                ret = -ENOSPC;
                                break;
                        }

                        sge = &msg->sg.data[msg->sg.end];
                        sg_unmark_end(sge);
                        sg_set_page(sge, pfrag->page, use, orig_offset);
                        get_page(pfrag->page);
                        sk_msg_iter_next(msg, end);
                }

                sk_mem_charge(sk, use);
                msg->sg.size += use;
                pfrag->offset += use;
                len -= use;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

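/* Share @len bytes starting at offset @off of @src with @dst without
 * copying data: source pages are referenced from new (or coalesced) sg
 * elements in @dst, and @sk is charged for the shared bytes. Returns
 * -ENOSPC when @src runs out of data or @dst runs out of sg slots.
 */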
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len)
{
        int i = src->sg.start;
        struct scatterlist *sge = sk_msg_elem(src, i);
        struct scatterlist *sgd = NULL;
        u32 sge_len, sge_off;

        while (off) {
                if (sge->length > off)
                        break;
                off -= sge->length;
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && off)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        while (len) {
                sge_len = sge->length - off;
                if (sge_len > len)
                        sge_len = len;

                if (dst->sg.end)
                        sgd = sk_msg_elem(dst, dst->sg.end - 1);

                if (sgd &&
                    (sg_page(sge) == sg_page(sgd)) &&
                    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
                        sgd->length += sge_len;
                        dst->sg.size += sge_len;
                } else if (!sk_msg_full(dst)) {
                        sge_off = sge->offset + off;
                        sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
                } else {
                        return -ENOSPC;
                }

                off = 0;
                len -= sge_len;
                sk_mem_charge(sk, sge_len);
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && len)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

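/* Uncharge @bytes from @sk and advance msg->sg.start past the returned
 * data, zeroing fully consumed elements and trimming the offset/length
 * of a partially consumed one so the msg geometry stays consistent.
 */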
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (bytes < sge->length) {
                        sge->length -= bytes;
                        sge->offset += bytes;
                        sk_mem_uncharge(sk, bytes);
                        break;
                }

                sk_mem_uncharge(sk, sge->length);
                bytes -= sge->length;
                sge->length = 0;
                sge->offset = 0;
                sk_msg_iter_var_next(i);
        } while (bytes && i != msg->sg.end);
        msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

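/* Uncharge up to @bytes from @sk across the elements of @msg without
 * touching the sg geometry; callers that also need the elements freed
 * or the start pointer advanced must do that separately.
 */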
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = &msg->sg.data[i];
                int uncharge = (bytes < sge->length) ? bytes : sge->length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
                            bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        u32 len = sge->length;

        /* When the skb owns the memory we free it from the consume_skb() path. */
        if (!msg->skb) {
                if (charge)
                        sk_mem_uncharge(sk, len);
                put_page(sg_page(sge));
        }
        memset(sge, 0, sizeof(*sge));
        return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
                         bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        int freed = 0;

        while (msg->sg.size) {
                msg->sg.size -= sge->length;
                freed += sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, msg->sg.size);
                sge = sk_msg_elem(msg, i);
        }
        consume_skb(msg->skb);
        sk_msg_init(msg);
        return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes, bool charge)
{
        struct scatterlist *sge;
        u32 i = msg->sg.start;

        while (bytes) {
                sge = sk_msg_elem(msg, i);
                if (!sge->length)
                        break;
                if (bytes < sge->length) {
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        sge->length -= bytes;
                        sge->offset += bytes;
                        msg->sg.size -= bytes;
                        break;
                }

                msg->sg.size -= sge->length;
                bytes -= sge->length;
                sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, bytes);
        }
        msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, false);
}

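/* Trim @msg from the tail down to @len total bytes, freeing whole sg
 * elements where possible and shrinking the last one in place. The
 * copybreak and curr pointers are pulled back when they point into the
 * trimmed region.
 */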
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
        int trim = msg->sg.size - len;
        u32 i = msg->sg.end;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        sk_msg_iter_var_prev(i);
        msg->sg.size = len;
        while (msg->sg.data[i].length &&
               trim >= msg->sg.data[i].length) {
                trim -= msg->sg.data[i].length;
                sk_msg_free_elem(sk, msg, i, true);
                sk_msg_iter_var_prev(i);
                if (!trim)
                        goto out;
        }

        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
        /* Adjust copybreak if it falls into the trimmed part of last buf */
        if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
                msg->sg.copybreak = msg->sg.data[i].length;
out:
        sk_msg_iter_var_next(i);
        msg->sg.end = i;

        /* If we trim data a full sg elem before the curr pointer, update
         * copybreak and curr so that any future copy operations start at
         * the new copy location.
         * However, trimmed data that has not yet been used in a copy op
         * does not require an update.
         */
        if (!msg->sg.size) {
                msg->sg.curr = msg->sg.start;
                msg->sg.copybreak = 0;
        } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
                   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
                sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

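/* Pin user pages from @from and link up to @bytes of them into @msg
 * without copying the data. On failure the iov_iter is reverted; the
 * caller may still need sk_msg_trim() to undo the sg changes.
 */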
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes)
{
        int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
        const int to_max_pages = MAX_MSG_FRAGS;
        struct page *pages[MAX_MSG_FRAGS];
        ssize_t orig, copied, use, offset;

        orig = msg->sg.size;
        while (bytes > 0) {
                i = 0;
                maxpages = to_max_pages - num_elems;
                if (maxpages == 0) {
                        ret = -EFAULT;
                        goto out;
                }

                copied = iov_iter_get_pages(from, pages, bytes, maxpages,
                                            &offset);
                if (copied <= 0) {
                        ret = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);
                bytes -= copied;
                msg->sg.size += copied;

                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);
                        sg_set_page(&msg->sg.data[msg->sg.end],
                                    pages[i], use, offset);
                        sg_unmark_end(&msg->sg.data[msg->sg.end]);
                        sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;
                        sk_msg_iter_next(msg, end);
                        num_elems++;
                        i++;
                }
                /* When zerocopy is mixed with sk_msg_*copy* operations we
                 * may have a copybreak set; in this case clear it and prefer
                 * the zerocopy remainder when possible.
                 */
                msg->sg.copybreak = 0;
                msg->sg.curr = msg->sg.end;
        }
out:
        /* Revert iov_iter updates; msg will need to use sk_msg_trim() later
         * if it also needs to be cleared.
         */
        if (ret)
                iov_iter_revert(from, msg->sg.size - orig);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

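/* Copy up to @bytes from @from into memory already allocated in @msg,
 * starting at the curr/copybreak position. Returns a negative errno
 * (-EFAULT, -ENOSPC) on failure, otherwise a non-negative value.
 */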
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes)
{
        int ret = -ENOSPC, i = msg->sg.curr;
        struct scatterlist *sge;
        u32 copy, buf_size;
        void *to;

        do {
                sge = sk_msg_elem(msg, i);
                /* This is possible if a trim operation shrunk the buffer */
                if (msg->sg.copybreak >= sge->length) {
                        msg->sg.copybreak = 0;
                        sk_msg_iter_var_next(i);
                        if (i == msg->sg.end)
                                break;
                        sge = sk_msg_elem(msg, i);
                }

                buf_size = sge->length - msg->sg.copybreak;
                copy = (buf_size > bytes) ? bytes : buf_size;
                to = sg_virt(sge) + msg->sg.copybreak;
                msg->sg.copybreak += copy;
                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        ret = copy_from_iter_nocache(to, copy, from);
                else
                        ret = copy_from_iter(to, copy, from);
                if (ret != copy) {
                        ret = -EFAULT;
                        goto out;
                }
                bytes -= copy;
                if (!bytes)
                        break;
                msg->sg.copybreak = 0;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
out:
        msg->sg.curr = i;
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                   int len, int flags)
{
        struct iov_iter *iter = &msg->msg_iter;
        int peek = flags & MSG_PEEK;
        struct sk_msg *msg_rx;
        int i, copied = 0;

        msg_rx = sk_psock_peek_msg(psock);
        while (copied != len) {
                struct scatterlist *sge;

                if (unlikely(!msg_rx))
                        break;

                i = msg_rx->sg.start;
                do {
                        struct page *page;
                        int copy;

                        sge = sk_msg_elem(msg_rx, i);
                        copy = sge->length;
                        page = sg_page(sge);
                        if (copied + copy > len)
                                copy = len - copied;
                        copy = copy_page_to_iter(page, sge->offset, copy, iter);
                        if (!copy)
                                return copied ? copied : -EFAULT;

                        copied += copy;
                        if (likely(!peek)) {
                                sge->offset += copy;
                                sge->length -= copy;
                                if (!msg_rx->skb)
                                        sk_mem_uncharge(sk, copy);
                                msg_rx->sg.size -= copy;

                                if (!sge->length) {
                                        sk_msg_iter_var_next(i);
                                        if (!msg_rx->skb)
                                                put_page(page);
                                }
                        } else {
                                /* Let's not optimize the peek case: if
                                 * copy_page_to_iter didn't copy the entire
                                 * length, just break.
                                 */
                                if (copy != sge->length)
                                        return copied;
                                sk_msg_iter_var_next(i);
                        }

                        if (copied == len)
                                break;
                } while (i != msg_rx->sg.end);

                if (unlikely(peek)) {
                        msg_rx = sk_psock_next_msg(psock, msg_rx);
                        if (!msg_rx)
                                break;
                        continue;
                }

                msg_rx->sg.start = i;
                if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
                        msg_rx = sk_psock_dequeue_msg(psock);
                        kfree_sk_msg(msg_rx);
                }
                msg_rx = sk_psock_peek_msg(psock);
        }

        return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

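/* True when the psock has queued ingress messages, i.e. a recvmsg()
 * call could make progress without blocking.
 */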
bool sk_msg_is_readable(struct sock *sk)
{
        struct sk_psock *psock;
        bool empty = true;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock))
                empty = list_empty(&psock->ingress_msg);
        rcu_read_unlock();
        return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
                                                  struct sk_buff *skb)
{
        struct sk_msg *msg;

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                return NULL;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return NULL;

        msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
        if (unlikely(!msg))
                return NULL;

        sk_msg_init(msg);
        return msg;
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
                                        u32 off, u32 len,
                                        struct sk_psock *psock,
                                        struct sock *sk,
                                        struct sk_msg *msg)
{
        int num_sge, copied;

        /* skb_linearize() may fail with ENOMEM, but let's simply try again
         * later if this happens. Under memory pressure we don't want to
         * drop the skb. We need to linearize the skb so that the mapping
         * in skb_to_sgvec cannot error.
         */
        if (skb_linearize(skb))
                return -EAGAIN;
        num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
        if (unlikely(num_sge < 0))
                return num_sge;

        copied = len;
        msg->sg.start = 0;
        msg->sg.size = copied;
        msg->sg.end = num_sge;
        msg->skb = skb;

        sk_psock_queue_msg(psock, msg);
        sk_psock_data_ready(sk, psock);
        return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
                                     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
                                u32 off, u32 len)
{
        struct sock *sk = psock->sk;
        struct sk_msg *msg;
        int err;

        /* If we are receiving on the same sock skb->sk is already assigned,
         * so skip memory accounting and the owner transition since they are
         * already set up correctly.
         */
        if (unlikely(skb->sk == sk))
                return sk_psock_skb_ingress_self(psock, skb, off, len);
        msg = sk_psock_create_ingress_msg(sk, skb);
        if (!msg)
                return -EAGAIN;

        /* This will transition ownership of the data from the socket where
         * the BPF program was run initiating the redirect to the socket
         * we will eventually receive this data on. The data will be released
         * from consume_skb() in __tcp_bpf_recvmsg() after it has been copied
         * into user buffers.
         */
        skb_set_owner_r(skb, sk);
        err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
        if (err < 0)
                kfree(msg);
        return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or call
 * skb_set_owner_r() because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
                                     u32 off, u32 len)
{
        struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        struct sock *sk = psock->sk;
        int err;

        if (unlikely(!msg))
                return -EAGAIN;
        sk_msg_init(msg);
        skb_set_owner_r(skb, sk);
        err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
        if (err < 0)
                kfree(msg);
        return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
                               u32 off, u32 len, bool ingress)
{
        if (!ingress) {
                if (!sock_writeable(psock->sk))
                        return -EAGAIN;
                return skb_send_sock(psock->sk, skb, off, len);
        }
        return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
                               struct sk_psock_work_state *state,
                               struct sk_buff *skb,
                               int len, int off)
{
        spin_lock_bh(&psock->ingress_lock);
        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                state->skb = skb;
                state->len = len;
                state->off = off;
        } else {
                sock_drop(psock->sk, skb);
        }
        spin_unlock_bh(&psock->ingress_lock);
}

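/* Workqueue handler that drains psock->ingress_skb, resuming from any
 * skb stashed in work_state by a previous -EAGAIN. Each skb is either
 * sent out (egress) or turned into ingress msgs; hard errors break the
 * pipe and disable TX.
 */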
static void sk_psock_backlog(struct work_struct *work)
{
        struct sk_psock *psock = container_of(work, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb = NULL;
        bool ingress;
        u32 len, off;
        int ret;

        mutex_lock(&psock->work_mutex);
        if (unlikely(state->skb)) {
                spin_lock_bh(&psock->ingress_lock);
                skb = state->skb;
                len = state->len;
                off = state->off;
                state->skb = NULL;
                spin_unlock_bh(&psock->ingress_lock);
        }
        if (skb)
                goto start;

        while ((skb = skb_dequeue(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
                if (skb_bpf_strparser(skb)) {
                        struct strp_msg *stm = strp_msg(skb);

                        off = stm->offset;
                        len = stm->full_len;
                }
start:
                ingress = skb_bpf_ingress(skb);
                skb_bpf_redirect_clear(skb);
                do {
                        ret = -EIO;
                        if (!sock_flag(psock->sk, SOCK_DEAD))
                                ret = sk_psock_handle_skb(psock, skb, off,
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        sk_psock_skb_state(psock, state, skb,
                                                           len, off);
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                sock_drop(psock->sk, skb);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);

                if (!ingress)
                        kfree_skb(skb);
        }
end:
        mutex_unlock(&psock->work_mutex);
}

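/* Allocate and attach a psock to @sk via sk_user_data, saving the
 * original proto callbacks so they can be restored by sk_psock_drop().
 * Returns an ERR_PTR on failure, e.g. -EBUSY when sk_user_data is
 * already in use.
 */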
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
        struct sk_psock *psock;
        struct proto *prot;

        write_lock_bh(&sk->sk_callback_lock);

        if (sk->sk_user_data) {
                psock = ERR_PTR(-EBUSY);
                goto out;
        }

        psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
        if (!psock) {
                psock = ERR_PTR(-ENOMEM);
                goto out;
        }

        prot = READ_ONCE(sk->sk_prot);
        psock->sk = sk;
        psock->eval = __SK_NONE;
        psock->sk_proto = prot;
        psock->saved_unhash = prot->unhash;
        psock->saved_close = prot->close;
        psock->saved_write_space = sk->sk_write_space;

        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);

        INIT_WORK(&psock->work, sk_psock_backlog);
        mutex_init(&psock->work_mutex);
        INIT_LIST_HEAD(&psock->ingress_msg);
        spin_lock_init(&psock->ingress_lock);
        skb_queue_head_init(&psock->ingress_skb);

        sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
        refcount_set(&psock->refcnt, 1);

        rcu_assign_sk_user_data_nocopy(sk, psock);
        sock_hold(sk);

out:
        write_unlock_bh(&sk->sk_callback_lock);
        return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
        struct sk_psock_link *link;

        spin_lock_bh(&psock->link_lock);
        link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
                                        list);
        if (link)
                list_del(&link->list);
        spin_unlock_bh(&psock->link_lock);
        return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
        struct sk_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
                list_del(&msg->list);
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
                skb_bpf_redirect_clear(skb);
                sock_drop(psock->sk, skb);
        }
        kfree_skb(psock->work_state.skb);
        /* We null the skb here to ensure that calls to sk_psock_backlog
         * do not pick up the freed skb.
         */
        psock->work_state.skb = NULL;
        __sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
        struct sk_psock_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &psock->link, list) {
                list_del(&link->list);
                sk_psock_free_link(link);
        }
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
        spin_lock_bh(&psock->ingress_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
        sk_psock_cork_free(psock);
        __sk_psock_zap_ingress(psock);
        spin_unlock_bh(&psock->ingress_lock);

        if (wait)
                cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
        struct sk_psock *psock = container_of(to_rcu_work(work),
                                              struct sk_psock, rwork);
        /* No sk_callback_lock since already detached. */

        sk_psock_done_strp(psock);

        cancel_work_sync(&psock->work);
        mutex_destroy(&psock->work_mutex);

        psock_progs_drop(&psock->progs);

        sk_psock_link_destroy(psock);
        sk_psock_cork_free(psock);

        if (psock->sk_redir)
                sock_put(psock->sk_redir);
        sock_put(psock->sk);
        kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
        if (psock->progs.stream_parser)
                sk_psock_stop_strp(sk, psock);
        else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
                sk_psock_stop_verdict(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);

        sk_psock_stop(psock, false);

        INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
        queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

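/* Map the BPF program's SK_PASS/SK_DROP verdict onto the internal
 * __SK_* action, folding in whether a redirect target was set.
 */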
static int sk_psock_map_verd(int verdict, bool redir)
{
        switch (verdict) {
        case SK_PASS:
                return redir ? __SK_REDIRECT : __SK_PASS;
        case SK_DROP:
        default:
                break;
        }

        return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg)
{
        struct bpf_prog *prog;
        int ret;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.msg_parser);
        if (unlikely(!prog)) {
                ret = __SK_PASS;
                goto out;
        }

        sk_msg_compute_data_pointers(msg);
        msg->sk = sk;
        ret = bpf_prog_run_pin_on_cpu(prog, msg);
        ret = sk_psock_map_verd(ret, msg->sk_redir);
        psock->apply_bytes = msg->apply_bytes;
        if (ret == __SK_REDIRECT) {
                if (psock->sk_redir)
                        sock_put(psock->sk_redir);
                psock->sk_redir = msg->sk_redir;
                if (!psock->sk_redir) {
                        ret = __SK_DROP;
                        goto out;
                }
                sock_hold(psock->sk_redir);
        }
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

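/* Queue @skb onto the ingress queue of the psock chosen by the BPF
 * redirect and kick its backlog work. Drops the skb (-EIO) when the
 * target is missing, dead, or has TX disabled.
 */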
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
        struct sk_psock *psock_other;
        struct sock *sk_other;

        sk_other = skb_bpf_redirect_fetch(skb);
        /* This error indicates a buggy BPF program: it returned a redirect
         * verdict, but then didn't set a redirect socket.
         */
        if (unlikely(!sk_other)) {
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }
        psock_other = sk_psock(sk_other);
        /* This error indicates the socket is being torn down or had another
         * error that caused the pipe to break. We can't send a packet on
         * a socket that is in this state so we drop the skb.
         */
        if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }
        spin_lock_bh(&psock_other->ingress_lock);
        if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
                spin_unlock_bh(&psock_other->ingress_lock);
                skb_bpf_redirect_clear(skb);
                sock_drop(from->sk, skb);
                return -EIO;
        }

        skb_queue_tail(&psock_other->ingress_skb, skb);
        schedule_work(&psock_other->work);
        spin_unlock_bh(&psock_other->ingress_lock);
        return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
                                       struct sk_psock *from, int verdict)
{
        switch (verdict) {
        case __SK_REDIRECT:
                sk_psock_skb_redirect(from, skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
        default:
                break;
        }
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog;
        int ret = __SK_PASS;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                skb->sk = psock->sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_tls_verdict_apply(skb, psock, ret);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
                                  int verdict)
{
        struct sock *sk_other;
        int err = 0;
        u32 len, off;

        switch (verdict) {
        case __SK_PASS:
                err = -EIO;
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
                    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                        skb_bpf_redirect_clear(skb);
                        goto out_free;
                }

                skb_bpf_set_ingress(skb);

                /* If the queue is empty then we can submit directly
                 * into the msg queue. If it's not empty we have to
                 * queue work, otherwise we may get OOO data. Any
                 * sk_psock_skb_ingress errors will be handled by
                 * retrying later from the workqueue.
                 */
                if (skb_queue_empty(&psock->ingress_skb)) {
                        len = skb->len;
                        off = 0;
                        if (skb_bpf_strparser(skb)) {
                                struct strp_msg *stm = strp_msg(skb);

                                off = stm->offset;
                                len = stm->full_len;
                        }
                        err = sk_psock_skb_ingress_self(psock, skb, off, len);
                }
                if (err < 0) {
                        spin_lock_bh(&psock->ingress_lock);
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                                skb_queue_tail(&psock->ingress_skb, skb);
                                schedule_work(&psock->work);
                                err = 0;
                        }
                        spin_unlock_bh(&psock->ingress_lock);
                        if (err < 0) {
                                skb_bpf_redirect_clear(skb);
                                goto out_free;
                        }
                }
                break;
        case __SK_REDIRECT:
                err = sk_psock_skb_redirect(psock, skb);
                break;
        case __SK_DROP:
        default:
out_free:
                sock_drop(psock->sk, skb);
        }

        return err;
}

static void sk_psock_write_space(struct sock *sk)
{
        struct sk_psock *psock;
        void (*write_space)(struct sock *sk) = NULL;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        schedule_work(&psock->work);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
        if (write_space)
                write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        struct sock *sk;

        rcu_read_lock();
        sk = strp->sk;
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                sock_drop(sk, skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (likely(prog)) {
                skb->sk = sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                if (ret == SK_PASS)
                        skb_bpf_set_strparser(skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
        return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
        struct bpf_prog *prog;
        int ret = skb->len;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.stream_parser);
        if (likely(prog)) {
                skb->sk = psock->sk;
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                skb->sk = NULL;
        }
        rcu_read_unlock();
        return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (tls_sw_has_ctx_rx(sk)) {
                        psock->saved_data_ready(sk);
                } else {
                        write_lock_bh(&sk->sk_callback_lock);
                        strp_data_ready(&psock->strp);
                        write_unlock_bh(&sk->sk_callback_lock);
                }
        }
        rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        static const struct strp_callbacks cb = {
                .rcv_msg        = sk_psock_strp_read,
                .read_sock_done = sk_psock_strp_read_done,
                .parse_msg      = sk_psock_strp_parse,
        };

        return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_strp_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
        psock_set_prog(&psock->progs.stream_parser, NULL);

        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
        strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
        /* Parser has been stopped */
        if (psock->progs.stream_parser)
                strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

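/* read_sock() callback used in verdict-only (no strparser) mode: clone
 * the skb, run the stream/skb verdict program and apply the result.
 * Returns the number of bytes consumed from the receive queue.
 */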
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
                                 unsigned int offset, size_t orig_len)
{
        struct sock *sk = (struct sock *)desc->arg.data;
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        int len = skb->len;

        /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb) {
                desc->error = -ENOMEM;
                return 0;
        }

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                len = 0;
                sock_drop(sk, skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.stream_verdict);
        if (!prog)
                prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb->sk = sk;
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
        if (sk_psock_verdict_apply(psock, skb, ret) < 0)
                len = 0;
out:
        rcu_read_unlock();
        return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;
        read_descriptor_t desc;

        if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
                return;

        desc.arg.data = sk;
        desc.error = 0;
        desc.count = 1;

        sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
        if (psock->saved_data_ready)
                return;

        psock->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_verdict_data_ready;
        sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
        psock_set_prog(&psock->progs.stream_verdict, NULL);
        psock_set_prog(&psock->progs.skb_verdict, NULL);

        if (!psock->saved_data_ready)
                return;

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
}