linux/net/core/skmsg.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

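/* msg->sg is a small ring of scatterlist entries: the start/end/curr
 * fields are indices into sg.data[] that wrap around, so end < start
 * simply means the ring has wrapped. sk_msg_try_coalesce_ok() checks
 * whether elem_first_coalesce still lies within the occupied span of
 * the ring; callers use it to decide if the element just before 'end'
 * may be grown in place rather than consuming a fresh entry.
 */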
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
        if (msg->sg.end > msg->sg.start &&
            elem_first_coalesce < msg->sg.end)
                return true;

        if (msg->sg.end < msg->sg.start &&
            (elem_first_coalesce > msg->sg.start ||
             elem_first_coalesce < msg->sg.end))
                return true;

        return false;
}

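/* sk_msg_alloc() tops msg up to 'len' bytes in total (the current
 * sg.size is subtracted first) using the socket's page_frag allocator,
 * coalescing into the tail element when the new frag is contiguous
 * with it. It returns 0, -ENOMEM on allocation/accounting failure, or
 * -ENOSPC when the ring is full; partial progress already charged to
 * the socket is kept, so callers generally trim back on error.
 *
 * A minimal usage sketch (hypothetical caller, not from this file),
 * where 'first_coalesce' is the caller's record of the first ring
 * element it may extend in place:
 *
 *	if (sk_msg_alloc(sk, msg, msg->sg.size + copy, first_coalesce))
 *		return -ENOMEM;	// or wait for memory and retry
 */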
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
                 int elem_first_coalesce)
{
        struct page_frag *pfrag = sk_page_frag(sk);
        int ret = 0;

        len -= msg->sg.size;
        while (len > 0) {
                struct scatterlist *sge;
                u32 orig_offset;
                int use, i;

                if (!sk_page_frag_refill(sk, pfrag))
                        return -ENOMEM;

                orig_offset = pfrag->offset;
                use = min_t(int, len, pfrag->size - orig_offset);
                if (!sk_wmem_schedule(sk, use))
                        return -ENOMEM;

                i = msg->sg.end;
                sk_msg_iter_var_prev(i);
                sge = &msg->sg.data[i];

                if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
                    sg_page(sge) == pfrag->page &&
                    sge->offset + sge->length == orig_offset) {
                        sge->length += use;
                } else {
                        if (sk_msg_full(msg)) {
                                ret = -ENOSPC;
                                break;
                        }

                        sge = &msg->sg.data[msg->sg.end];
                        sg_unmark_end(sge);
                        sg_set_page(sge, pfrag->page, use, orig_offset);
                        get_page(pfrag->page);
                        sk_msg_iter_next(msg, end);
                }

                sk_mem_charge(sk, use);
                msg->sg.size += use;
                pfrag->offset += use;
                len -= use;
        }

        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

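/* sk_msg_clone() copies 'len' bytes starting at offset 'off' of src
 * into dst by duplicating scatterlist entries; the underlying pages
 * are shared rather than copied, with reference counting handled by
 * the sk_msg helpers. Ranges contiguous with dst's tail element are
 * merged, and the cloned bytes are charged to sk. Returns -ENOSPC
 * when src runs out of data or dst runs out of ring slots.
 */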
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
                 u32 off, u32 len)
{
        int i = src->sg.start;
        struct scatterlist *sge = sk_msg_elem(src, i);
        struct scatterlist *sgd = NULL;
        u32 sge_len, sge_off;

        while (off) {
                if (sge->length > off)
                        break;
                off -= sge->length;
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && off)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        while (len) {
                sge_len = sge->length - off;
                if (sge_len > len)
                        sge_len = len;

                if (dst->sg.end)
                        sgd = sk_msg_elem(dst, dst->sg.end - 1);

                if (sgd &&
                    (sg_page(sge) == sg_page(sgd)) &&
                    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
                        sgd->length += sge_len;
                        dst->sg.size += sge_len;
                } else if (!sk_msg_full(dst)) {
                        sge_off = sge->offset + off;
                        sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
                } else {
                        return -ENOSPC;
                }

                off = 0;
                len -= sge_len;
                sk_mem_charge(sk, sge_len);
                sk_msg_iter_var_next(i);
                if (i == src->sg.end && len)
                        return -ENOSPC;
                sge = sk_msg_elem(src, i);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

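/* The two *return* helpers hand memory charges back to the socket.
 * sk_msg_return_zero() also consumes the returned bytes, zeroing the
 * freed elements and advancing sg.start, whereas sk_msg_return() only
 * uncharges and leaves the scatterlist itself untouched.
 */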
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (bytes < sge->length) {
                        sge->length -= bytes;
                        sge->offset += bytes;
                        sk_mem_uncharge(sk, bytes);
                        break;
                }

                sk_mem_uncharge(sk, sge->length);
                bytes -= sge->length;
                sge->length = 0;
                sge->offset = 0;
                sk_msg_iter_var_next(i);
        } while (bytes && i != msg->sg.end);
        msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
        int i = msg->sg.start;

        do {
                struct scatterlist *sge = &msg->sg.data[i];
                int uncharge = (bytes < sge->length) ? bytes : sge->length;

                sk_mem_uncharge(sk, uncharge);
                bytes -= uncharge;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

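/* Free path. sk_msg_free_elem() releases a single ring element; the
 * page reference is dropped only when the msg does not wrap an skb,
 * because for skb-backed messages the pages belong to the skb, which
 * __sk_msg_free() releases via consume_skb(). The 'charge' flag
 * selects whether socket memory accounting is unwound as well; the
 * *_nocharge() variants below skip it.
 */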
static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
                            bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        u32 len = sge->length;

        if (charge)
                sk_mem_uncharge(sk, len);
        if (!msg->skb)
                put_page(sg_page(sge));
        memset(sge, 0, sizeof(*sge));
        return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
                         bool charge)
{
        struct scatterlist *sge = sk_msg_elem(msg, i);
        int freed = 0;

        while (msg->sg.size) {
                msg->sg.size -= sge->length;
                freed += sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, msg->sg.size);
                sge = sk_msg_elem(msg, i);
        }
        consume_skb(msg->skb);
        sk_msg_init(msg);
        return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
        return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes, bool charge)
{
        struct scatterlist *sge;
        u32 i = msg->sg.start;

        while (bytes) {
                sge = sk_msg_elem(msg, i);
                if (!sge->length)
                        break;
                if (bytes < sge->length) {
                        if (charge)
                                sk_mem_uncharge(sk, bytes);
                        sge->length -= bytes;
                        sge->offset += bytes;
                        msg->sg.size -= bytes;
                        break;
                }

                msg->sg.size -= sge->length;
                bytes -= sge->length;
                sk_msg_free_elem(sk, msg, i, charge);
                sk_msg_iter_var_next(i);
                sk_msg_check_to_free(msg, i, bytes);
        }
        msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
                                  u32 bytes)
{
        __sk_msg_free_partial(sk, msg, bytes, false);
}

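/* sk_msg_trim() shrinks msg from the tail until it holds exactly 'len'
 * bytes, freeing whole elements where possible and truncating the last
 * partially trimmed one. curr and copybreak are pulled back whenever
 * they would otherwise point into the discarded region.
 */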
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
        int trim = msg->sg.size - len;
        u32 i = msg->sg.end;

        if (trim <= 0) {
                WARN_ON(trim < 0);
                return;
        }

        sk_msg_iter_var_prev(i);
        msg->sg.size = len;
        while (msg->sg.data[i].length &&
               trim >= msg->sg.data[i].length) {
                trim -= msg->sg.data[i].length;
                sk_msg_free_elem(sk, msg, i, true);
                sk_msg_iter_var_prev(i);
                if (!trim)
                        goto out;
        }

        msg->sg.data[i].length -= trim;
        sk_mem_uncharge(sk, trim);
        /* Adjust copybreak if it falls into the trimmed part of last buf */
        if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
                msg->sg.copybreak = msg->sg.data[i].length;
out:
        sk_msg_iter_var_next(i);
        msg->sg.end = i;

        /* If we trim data by a full sg elem before the curr pointer, update
         * copybreak and curr so that any future copy operations start at
         * the new copy location.
         * However, trimmed data that has not yet been used in a copy op
         * does not require an update.
         */
        if (!msg->sg.size) {
                msg->sg.curr = msg->sg.start;
                msg->sg.copybreak = 0;
        } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
                   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
                sk_msg_iter_var_prev(i);
                msg->sg.curr = i;
                msg->sg.copybreak = msg->sg.data[i].length;
        }
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

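/* sk_msg_zerocopy_from_iter() pins up to 'bytes' of user memory from
 * the iterator and links the pinned pages directly into msg, avoiding
 * a data copy. On error the iterator is reverted, but pages already
 * linked stay in msg; as the comment at 'out' notes, callers trim the
 * msg if it needs to be cleared too.
 */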
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes)
{
        int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
        const int to_max_pages = MAX_MSG_FRAGS;
        struct page *pages[MAX_MSG_FRAGS];
        ssize_t orig, copied, use, offset;

        orig = msg->sg.size;
        while (bytes > 0) {
                i = 0;
                maxpages = to_max_pages - num_elems;
                if (maxpages == 0) {
                        ret = -EFAULT;
                        goto out;
                }

                copied = iov_iter_get_pages(from, pages, bytes, maxpages,
                                            &offset);
                if (copied <= 0) {
                        ret = -EFAULT;
                        goto out;
                }

                iov_iter_advance(from, copied);
                bytes -= copied;
                msg->sg.size += copied;

                while (copied) {
                        use = min_t(int, copied, PAGE_SIZE - offset);
                        sg_set_page(&msg->sg.data[msg->sg.end],
                                    pages[i], use, offset);
                        sg_unmark_end(&msg->sg.data[msg->sg.end]);
                        sk_mem_charge(sk, use);

                        offset = 0;
                        copied -= use;
                        sk_msg_iter_next(msg, end);
                        num_elems++;
                        i++;
                }
                /* When zerocopy is mixed with sk_msg_*copy* operations we
                 * may have a copybreak set. In this case, clear it and
                 * prefer the zerocopy remainder when possible.
                 */
                msg->sg.copybreak = 0;
                msg->sg.curr = msg->sg.end;
        }
out:
        /* Revert iov_iter updates; msg will need to use 'trim' later if it
         * also needs to be cleared.
         */
        if (ret)
                iov_iter_revert(from, msg->sg.size - orig);
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

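/* sk_msg_memcopy_from_iter() is the copying counterpart: it fills the
 * already-allocated elements, starting at curr/copybreak, with data
 * from the iterator. Note it returns the result of the last
 * copy_from_iter*() call (or -EFAULT/-ENOSPC), not a running total.
 */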
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                             struct sk_msg *msg, u32 bytes)
{
        int ret = -ENOSPC, i = msg->sg.curr;
        struct scatterlist *sge;
        u32 copy, buf_size;
        void *to;

        do {
                sge = sk_msg_elem(msg, i);
                /* This is possible if a trim operation shrunk the buffer */
                if (msg->sg.copybreak >= sge->length) {
                        msg->sg.copybreak = 0;
                        sk_msg_iter_var_next(i);
                        if (i == msg->sg.end)
                                break;
                        sge = sk_msg_elem(msg, i);
                }

                buf_size = sge->length - msg->sg.copybreak;
                copy = (buf_size > bytes) ? bytes : buf_size;
                to = sg_virt(sge) + msg->sg.copybreak;
                msg->sg.copybreak += copy;
                if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
                        ret = copy_from_iter_nocache(to, copy, from);
                else
                        ret = copy_from_iter(to, copy, from);
                if (ret != copy) {
                        ret = -EFAULT;
                        goto out;
                }
                bytes -= copy;
                if (!bytes)
                        break;
                msg->sg.copybreak = 0;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);
out:
        msg->sg.curr = i;
        return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

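/* Ingress: wrap an skb in a freshly allocated sk_msg, map its data
 * into the scatterlist via skb_to_sgvec(), charge the bytes to the
 * socket, and queue the message for the local receiver. The skb stays
 * attached through msg->skb and is consumed when the msg is freed.
 */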
static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
        struct sock *sk = psock->sk;
        int copied = 0, num_sge;
        struct sk_msg *msg;

        msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
        if (unlikely(!msg))
                return -EAGAIN;
        if (!sk_rmem_schedule(sk, skb, skb->len)) {
                kfree(msg);
                return -EAGAIN;
        }

        sk_msg_init(msg);
        num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
        if (unlikely(num_sge < 0)) {
                kfree(msg);
                return num_sge;
        }

        sk_mem_charge(sk, skb->len);
        copied = skb->len;
        msg->sg.start = 0;
        msg->sg.size = copied;
        msg->sg.end = num_sge;
        msg->skb = skb;

        sk_psock_queue_msg(psock, msg);
        sk_psock_data_ready(sk, psock);
        return copied;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
                               u32 off, u32 len, bool ingress)
{
        if (ingress)
                return sk_psock_skb_ingress(psock, skb);
        else
                return skb_send_sock_locked(psock->sk, skb, off, len);
}

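/* Work-queue handler that drains psock->ingress_skb. Each skb is
 * either looped back to the local socket (ingress) or transmitted via
 * skb_send_sock_locked(). -EAGAIN parks the in-flight skb in
 * work_state so the next run resumes at the same offset; any other
 * error reports it and disables further transmission.
 */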
static void sk_psock_backlog(struct work_struct *work)
{
        struct sk_psock *psock = container_of(work, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb;
        bool ingress;
        u32 len, off;
        int ret;

        /* Lock sock to avoid losing sk_socket during loop. */
        lock_sock(psock->sk);
        if (state->skb) {
                skb = state->skb;
                len = state->len;
                off = state->off;
                state->skb = NULL;
                goto start;
        }

        while ((skb = skb_dequeue(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
start:
                ingress = tcp_skb_bpf_ingress(skb);
                do {
                        ret = -EIO;
                        if (likely(psock->sk->sk_socket))
                                ret = sk_psock_handle_skb(psock, skb, off,
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        state->skb = skb;
                                        state->len = len;
                                        state->off = off;
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
                                kfree_skb(skb);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);

                if (!ingress)
                        kfree_skb(skb);
        }
end:
        release_sock(psock->sk);
}

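/* psock lifetime: sk_psock_init() attaches a refcounted psock to the
 * socket's user data and takes a hold on sk. Teardown is staged, as
 * seen below: sk_psock_drop() unlinks the psock and defers destruction
 * through call_rcu() plus a workqueue, so that sleeping cleanup such
 * as strp_done() and cancel_work_sync() never runs in RCU context.
 */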
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
        struct sk_psock *psock = kzalloc_node(sizeof(*psock),
                                              GFP_ATOMIC | __GFP_NOWARN,
                                              node);
        if (!psock)
                return NULL;

        psock->sk = sk;
        psock->eval = __SK_NONE;

        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);

        INIT_WORK(&psock->work, sk_psock_backlog);
        INIT_LIST_HEAD(&psock->ingress_msg);
        skb_queue_head_init(&psock->ingress_skb);

        sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
        refcount_set(&psock->refcnt, 1);

        rcu_assign_sk_user_data_nocopy(sk, psock);
        sock_hold(sk);

        return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
        struct sk_psock_link *link;

        spin_lock_bh(&psock->link_lock);
        link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
                                        list);
        if (link)
                list_del(&link->list);
        spin_unlock_bh(&psock->link_lock);
        return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
        struct sk_msg *msg, *tmp;

        list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
                list_del(&msg->list);
                sk_msg_free(psock->sk, msg);
                kfree(msg);
        }
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
        __skb_queue_purge(&psock->ingress_skb);
        __sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
        struct sk_psock_link *link, *tmp;

        list_for_each_entry_safe(link, tmp, &psock->link, list) {
                list_del(&link->list);
                sk_psock_free_link(link);
        }
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
        struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

        /* No sk_callback_lock since already detached. */

        /* Parser has been stopped */
        if (psock->progs.skb_parser)
                strp_done(&psock->parser.strp);

        cancel_work_sync(&psock->work);

        psock_progs_drop(&psock->progs);

        sk_psock_link_destroy(psock);
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);

        if (psock->sk_redir)
                sock_put(psock->sk_redir);
        sock_put(psock->sk);
        kfree(psock);
}

void sk_psock_destroy(struct rcu_head *rcu)
{
        struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

        INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
        schedule_work(&psock->gc);
}
EXPORT_SYMBOL_GPL(sk_psock_destroy);

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
        sk_psock_cork_free(psock);
        sk_psock_zap_ingress(psock);

        write_lock_bh(&sk->sk_callback_lock);
        sk_psock_restore_proto(sk, psock);
        rcu_assign_sk_user_data(sk, NULL);
        if (psock->progs.skb_parser)
                sk_psock_stop_strp(sk, psock);
        write_unlock_bh(&sk->sk_callback_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

        call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

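/* Verdict handling. sk_psock_map_verd() folds a BPF program's return
 * code and the redirect flag into the internal __SK_* verdicts;
 * anything other than SK_PASS collapses to __SK_DROP.
 */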
static int sk_psock_map_verd(int verdict, bool redir)
{
        switch (verdict) {
        case SK_PASS:
                return redir ? __SK_REDIRECT : __SK_PASS;
        case SK_DROP:
        default:
                break;
        }

        return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
                         struct sk_msg *msg)
{
        struct bpf_prog *prog;
        int ret;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.msg_parser);
        if (unlikely(!prog)) {
                ret = __SK_PASS;
                goto out;
        }

        sk_msg_compute_data_pointers(msg);
        msg->sk = sk;
        ret = bpf_prog_run_pin_on_cpu(prog, msg);
        ret = sk_psock_map_verd(ret, msg->sk_redir);
        psock->apply_bytes = msg->apply_bytes;
        if (ret == __SK_REDIRECT) {
                if (psock->sk_redir)
                        sock_put(psock->sk_redir);
                psock->sk_redir = msg->sk_redir;
                if (!psock->sk_redir) {
                        ret = __SK_DROP;
                        goto out;
                }
                sock_hold(psock->sk_redir);
        }
out:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
                            struct sk_buff *skb)
{
        int ret;

        skb->sk = psock->sk;
        bpf_compute_data_end_sk_skb(skb);
        ret = bpf_prog_run_pin_on_cpu(prog, skb);
        /* strparser clones the skb before handing it to an upper layer,
         * meaning skb_orphan has been called. We NULL sk on the way out
         * to ensure we don't trigger a BUG_ON() in skb/sk operations
         * later and because we are not charging the memory of this skb
         * to any socket yet.
         */
        skb->sk = NULL;
        return ret;
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
        struct sk_psock_parser *parser;

        parser = container_of(strp, struct sk_psock_parser, strp);
        return container_of(parser, struct sk_psock, parser);
}

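/* sk_psock_skb_redirect() delivers an skb to the socket a BPF program
 * redirected it to: the skb is queued on the target psock's ingress
 * list and that psock's backlog work is kicked. If the target is gone,
 * dead, or over its receive/send-buffer budget, the skb is dropped.
 */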
static void sk_psock_skb_redirect(struct sk_buff *skb)
{
        struct sk_psock *psock_other;
        struct sock *sk_other;
        bool ingress;

        sk_other = tcp_skb_bpf_redirect_fetch(skb);
        if (unlikely(!sk_other)) {
                kfree_skb(skb);
                return;
        }
        psock_other = sk_psock(sk_other);
        if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
            !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
                kfree_skb(skb);
                return;
        }

        ingress = tcp_skb_bpf_ingress(skb);
        if ((!ingress && sock_writeable(sk_other)) ||
            (ingress &&
             atomic_read(&sk_other->sk_rmem_alloc) <=
             sk_other->sk_rcvbuf)) {
                if (!ingress)
                        skb_set_owner_w(skb, sk_other);
                skb_queue_tail(&psock_other->ingress_skb, skb);
                schedule_work(&psock_other->work);
        } else {
                kfree_skb(skb);
        }
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
{
        switch (verdict) {
        case __SK_REDIRECT:
                sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
        default:
                break;
        }
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
        struct bpf_prog *prog;
        int ret = __SK_PASS;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
        sk_psock_tls_verdict_apply(skb, ret);
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static void sk_psock_verdict_apply(struct sk_psock *psock,
                                   struct sk_buff *skb, int verdict)
{
        struct sock *sk_other;

        switch (verdict) {
        case __SK_PASS:
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
                    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                        goto out_free;
                }
                if (atomic_read(&sk_other->sk_rmem_alloc) <=
                    sk_other->sk_rcvbuf) {
                        struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);

                        tcp->bpf.flags |= BPF_F_INGRESS;
                        skb_queue_tail(&psock->ingress_skb, skb);
                        schedule_work(&psock->work);
                        break;
                }
                goto out_free;
        case __SK_REDIRECT:
                sk_psock_skb_redirect(skb);
                break;
        case __SK_DROP:
        default:
out_free:
                kfree_skb(skb);
        }
}

static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
        struct sock *sk;

        rcu_read_lock();
        sk = strp->sk;
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                kfree_skb(skb);
                goto out;
        }
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_orphan(skb);
                tcp_skb_bpf_redirect_clear(skb);
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
        sk_psock_verdict_apply(psock, skb, ret);
out:
        rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
        return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
        struct sk_psock *psock = sk_psock_from_strp(strp);
        struct bpf_prog *prog;
        int ret = skb->len;

        rcu_read_lock();
        prog = READ_ONCE(psock->progs.skb_parser);
        if (likely(prog))
                ret = sk_psock_bpf_run(psock, prog, skb);
        rcu_read_unlock();
        return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (tls_sw_has_ctx_rx(sk)) {
                        psock->parser.saved_data_ready(sk);
                } else {
                        write_lock_bh(&sk->sk_callback_lock);
                        strp_data_ready(&psock->parser.strp);
                        write_unlock_bh(&sk->sk_callback_lock);
                }
        }
        rcu_read_unlock();
}

static void sk_psock_write_space(struct sock *sk)
{
        struct sk_psock *psock;
        void (*write_space)(struct sock *sk) = NULL;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        schedule_work(&psock->work);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
        if (write_space)
                write_space(sk);
}

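/* strparser glue: sk_psock_init_strp() registers the callbacks above,
 * while the start/stop helpers swap the socket's sk_data_ready between
 * the saved original and the psock-aware version that feeds strparser.
 */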
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
        static const struct strp_callbacks cb = {
                .rcv_msg        = sk_psock_strp_read,
                .read_sock_done = sk_psock_strp_read_done,
                .parse_msg      = sk_psock_strp_parse,
        };

        psock->parser.enabled = false;
        return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (parser->enabled)
                return;

        parser->saved_data_ready = sk->sk_data_ready;
        sk->sk_data_ready = sk_psock_strp_data_ready;
        sk->sk_write_space = sk_psock_write_space;
        parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_parser *parser = &psock->parser;

        if (!parser->enabled)
                return;

        sk->sk_data_ready = parser->saved_data_ready;
        parser->saved_data_ready = NULL;
        strp_stop(&parser->strp);
        parser->enabled = false;
}