linux/kernel/bpf/cgroup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
        cgroup_get(cgrp);
        percpu_ref_kill(&cgrp->bpf.refcnt);
}
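
/*
 * Note on the lifetime protocol above: cgroup_bpf_offline() takes an extra
 * cgroup reference so the cgroup stays valid until cgroup_bpf_release()
 * (scheduled via cgroup_bpf_release_fn() below, once the percpu refcount
 * drains) has detached all programs; that reference is dropped at the very
 * end of cgroup_bpf_release().
 */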

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
                                     struct bpf_cgroup_storage *new_storages[],
                                     enum bpf_attach_type type,
                                     struct bpf_prog *prog,
                                     struct cgroup *cgrp)
{
        enum bpf_cgroup_storage_type stype;
        struct bpf_cgroup_storage_key key;
        struct bpf_map *map;

        key.cgroup_inode_id = cgroup_id(cgrp);
        key.attach_type = type;

        for_each_cgroup_storage_type(stype) {
                map = prog->aux->cgroup_storage[stype];
                if (!map)
                        continue;

                storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
                if (storages[stype])
                        continue;

                storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(storages[stype])) {
                        bpf_cgroup_storages_free(new_storages);
                        return -ENOMEM;
                }

                new_storages[stype] = storages[stype];
        }

        return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
                                       struct bpf_cgroup_storage *src[])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
                                     struct cgroup *cgrp,
                                     enum bpf_attach_type attach_type)
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
        cgroup_put(link->cgroup);
        link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
        struct cgroup *p, *cgrp = container_of(work, struct cgroup,
                                               bpf.release_work);
        struct bpf_prog_array *old_array;
        struct list_head *storages = &cgrp->bpf.storages;
        struct bpf_cgroup_storage *storage, *stmp;

        unsigned int type;

        mutex_lock(&cgroup_mutex);

        for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
                struct list_head *progs = &cgrp->bpf.progs[type];
                struct bpf_prog_list *pl, *pltmp;

                list_for_each_entry_safe(pl, pltmp, progs, node) {
                        list_del(&pl->node);
                        if (pl->prog)
                                bpf_prog_put(pl->prog);
                        if (pl->link)
                                bpf_cgroup_link_auto_detach(pl->link);
                        kfree(pl);
                        static_branch_dec(&cgroup_bpf_enabled_key[type]);
                }
                old_array = rcu_dereference_protected(
                                cgrp->bpf.effective[type],
                                lockdep_is_held(&cgroup_mutex));
                bpf_prog_array_free(old_array);
        }

        list_for_each_entry_safe(storage, stmp, storages, list_cg) {
                bpf_cgroup_storage_unlink(storage);
                bpf_cgroup_storage_free(storage);
        }

        mutex_unlock(&cgroup_mutex);

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_put(p);

        percpu_ref_exit(&cgrp->bpf.refcnt);
        cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
        struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

        INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
        queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
        if (pl->prog)
                return pl->prog;
        if (pl->link)
                return pl->link->link.prog;
        return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
        struct bpf_prog_list *pl;
        u32 cnt = 0;

        list_for_each_entry(pl, head, node) {
                if (!prog_list_prog(pl))
                        continue;
                cnt++;
        }
        return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
                                    enum bpf_attach_type type)
{
        struct cgroup *p;

        p = cgroup_parent(cgrp);
        if (!p)
                return true;
        do {
                u32 flags = p->bpf.flags[type];
                u32 cnt;

                if (flags & BPF_F_ALLOW_MULTI)
                        return true;
                cnt = prog_list_length(&p->bpf.progs[type]);
                WARN_ON_ONCE(cnt > 1);
                if (cnt == 1)
                        return !!(flags & BPF_F_ALLOW_OVERRIDE);
                p = cgroup_parent(p);
        } while (p);
        return true;
}
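
/*
 * Illustration (not part of the kernel source): with a hierarchy
 * root -> A -> B, attaching to B is rejected if A has a program attached
 * with no flags, allowed if A's program was attached with
 * BPF_F_ALLOW_OVERRIDE or BPF_F_ALLOW_MULTI, and always allowed if no
 * ancestor has anything attached.
 */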

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
                                   enum bpf_attach_type type,
                                   struct bpf_prog_array **array)
{
        struct bpf_prog_array_item *item;
        struct bpf_prog_array *progs;
        struct bpf_prog_list *pl;
        struct cgroup *p = cgrp;
        int cnt = 0;

        /* count number of effective programs by walking parents */
        do {
                if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
                        cnt += prog_list_length(&p->bpf.progs[type]);
                p = cgroup_parent(p);
        } while (p);

        progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
        if (!progs)
                return -ENOMEM;

        /* populate the array with effective progs */
        cnt = 0;
        p = cgrp;
        do {
                if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
                        continue;

                list_for_each_entry(pl, &p->bpf.progs[type], node) {
                        if (!prog_list_prog(pl))
                                continue;

                        item = &progs->items[cnt];
                        item->prog = prog_list_prog(pl);
                        bpf_cgroup_storages_assign(item->cgroup_storage,
                                                   pl->storage);
                        cnt++;
                }
        } while ((p = cgroup_parent(p)));

        *array = progs;
        return 0;
}
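
/*
 * Ordering note (illustrative): the effective array is filled starting at
 * this cgroup and walking towards the root, and BPF_PROG_RUN_ARRAY()
 * executes items in array order. So for root -> A -> B, B's effective
 * array runs B's own programs first, then A's, then root's; an ancestor's
 * programs are included only if that ancestor attached with
 * BPF_F_ALLOW_MULTI (or nothing was found below it).
 */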

static void activate_effective_progs(struct cgroup *cgrp,
                                     enum bpf_attach_type type,
                                     struct bpf_prog_array *old_array)
{
        old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
                                        lockdep_is_held(&cgroup_mutex));
        /* free prog array after grace period, since __cgroup_bpf_run_*()
         * might be still walking the array
         */
        bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since the compiler thinks
 * that the array below is variable length
 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
        struct bpf_prog_array *arrays[NR] = {};
        struct cgroup *p;
        int ret, i;

        ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
                              GFP_KERNEL);
        if (ret)
                return ret;

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_get(p);

        for (i = 0; i < NR; i++)
                INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

        INIT_LIST_HEAD(&cgrp->bpf.storages);

        for (i = 0; i < NR; i++)
                if (compute_effective_progs(cgrp, i, &arrays[i]))
                        goto cleanup;

        for (i = 0; i < NR; i++)
                activate_effective_progs(cgrp, i, arrays[i]);

        return 0;
cleanup:
        for (i = 0; i < NR; i++)
                bpf_prog_array_free(arrays[i]);

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_put(p);

        percpu_ref_exit(&cgrp->bpf.refcnt);

        return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
                                  enum bpf_attach_type type)
{
        struct cgroup_subsys_state *css;
        int err;

        /* allocate and recompute effective prog arrays */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt))
                        continue;

                err = compute_effective_progs(desc, type, &desc->bpf.inactive);
                if (err)
                        goto cleanup;
        }

        /* all allocations were successful. Activate all prog arrays */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
                        if (unlikely(desc->bpf.inactive)) {
                                bpf_prog_array_free(desc->bpf.inactive);
                                desc->bpf.inactive = NULL;
                        }
                        continue;
                }

                activate_effective_progs(desc, type, desc->bpf.inactive);
                desc->bpf.inactive = NULL;
        }

        return 0;

cleanup:
        /* oom while computing effective. Free all computed effective arrays
         * since they were not activated
         */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                bpf_prog_array_free(desc->bpf.inactive);
                desc->bpf.inactive = NULL;
        }

        return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
                                               struct bpf_prog *prog,
                                               struct bpf_cgroup_link *link,
                                               struct bpf_prog *replace_prog,
                                               bool allow_multi)
{
        struct bpf_prog_list *pl;

        /* single-attach case */
        if (!allow_multi) {
                if (list_empty(progs))
                        return NULL;
                return list_first_entry(progs, typeof(*pl), node);
        }

        list_for_each_entry(pl, progs, node) {
                if (prog && pl->prog == prog && prog != replace_prog)
                        /* disallow attaching the same prog twice */
                        return ERR_PTR(-EINVAL);
                if (link && pl->link == link)
                        /* disallow attaching the same link twice */
                        return ERR_PTR(-EINVAL);
        }

        /* direct prog multi-attach w/ replacement case */
        if (replace_prog) {
                list_for_each_entry(pl, progs, node) {
                        if (pl->prog == replace_prog)
                                /* a match found */
                                return pl;
                }
                /* prog to replace not found for cgroup */
                return ERR_PTR(-ENOENT);
        }

        return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
                        struct bpf_prog *prog, struct bpf_prog *replace_prog,
                        struct bpf_cgroup_link *link,
                        enum bpf_attach_type type, u32 flags)
{
        u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
        struct list_head *progs = &cgrp->bpf.progs[type];
        struct bpf_prog *old_prog = NULL;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        struct bpf_prog_list *pl;
        int err;

        if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
            ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
                /* invalid combination */
                return -EINVAL;
        if (link && (prog || replace_prog))
                /* only either link or prog/replace_prog can be specified */
                return -EINVAL;
        if (!!replace_prog != !!(flags & BPF_F_REPLACE))
                /* replace_prog implies BPF_F_REPLACE, and vice versa */
                return -EINVAL;

        if (!hierarchy_allows_attach(cgrp, type))
                return -EPERM;

        if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
                /* Disallow attaching non-overridable on top
                 * of existing overridable in this cgroup.
                 * Disallow attaching multi-prog if overridable or none
                 */
                return -EPERM;

        if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
                return -E2BIG;

        pl = find_attach_entry(progs, prog, link, replace_prog,
                               flags & BPF_F_ALLOW_MULTI);
        if (IS_ERR(pl))
                return PTR_ERR(pl);

        if (bpf_cgroup_storages_alloc(storage, new_storage, type,
                                      prog ? : link->link.prog, cgrp))
                return -ENOMEM;

        if (pl) {
                old_prog = pl->prog;
        } else {
                pl = kmalloc(sizeof(*pl), GFP_KERNEL);
                if (!pl) {
                        bpf_cgroup_storages_free(new_storage);
                        return -ENOMEM;
                }
                list_add_tail(&pl->node, progs);
        }

        pl->prog = prog;
        pl->link = link;
        bpf_cgroup_storages_assign(pl->storage, storage);
        cgrp->bpf.flags[type] = saved_flags;

        err = update_effective_progs(cgrp, type);
        if (err)
                goto cleanup;

        if (old_prog)
                bpf_prog_put(old_prog);
        else
                static_branch_inc(&cgroup_bpf_enabled_key[type]);
        bpf_cgroup_storages_link(new_storage, cgrp, type);
        return 0;

cleanup:
        if (old_prog) {
                pl->prog = old_prog;
                pl->link = NULL;
        }
        bpf_cgroup_storages_free(new_storage);
        if (!old_prog) {
                list_del(&pl->node);
                kfree(pl);
        }
        return err;
}
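
/*
 * Userspace sketch (illustrative, assumes libbpf; the cgroup path is made
 * up): attaching a program in multi mode so it can coexist with programs
 * in descendant cgroups:
 *
 *   int cg_fd = open("/sys/fs/cgroup/unified/mygroup", O_RDONLY);
 *   int err = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
 *                             BPF_F_ALLOW_MULTI);
 *
 * This reaches __cgroup_bpf_attach() via cgroup_bpf_prog_attach() below.
 */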

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
                                   enum bpf_attach_type type,
                                   struct bpf_cgroup_link *link)
{
        struct bpf_prog_array_item *item;
        struct cgroup_subsys_state *css;
        struct bpf_prog_array *progs;
        struct bpf_prog_list *pl;
        struct list_head *head;
        struct cgroup *cg;
        int pos;

        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt))
                        continue;

                /* find position of link in effective progs array */
                for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
                        if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
                                continue;

                        head = &cg->bpf.progs[type];
                        list_for_each_entry(pl, head, node) {
                                if (!prog_list_prog(pl))
                                        continue;
                                if (pl->link == link)
                                        goto found;
                                pos++;
                        }
                }
found:
                BUG_ON(!cg);
                progs = rcu_dereference_protected(
                                desc->bpf.effective[type],
                                lockdep_is_held(&cgroup_mutex));
                item = &progs->items[pos];
                WRITE_ONCE(item->prog, link->link.prog);
        }
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: Replacement BPF program
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
                                struct bpf_cgroup_link *link,
                                struct bpf_prog *new_prog)
{
        struct list_head *progs = &cgrp->bpf.progs[link->type];
        struct bpf_prog *old_prog;
        struct bpf_prog_list *pl;
        bool found = false;

        if (link->link.prog->type != new_prog->type)
                return -EINVAL;

        list_for_each_entry(pl, progs, node) {
                if (pl->link == link) {
                        found = true;
                        break;
                }
        }
        if (!found)
                return -ENOENT;

        old_prog = xchg(&link->link.prog, new_prog);
        replace_effective_prog(cgrp, link->type, link);
        bpf_prog_put(old_prog);
        return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
                              struct bpf_prog *old_prog)
{
        struct bpf_cgroup_link *cg_link;
        int ret;

        cg_link = container_of(link, struct bpf_cgroup_link, link);

        mutex_lock(&cgroup_mutex);
        /* link might have been auto-released by dying cgroup, so fail */
        if (!cg_link->cgroup) {
                ret = -ENOLINK;
                goto out_unlock;
        }
        if (old_prog && link->prog != old_prog) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
        mutex_unlock(&cgroup_mutex);
        return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
                                               struct bpf_prog *prog,
                                               struct bpf_cgroup_link *link,
                                               bool allow_multi)
{
        struct bpf_prog_list *pl;

        if (!allow_multi) {
                if (list_empty(progs))
                        /* report error when trying to detach and nothing is attached */
                        return ERR_PTR(-ENOENT);

                /* to maintain backward compatibility NONE and OVERRIDE cgroups
                 * allow detaching with invalid FD (prog==NULL) in legacy mode
                 */
                return list_first_entry(progs, typeof(*pl), node);
        }

        if (!prog && !link)
                /* to detach MULTI prog the user has to specify valid FD
                 * of the program or link to be detached
                 */
                return ERR_PTR(-EINVAL);

        /* find the prog or link and detach it */
        list_for_each_entry(pl, progs, node) {
                if (pl->prog == prog && pl->link == link)
                        return pl;
        }
        return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
        struct list_head *progs = &cgrp->bpf.progs[type];
        u32 flags = cgrp->bpf.flags[type];
        struct bpf_prog_list *pl;
        struct bpf_prog *old_prog;
        int err;

        if (prog && link)
                /* only one of prog or link can be specified */
                return -EINVAL;

        pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
        if (IS_ERR(pl))
                return PTR_ERR(pl);

        /* mark it deleted, so it's ignored while recomputing effective */
        old_prog = pl->prog;
        pl->prog = NULL;
        pl->link = NULL;

        err = update_effective_progs(cgrp, type);
        if (err)
                goto cleanup;

        /* now can actually delete it from this cgroup list */
        list_del(&pl->node);
        kfree(pl);
        if (list_empty(progs))
                /* last program was detached, reset flags to zero */
                cgrp->bpf.flags[type] = 0;
        if (old_prog)
                bpf_prog_put(old_prog);
        static_branch_dec(&cgroup_bpf_enabled_key[type]);
        return 0;

cleanup:
        /* restore back prog or link */
        pl->prog = old_prog;
        pl->link = link;
        return err;
}
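
/*
 * Userspace sketch (illustrative, assumes libbpf): detaching the program
 * attached in the earlier example. In multi mode a valid prog_fd is
 * required; in legacy NONE/OVERRIDE mode it may be -1:
 *
 *   int err = bpf_prog_detach2(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS);
 */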

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr)
{
        __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
        enum bpf_attach_type type = attr->query.attach_type;
        struct list_head *progs = &cgrp->bpf.progs[type];
        u32 flags = cgrp->bpf.flags[type];
        struct bpf_prog_array *effective;
        struct bpf_prog *prog;
        int cnt, ret = 0, i;

        effective = rcu_dereference_protected(cgrp->bpf.effective[type],
                                              lockdep_is_held(&cgroup_mutex));

        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
                cnt = bpf_prog_array_length(effective);
        else
                cnt = prog_list_length(progs);

        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
                return -EFAULT;
        if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
                return -EFAULT;
        if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
                /* return early if user requested only program count + flags */
                return 0;
        if (attr->query.prog_cnt < cnt) {
                cnt = attr->query.prog_cnt;
                ret = -ENOSPC;
        }

        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
                return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
        } else {
                struct bpf_prog_list *pl;
                u32 id;

                i = 0;
                list_for_each_entry(pl, progs, node) {
                        prog = prog_list_prog(pl);
                        id = prog->aux->id;
                        if (copy_to_user(prog_ids + i, &id, sizeof(id)))
                                return -EFAULT;
                        if (++i == cnt)
                                break;
                }
        }
        return ret;
}
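
/*
 * Userspace sketch (illustrative, assumes libbpf): listing the effective
 * program IDs for a cgroup, i.e. what would actually run there:
 *
 *   __u32 ids[64], cnt = 64, attach_flags;
 *   int err = bpf_prog_query(cg_fd, BPF_CGROUP_INET_EGRESS,
 *                            BPF_F_QUERY_EFFECTIVE, &attach_flags,
 *                            ids, &cnt);
 *
 * On -ENOSPC the array was too small; the full count is still reported
 * back (it is written before the copy is truncated above).
 */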

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog)
{
        struct bpf_prog *replace_prog = NULL;
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
            (attr->attach_flags & BPF_F_REPLACE)) {
                replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
                if (IS_ERR(replace_prog)) {
                        cgroup_put(cgrp);
                        return PTR_ERR(replace_prog);
                }
        }

        ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
                                attr->attach_type, attr->attach_flags);

        if (replace_prog)
                bpf_prog_put(replace_prog);
        cgroup_put(cgrp);
        return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
        struct bpf_prog *prog;
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
        if (IS_ERR(prog))
                prog = NULL;

        ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
        if (prog)
                bpf_prog_put(prog);

        cgroup_put(cgrp);
        return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        struct cgroup *cg;

        /* link might have been auto-detached by dying cgroup already,
         * in that case our work is done here
         */
        if (!cg_link->cgroup)
                return;

        mutex_lock(&cgroup_mutex);

        /* re-check cgroup under lock again */
        if (!cg_link->cgroup) {
                mutex_unlock(&cgroup_mutex);
                return;
        }

        WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
                                    cg_link->type));

        cg = cg_link->cgroup;
        cg_link->cgroup = NULL;

        mutex_unlock(&cgroup_mutex);

        cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);

        kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
        bpf_cgroup_link_release(link);

        return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
                                        struct seq_file *seq)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;

        mutex_lock(&cgroup_mutex);
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
        mutex_unlock(&cgroup_mutex);

        seq_printf(seq,
                   "cgroup_id:\t%llu\n"
                   "attach_type:\t%d\n",
                   cg_id,
                   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
                                          struct bpf_link_info *info)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;

        mutex_lock(&cgroup_mutex);
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
        mutex_unlock(&cgroup_mutex);

        info->cgroup.cgroup_id = cg_id;
        info->cgroup.attach_type = cg_link->type;
        return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
        .release = bpf_cgroup_link_release,
        .dealloc = bpf_cgroup_link_dealloc,
        .detach = bpf_cgroup_link_detach,
        .update_prog = cgroup_bpf_replace,
        .show_fdinfo = bpf_cgroup_link_show_fdinfo,
        .fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_link_primer link_primer;
        struct bpf_cgroup_link *link;
        struct cgroup *cgrp;
        int err;

        if (attr->link_create.flags)
                return -EINVAL;

        cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        link = kzalloc(sizeof(*link), GFP_USER);
        if (!link) {
                err = -ENOMEM;
                goto out_put_cgroup;
        }
        bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
                      prog);
        link->cgroup = cgrp;
        link->type = attr->link_create.attach_type;

        err = bpf_link_prime(&link->link, &link_primer);
        if (err) {
                kfree(link);
                goto out_put_cgroup;
        }

        err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
                                BPF_F_ALLOW_MULTI);
        if (err) {
                bpf_link_cleanup(&link_primer);
                goto out_put_cgroup;
        }

        return bpf_link_settle(&link_primer);

out_put_cgroup:
        cgroup_put(cgrp);
        return err;
}
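
/*
 * Userspace sketch (illustrative, assumes libbpf): link-based attachment,
 * which always uses BPF_F_ALLOW_MULTI and is auto-detached when the cgroup
 * dies:
 *
 *   int link_fd = bpf_link_create(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS,
 *                                 NULL);
 *
 * Closing the last FD of the link detaches the program via
 * bpf_cgroup_link_release() above.
 */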

int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
{
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->query.target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        ret = cgroup_bpf_query(cgrp, attr, uattr);

        cgroup_put(cgrp);
        return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)    - continue with packet output
 *   NET_XMIT_DROP       (1)    - drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)    - continue with packet output and notify TCP
 *                                to call cwr
 *   -EPERM                     - drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type)
{
        unsigned int offset = skb->data - skb_network_header(skb);
        struct sock *save_sk;
        void *saved_data_end;
        struct cgroup *cgrp;
        int ret;

        if (!sk || !sk_fullsock(sk))
                return 0;

        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
                return 0;

        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        save_sk = skb->sk;
        skb->sk = sk;
        __skb_push(skb, offset);

        /* compute pointers for the bpf prog */
        bpf_compute_and_save_data_end(skb, &saved_data_end);

        if (type == BPF_CGROUP_INET_EGRESS) {
                ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
                        cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
        } else {
                ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
                                          __bpf_prog_run_save_cb);
                ret = (ret == 1 ? 0 : -EPERM);
        }
        bpf_restore_data_end(skb, saved_data_end);
        __skb_pull(skb, offset);
        skb->sk = save_sk;

        return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
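
/*
 * BPF-side sketch (illustrative): a minimal cgroup_skb program that the
 * array above would run. Returning 1 allows the packet, 0 drops it:
 *
 *   SEC("cgroup_skb/egress")
 *   int allow_all(struct __sk_buff *skb)
 *   {
 *           return 1;
 *   }
 */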

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        int ret;

        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
        return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx,
                                      u32 *flags)
{
        struct bpf_sock_addr_kern ctx = {
                .sk = sk,
                .uaddr = uaddr,
                .t_ctx = t_ctx,
        };
        struct sockaddr_storage unspec;
        struct cgroup *cgrp;
        int ret;

        /* Check socket family since not all sockets represent a network
         * endpoint (e.g. AF_UNIX).
         */
        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
                return 0;

        if (!ctx.uaddr) {
                memset(&unspec, 0, sizeof(unspec));
                ctx.uaddr = (struct sockaddr *)&unspec;
        }

        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        ret = BPF_PROG_RUN_ARRAY_FLAGS(cgrp->bpf.effective[type], &ctx,
                                       BPF_PROG_RUN, flags);

        return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
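
/*
 * BPF-side sketch (illustrative): a cgroup sock_addr program for connect(2)
 * on IPv4 that only permits connections to port 443 (ctx fields are in
 * network byte order):
 *
 *   SEC("cgroup/connect4")
 *   int connect4_filter(struct bpf_sock_addr *ctx)
 *   {
 *           return ctx->user_port == bpf_htons(443) ? 1 : 0;
 *   }
 */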

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        int ret;

        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
                                 BPF_PROG_RUN);
        return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type)
{
        struct cgroup *cgrp;
        struct bpf_cgroup_dev_ctx ctx = {
                .access_type = (access << 16) | dev_type,
                .major = major,
                .minor = minor,
        };
        int allow = 1;

        rcu_read_lock();
        cgrp = task_dfl_cgroup(current);
        allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
                                   BPF_PROG_RUN);
        rcu_read_unlock();

        return !allow;
}

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_get_current_uid_gid:
                return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_get_local_storage:
                return &bpf_get_local_storage_proto;
        case BPF_FUNC_get_current_cgroup_id:
                return &bpf_get_current_cgroup_id_proto;
        case BPF_FUNC_perf_event_output:
                return &bpf_event_output_data_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
        const int size_default = sizeof(__u32);

        if (type == BPF_WRITE)
                return false;

        if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
                return false;
        /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;

        switch (off) {
        case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
                bpf_ctx_record_field_size(info, size_default);
                if (!bpf_ctx_narrow_access_ok(off, size, size_default))
                        return false;
                break;
        default:
                if (size != size_default)
                        return false;
        }

        return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
        .get_func_proto         = cgroup_dev_func_proto,
        .is_valid_access        = cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *      result is size of the new value if the program set one, initial value
 *      otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *      to sysctl is happening, result is new position if program overrode it,
 *      initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   char **buf, size_t *pcount, loff_t *ppos,
                                   enum bpf_attach_type type)
{
        struct bpf_sysctl_kern ctx = {
                .head = head,
                .table = table,
                .write = write,
                .ppos = ppos,
                .cur_val = NULL,
                .cur_len = PAGE_SIZE,
                .new_val = NULL,
                .new_len = 0,
                .new_updated = 0,
        };
        struct cgroup *cgrp;
        loff_t pos = 0;
        int ret;

        ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
        if (!ctx.cur_val ||
            table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
                /* Let BPF program decide how to proceed. */
                ctx.cur_len = 0;
        }

        if (write && *buf && *pcount) {
                /* BPF program should be able to override new value with a
                 * buffer bigger than provided by user.
                 */
                ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
                ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
                if (ctx.new_val) {
                        memcpy(ctx.new_val, *buf, ctx.new_len);
                } else {
                        /* Let BPF program decide how to proceed. */
                        ctx.new_len = 0;
                }
        }

        rcu_read_lock();
        cgrp = task_dfl_cgroup(current);
        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
        rcu_read_unlock();

        kfree(ctx.cur_val);

        if (ret == 1 && ctx.new_updated) {
                kfree(*buf);
                *buf = ctx.new_val;
                *pcount = ctx.new_len;
        } else {
                kfree(ctx.new_val);
        }

        return ret == 1 ? 0 : -EPERM;
}
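
/*
 * BPF-side sketch (illustrative): a cgroup sysctl program that denies
 * writes but permits reads; ctx->write mirrors the @write argument above:
 *
 *   SEC("cgroup/sysctl")
 *   int sysctl_read_only(struct bpf_sysctl *ctx)
 *   {
 *           return ctx->write ? 0 : 1;
 *   }
 */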

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
                                             enum bpf_attach_type attach_type)
{
        struct bpf_prog_array *prog_array;
        bool empty;

        rcu_read_lock();
        prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
        empty = bpf_prog_array_is_empty(prog_array);
        rcu_read_unlock();

        return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
                             struct bpf_sockopt_buf *buf)
{
        if (unlikely(max_optlen < 0))
                return -EINVAL;

        if (unlikely(max_optlen > PAGE_SIZE)) {
                /* We don't expose optvals that are greater than PAGE_SIZE
                 * to the BPF program.
                 */
                max_optlen = PAGE_SIZE;
        }

        if (max_optlen <= sizeof(buf->data)) {
                /* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
                 * bytes avoid the cost of kzalloc.
                 */
                ctx->optval = buf->data;
                ctx->optval_end = ctx->optval + max_optlen;
                return max_optlen;
        }

        ctx->optval = kzalloc(max_optlen, GFP_USER);
        if (!ctx->optval)
                return -ENOMEM;

        ctx->optval_end = ctx->optval + max_optlen;

        return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
                             struct bpf_sockopt_buf *buf)
{
        if (ctx->optval == buf->data)
                return;
        kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
                                  struct bpf_sockopt_buf *buf)
{
        return ctx->optval != buf->data;
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        struct bpf_sockopt_buf buf = {};
        struct bpf_sockopt_kern ctx = {
                .sk = sk,
                .level = *level,
                .optname = *optname,
        };
        int ret, max_optlen;

        /* Opportunistic check to see whether we have any BPF program
         * attached to the hook so we don't waste time allocating
         * memory and locking the socket.
         */
        if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
                return 0;

        /* Allocate a bit more than the initial user buffer for
         * BPF program. The canonical use case is overriding
         * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
         */
        max_optlen = max_t(int, 16, *optlen);

        max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
        if (max_optlen < 0)
                return max_optlen;

        ctx.optlen = *optlen;

        if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
                ret = -EFAULT;
                goto out;
        }

        lock_sock(sk);
        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
                                 &ctx, BPF_PROG_RUN);
        release_sock(sk);

        if (!ret) {
                ret = -EPERM;
                goto out;
        }

        if (ctx.optlen == -1) {
                /* optlen set to -1, bypass kernel */
                ret = 1;
        } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
                /* optlen is out of bounds */
                ret = -EFAULT;
        } else {
                /* optlen within bounds, run kernel handler */
                ret = 0;

                /* export any potential modifications */
                *level = ctx.level;
                *optname = ctx.optname;

                /* optlen == 0 from BPF indicates that we should
                 * use original userspace data.
                 */
                if (ctx.optlen != 0) {
                        *optlen = ctx.optlen;
                        /* We've used bpf_sockopt_kern->buf as an intermediary
                         * storage, but the BPF program indicates that we need
                         * to pass this data to the kernel setsockopt handler.
                         * No way to export on-stack buf, have to allocate a
                         * new buffer.
                         */
                        if (!sockopt_buf_allocated(&ctx, &buf)) {
                                void *p = kmalloc(ctx.optlen, GFP_USER);

                                if (!p) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                memcpy(p, ctx.optval, ctx.optlen);
                                *kernel_optval = p;
                        } else {
                                *kernel_optval = ctx.optval;
                        }
                        /* export and don't free sockopt buf */
                        return 0;
                }
        }

out:
        sockopt_free_buf(&ctx, &buf);
        return ret;
}
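
/*
 * BPF-side sketch (illustrative): a setsockopt program that rejects
 * SO_MARK for sockets in this cgroup and lets everything else through
 * unchanged (optlen left as-is, so the kernel handler sees the original
 * value):
 *
 *   SEC("cgroup/setsockopt")
 *   int block_so_mark(struct bpf_sockopt *ctx)
 *   {
 *           if (ctx->level == SOL_SOCKET && ctx->optname == SO_MARK)
 *                   return 0;
 *           return 1;
 *   }
 */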
1443
1444int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1445                                       int optname, char __user *optval,
1446                                       int __user *optlen, int max_optlen,
1447                                       int retval)
1448{
1449        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1450        struct bpf_sockopt_buf buf = {};
1451        struct bpf_sockopt_kern ctx = {
1452                .sk = sk,
1453                .level = level,
1454                .optname = optname,
1455                .retval = retval,
1456        };
1457        int ret;
1458
1459        /* Opportunistic check to see whether we have any BPF program
1460         * attached to the hook so we don't waste time allocating
1461         * memory and locking the socket.
1462         */
1463        if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
1464                return retval;
1465
1466        ctx.optlen = max_optlen;
1467
1468        max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1469        if (max_optlen < 0)
1470                return max_optlen;
1471
1472        if (!retval) {
1473                /* If kernel getsockopt finished successfully,
1474                 * copy whatever was returned to the user back
1475                 * into our temporary buffer. Set optlen to the
1476                 * one that kernel returned as well to let
1477                 * BPF programs inspect the value.
1478                 */
1479
1480                if (get_user(ctx.optlen, optlen)) {
1481                        ret = -EFAULT;
1482                        goto out;
1483                }
1484
1485                if (ctx.optlen < 0) {
1486                        ret = -EFAULT;
1487                        goto out;
1488                }
1489
1490                if (copy_from_user(ctx.optval, optval,
1491                                   min(ctx.optlen, max_optlen)) != 0) {
1492                        ret = -EFAULT;
1493                        goto out;
1494                }
1495        }
1496
1497        lock_sock(sk);
1498        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
1499                                 &ctx, BPF_PROG_RUN);
1500        release_sock(sk);
1501
1502        if (!ret) {
1503                ret = -EPERM;
1504                goto out;
1505        }
1506
1507        if (ctx.optlen > max_optlen || ctx.optlen < 0) {
1508                ret = -EFAULT;
1509                goto out;
1510        }
1511
1512        /* BPF programs are only allowed to set retval to 0, not to
1513         * some arbitrary value.
1514         */
1515        if (ctx.retval != 0 && ctx.retval != retval) {
1516                ret = -EFAULT;
1517                goto out;
1518        }
1519
1520        if (ctx.optlen != 0) {
1521                if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1522                    put_user(ctx.optlen, optlen)) {
1523                        ret = -EFAULT;
1524                        goto out;
1525                }
1526        }
1527
1528        ret = ctx.retval;
1529
1530out:
1531        sockopt_free_buf(&ctx, &buf);
1532        return ret;
1533}
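
/* The BPF side of the contract enforced above, as a minimal sketch: a
 * BPF_CGROUP_GETSOCKOPT program may shrink ctx->optlen (growing it past
 * max_optlen, or going negative, fails with -EFAULT) and may only leave
 * ctx->retval at 0 or at the kernel's original value; returning 0 from the
 * program yields -EPERM. SOL_CUSTOM below is a hypothetical level used only
 * for illustration.
 *
 *        SEC("cgroup/getsockopt")
 *        int hide_option(struct bpf_sockopt *ctx)
 *        {
 *                if (ctx->level == SOL_CUSTOM) {
 *                        ctx->optlen = 0;        // copy nothing back to user
 *                        ctx->retval = 0;        // report success
 *                }
 *                return 1;
 *        }
 */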
1534
1535int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
1536                                            int optname, void *optval,
1537                                            int *optlen, int retval)
1538{
1539        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1540        struct bpf_sockopt_kern ctx = {
1541                .sk = sk,
1542                .level = level,
1543                .optname = optname,
1544                .retval = retval,
1545                .optlen = *optlen,
1546                .optval = optval,
1547                .optval_end = optval + *optlen,
1548        };
1549        int ret;
1550
1551        /* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
1552         * the user data back into the BPF buffer when retval != 0.
1553         * This is an optimization to avoid an extra copy, on the
1554         * assumption that the kernel won't populate the data in case
1555         * of an error. Here we always pass the data, and memset()
1556         * should be called if that data shouldn't be "exported".
1557         */
1558
1559        ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
1560                                 &ctx, BPF_PROG_RUN);
1561        if (!ret)
1562                return -EPERM;
1563
1564        if (ctx.optlen > *optlen)
1565                return -EFAULT;
1566
1567        /* BPF programs are only allowed to set retval to 0, not to
1568         * some arbitrary value.
1569         */
1570        if (ctx.retval != 0 && ctx.retval != retval)
1571                return -EFAULT;
1572
1573        /* BPF programs can shrink the buffer; export the modifications.
1574         */
1575        if (ctx.optlen != 0)
1576                *optlen = ctx.optlen;
1577
1578        return ctx.retval;
1579}
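
/* This kernel-buffer variant backs the BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN()
 * wrapper used on in-kernel fast paths; roughly how do_tcp_getsockopt() uses
 * it for TCP_ZEROCOPY_RECEIVE (simplified):
 *
 *        err = tcp_zerocopy_receive(sk, &zc);
 *        err = BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sk, level,
 *                                                  TCP_ZEROCOPY_RECEIVE,
 *                                                  &zc, &len, err);
 *
 * Note that, per the comment above, BPF programs see the buffer contents
 * even when err != 0.
 */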
1580#endif
1581
1582static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1583                              size_t *lenp)
1584{
1585        ssize_t tmp_ret = 0, ret;
1586
1587        if (dir->header.parent) {
1588                tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1589                if (tmp_ret < 0)
1590                        return tmp_ret;
1591        }
1592
1593        ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1594        if (ret < 0)
1595                return ret;
1596        *bufp += ret;
1597        *lenp -= ret;
1598        ret += tmp_ret;
1599
1600        /* Avoid leading slash. */
1601        if (!ret)
1602                return ret;
1603
1604        tmp_ret = strscpy(*bufp, "/", *lenp);
1605        if (tmp_ret < 0)
1606                return tmp_ret;
1607        *bufp += tmp_ret;
1608        *lenp -= tmp_ret;
1609
1610        return ret + tmp_ret;
1611}
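
/* For example, for the table backing /proc/sys/net/ipv4/tcp_mem, calling
 * sysctl_cpy_dir() on its parent directory recurses from the root down and
 * writes "net/ipv4/" into the buffer, with the "Avoid leading slash" check
 * keeping the path relative; bpf_sysctl_get_name() below then appends the
 * leaf procname, yielding "net/ipv4/tcp_mem".
 */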
1612
1613BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1614           size_t, buf_len, u64, flags)
1615{
1616        ssize_t tmp_ret = 0, ret;
1617
1618        if (!buf)
1619                return -EINVAL;
1620
1621        if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1622                if (!ctx->head)
1623                        return -EINVAL;
1624                tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1625                if (tmp_ret < 0)
1626                        return tmp_ret;
1627        }
1628
1629        ret = strscpy(buf, ctx->table->procname, buf_len);
1630
1631        return ret < 0 ? ret : tmp_ret + ret;
1632}
1633
1634static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
1635        .func           = bpf_sysctl_get_name,
1636        .gpl_only       = false,
1637        .ret_type       = RET_INTEGER,
1638        .arg1_type      = ARG_PTR_TO_CTX,
1639        .arg2_type      = ARG_PTR_TO_MEM,
1640        .arg3_type      = ARG_CONST_SIZE,
1641        .arg4_type      = ARG_ANYTHING,
1642};
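
/* A minimal sketch of a BPF_CGROUP_SYSCTL program using the helper above to
 * reject writes anywhere under "net/". A flags value of 0 asks for the full
 * directory path plus leaf; BPF_F_SYSCTL_BASE_NAME would return just the
 * leaf name. Illustrative only; section name follows libbpf conventions.
 *
 *        SEC("cgroup/sysctl")
 *        int deny_net_writes(struct bpf_sysctl *ctx)
 *        {
 *                char name[64] = {};
 *
 *                if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *                        return 0;               // reject on error
 *                if (ctx->write && name[0] == 'n' && name[1] == 'e' &&
 *                    name[2] == 't' && name[3] == '/')
 *                        return 0;               // writer gets -EPERM
 *                return 1;                       // allow
 *        }
 */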
1643
1644static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
1645                             size_t src_len)
1646{
1647        if (!dst)
1648                return -EINVAL;
1649
1650        if (!dst_len)
1651                return -E2BIG;
1652
1653        if (!src || !src_len) {
1654                memset(dst, 0, dst_len);
1655                return -EINVAL;
1656        }
1657
1658        memcpy(dst, src, min(dst_len, src_len));
1659
1660        if (dst_len > src_len) {
1661                memset(dst + src_len, '\0', dst_len - src_len);
1662                return src_len;
1663        }
1664
1665        dst[dst_len - 1] = '\0';
1666
1667        return -E2BIG;
1668}
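
/* For example, with src = "512\n" and src_len = 4:
 *  - dst_len = 8: copies 4 bytes, zero-fills the remaining 4 and returns 4;
 *  - dst_len = 4: copies 4 bytes, then overwrites dst[3] with '\0' so the
 *    result stays NUL-terminated, and returns -E2BIG to signal truncation.
 */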
1669
1670BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
1671           char *, buf, size_t, buf_len)
1672{
1673        return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
1674}
1675
1676static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
1677        .func           = bpf_sysctl_get_current_value,
1678        .gpl_only       = false,
1679        .ret_type       = RET_INTEGER,
1680        .arg1_type      = ARG_PTR_TO_CTX,
1681        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1682        .arg3_type      = ARG_CONST_SIZE,
1683};
1684
1685BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
1686           size_t, buf_len)
1687{
1688        if (!ctx->write) {
1689                if (buf && buf_len)
1690                        memset(buf, '\0', buf_len);
1691                return -EINVAL;
1692        }
1693        return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
1694}
1695
1696static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
1697        .func           = bpf_sysctl_get_new_value,
1698        .gpl_only       = false,
1699        .ret_type       = RET_INTEGER,
1700        .arg1_type      = ARG_PTR_TO_CTX,
1701        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1702        .arg3_type      = ARG_CONST_SIZE,
1703};
1704
1705BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
1706           const char *, buf, size_t, buf_len)
1707{
1708        if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
1709                return -EINVAL;
1710
1711        if (buf_len > PAGE_SIZE - 1)
1712                return -E2BIG;
1713
1714        memcpy(ctx->new_val, buf, buf_len);
1715        ctx->new_len = buf_len;
1716        ctx->new_updated = 1;
1717
1718        return 0;
1719}
1720
1721static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
1722        .func           = bpf_sysctl_set_new_value,
1723        .gpl_only       = false,
1724        .ret_type       = RET_INTEGER,
1725        .arg1_type      = ARG_PTR_TO_CTX,
1726        .arg2_type      = ARG_PTR_TO_MEM,
1727        .arg3_type      = ARG_CONST_SIZE,
1728};
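
/* A minimal sketch of overriding a value being written: on a write, replace
 * whatever the user supplied with "1". The helper only succeeds when
 * ctx->write is set and caps buf_len at PAGE_SIZE - 1. Illustrative only.
 *
 *        SEC("cgroup/sysctl")
 *        int force_one(struct bpf_sysctl *ctx)
 *        {
 *                char one[] = "1";
 *
 *                if (ctx->write)
 *                        bpf_sysctl_set_new_value(ctx, one, sizeof(one) - 1);
 *                return 1;
 *        }
 */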
1729
1730static const struct bpf_func_proto *
1731sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1732{
1733        switch (func_id) {
1734        case BPF_FUNC_strtol:
1735                return &bpf_strtol_proto;
1736        case BPF_FUNC_strtoul:
1737                return &bpf_strtoul_proto;
1738        case BPF_FUNC_sysctl_get_name:
1739                return &bpf_sysctl_get_name_proto;
1740        case BPF_FUNC_sysctl_get_current_value:
1741                return &bpf_sysctl_get_current_value_proto;
1742        case BPF_FUNC_sysctl_get_new_value:
1743                return &bpf_sysctl_get_new_value_proto;
1744        case BPF_FUNC_sysctl_set_new_value:
1745                return &bpf_sysctl_set_new_value_proto;
1746        default:
1747                return cgroup_base_func_proto(func_id, prog);
1748        }
1749}
1750
1751static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1752                                   const struct bpf_prog *prog,
1753                                   struct bpf_insn_access_aux *info)
1754{
1755        const int size_default = sizeof(__u32);
1756
1757        if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
1758                return false;
1759
1760        switch (off) {
1761        case bpf_ctx_range(struct bpf_sysctl, write):
1762                if (type != BPF_READ)
1763                        return false;
1764                bpf_ctx_record_field_size(info, size_default);
1765                return bpf_ctx_narrow_access_ok(off, size, size_default);
1766        case bpf_ctx_range(struct bpf_sysctl, file_pos):
1767                if (type == BPF_READ) {
1768                        bpf_ctx_record_field_size(info, size_default);
1769                        return bpf_ctx_narrow_access_ok(off, size, size_default);
1770                } else {
1771                        return size == size_default;
1772                }
1773        default:
1774                return false;
1775        }
1776}
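
/* In program terms, the rules above allow, for example:
 *
 *        __u32 w = ctx->write;           // read OK, narrow loads allowed
 *        __u32 p = ctx->file_pos;        // read OK, narrow loads allowed
 *        ctx->file_pos = 0;              // write OK, full 4 bytes only
 *
 * while any store to ctx->write is rejected by the verifier at load time.
 */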
1777
1778static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
1779                                     const struct bpf_insn *si,
1780                                     struct bpf_insn *insn_buf,
1781                                     struct bpf_prog *prog, u32 *target_size)
1782{
1783        struct bpf_insn *insn = insn_buf;
1784        u32 read_size;
1785
1786        switch (si->off) {
1787        case offsetof(struct bpf_sysctl, write):
1788                *insn++ = BPF_LDX_MEM(
1789                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
1790                        bpf_target_off(struct bpf_sysctl_kern, write,
1791                                       sizeof_field(struct bpf_sysctl_kern,
1792                                                    write),
1793                                       target_size));
1794                break;
1795        case offsetof(struct bpf_sysctl, file_pos):
1796                /* ppos is a pointer, so it should be accessed via indirect
1797                 * loads and stores. Also, for stores an additional temporary
1798                 * register is used, since neither src_reg nor dst_reg can be
1799                 * overwritten.
1800                 */
1801                if (type == BPF_WRITE) {
1802                        int treg = BPF_REG_9;
1803
1804                        if (si->src_reg == treg || si->dst_reg == treg)
1805                                --treg;
1806                        if (si->src_reg == treg || si->dst_reg == treg)
1807                                --treg;
1808                        *insn++ = BPF_STX_MEM(
1809                                BPF_DW, si->dst_reg, treg,
1810                                offsetof(struct bpf_sysctl_kern, tmp_reg));
1811                        *insn++ = BPF_LDX_MEM(
1812                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1813                                treg, si->dst_reg,
1814                                offsetof(struct bpf_sysctl_kern, ppos));
1815                        *insn++ = BPF_STX_MEM(
1816                                BPF_SIZEOF(u32), treg, si->src_reg,
1817                                bpf_ctx_narrow_access_offset(
1818                                        0, sizeof(u32), sizeof(loff_t)));
1819                        *insn++ = BPF_LDX_MEM(
1820                                BPF_DW, treg, si->dst_reg,
1821                                offsetof(struct bpf_sysctl_kern, tmp_reg));
1822                } else {
1823                        *insn++ = BPF_LDX_MEM(
1824                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1825                                si->dst_reg, si->src_reg,
1826                                offsetof(struct bpf_sysctl_kern, ppos));
1827                        read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
1828                        *insn++ = BPF_LDX_MEM(
1829                                BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
1830                                bpf_ctx_narrow_access_offset(
1831                                        0, read_size, sizeof(loff_t)));
1832                }
1833                *target_size = sizeof(u32);
1834                break;
1835        }
1836
1837        return insn - insn_buf;
1838}
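
/* For reference, a program-side store such as "ctx->file_pos = pos" is
 * expanded by the BPF_WRITE branch above into roughly (treg being the first
 * of R9/R8/R7 that collides with neither src_reg nor dst_reg):
 *
 *        *(u64 *)(dst_reg + off(tmp_reg)) = treg      // spill scratch reg
 *        treg = *(u64 *)(dst_reg + off(ppos))         // treg = ctx->ppos
 *        *(u32 *)(treg + narrow_off) = src_reg        // store into *ppos
 *        treg = *(u64 *)(dst_reg + off(tmp_reg))      // restore scratch reg
 *
 * where narrow_off selects the 32-bit half of the 64-bit loff_t in an
 * endianness-aware way.
 */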
1839
1840const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
1841        .get_func_proto         = sysctl_func_proto,
1842        .is_valid_access        = sysctl_is_valid_access,
1843        .convert_ctx_access     = sysctl_convert_ctx_access,
1844};
1845
1846const struct bpf_prog_ops cg_sysctl_prog_ops = {
1847};
1848
1849static const struct bpf_func_proto *
1850cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1851{
1852        switch (func_id) {
1853#ifdef CONFIG_NET
1854        case BPF_FUNC_sk_storage_get:
1855                return &bpf_sk_storage_get_proto;
1856        case BPF_FUNC_sk_storage_delete:
1857                return &bpf_sk_storage_delete_proto;
1858#endif
1859#ifdef CONFIG_INET
1860        case BPF_FUNC_tcp_sock:
1861                return &bpf_tcp_sock_proto;
1862#endif
1863        default:
1864                return cgroup_base_func_proto(func_id, prog);
1865        }
1866}
1867
1868static bool cg_sockopt_is_valid_access(int off, int size,
1869                                       enum bpf_access_type type,
1870                                       const struct bpf_prog *prog,
1871                                       struct bpf_insn_access_aux *info)
1872{
1873        const int size_default = sizeof(__u32);
1874
1875        if (off < 0 || off >= sizeof(struct bpf_sockopt))
1876                return false;
1877
1878        if (off % size != 0)
1879                return false;
1880
1881        if (type == BPF_WRITE) {
1882                switch (off) {
1883                case offsetof(struct bpf_sockopt, retval):
1884                        if (size != size_default)
1885                                return false;
1886                        return prog->expected_attach_type ==
1887                                BPF_CGROUP_GETSOCKOPT;
1888                case offsetof(struct bpf_sockopt, optname):
1889                        fallthrough;
1890                case offsetof(struct bpf_sockopt, level):
1891                        if (size != size_default)
1892                                return false;
1893                        return prog->expected_attach_type ==
1894                                BPF_CGROUP_SETSOCKOPT;
1895                case offsetof(struct bpf_sockopt, optlen):
1896                        return size == size_default;
1897                default:
1898                        return false;
1899                }
1900        }
1901
1902        switch (off) {
1903        case offsetof(struct bpf_sockopt, sk):
1904                if (size != sizeof(__u64))
1905                        return false;
1906                info->reg_type = PTR_TO_SOCKET;
1907                break;
1908        case offsetof(struct bpf_sockopt, optval):
1909                if (size != sizeof(__u64))
1910                        return false;
1911                info->reg_type = PTR_TO_PACKET;
1912                break;
1913        case offsetof(struct bpf_sockopt, optval_end):
1914                if (size != sizeof(__u64))
1915                        return false;
1916                info->reg_type = PTR_TO_PACKET_END;
1917                break;
1918        case offsetof(struct bpf_sockopt, retval):
1919                if (size != size_default)
1920                        return false;
1921                return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
1922        default:
1923                if (size != size_default)
1924                        return false;
1925                break;
1926        }
1927        return true;
1928}
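
/* Because optval/optval_end carry PTR_TO_PACKET/PTR_TO_PACKET_END register
 * types, the verifier demands the usual range check before any dereference,
 * e.g. in a sockopt program:
 *
 *        __u8 *p = ctx->optval;
 *
 *        if ((void *)(p + 1) > ctx->optval_end)
 *                return 0;               // reject: out of bounds
 *        ...                             // *p may now be dereferenced
 */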
1929
1930#define CG_SOCKOPT_ACCESS_FIELD(T, F)                                   \
1931        T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),                 \
1932          si->dst_reg, si->src_reg,                                     \
1933          offsetof(struct bpf_sockopt_kern, F))
1934
1935static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
1936                                         const struct bpf_insn *si,
1937                                         struct bpf_insn *insn_buf,
1938                                         struct bpf_prog *prog,
1939                                         u32 *target_size)
1940{
1941        struct bpf_insn *insn = insn_buf;
1942
1943        switch (si->off) {
1944        case offsetof(struct bpf_sockopt, sk):
1945                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
1946                break;
1947        case offsetof(struct bpf_sockopt, level):
1948                if (type == BPF_WRITE)
1949                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
1950                else
1951                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
1952                break;
1953        case offsetof(struct bpf_sockopt, optname):
1954                if (type == BPF_WRITE)
1955                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
1956                else
1957                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
1958                break;
1959        case offsetof(struct bpf_sockopt, optlen):
1960                if (type == BPF_WRITE)
1961                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
1962                else
1963                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
1964                break;
1965        case offsetof(struct bpf_sockopt, retval):
1966                if (type == BPF_WRITE)
1967                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
1968                else
1969                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
1970                break;
1971        case offsetof(struct bpf_sockopt, optval):
1972                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
1973                break;
1974        case offsetof(struct bpf_sockopt, optval_end):
1975                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
1976                break;
1977        }
1978
1979        return insn - insn_buf;
1980}
1981
1982static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
1983                                   bool direct_write,
1984                                   const struct bpf_prog *prog)
1985{
1986        /* Nothing to do for the sockopt argument. The data is allocated
1987         * with kzalloc(). */
1988        return 0;
1989}
1990
1991const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
1992        .get_func_proto         = cg_sockopt_func_proto,
1993        .is_valid_access        = cg_sockopt_is_valid_access,
1994        .convert_ctx_access     = cg_sockopt_convert_ctx_access,
1995        .gen_prologue           = cg_sockopt_get_prologue,
1996};
1997
1998const struct bpf_prog_ops cg_sockopt_prog_ops = {
1999};
2000