linux/kernel/bpf/cgroup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

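/* cgroup_bpf_offline() - begin teardown of a cgroup's bpf state
 *
 * Grabs a cgroup reference so the bpf state outlives the cgroup until
 * cgroup_bpf_release() runs, then kills the percpu refcnt; once it drops
 * to zero, cgroup_bpf_release_fn() schedules the actual release work.
 */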
void cgroup_bpf_offline(struct cgroup *cgrp)
{
        cgroup_get(cgrp);
        percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
                                     struct bpf_cgroup_storage *new_storages[],
                                     enum bpf_attach_type type,
                                     struct bpf_prog *prog,
                                     struct cgroup *cgrp)
{
        enum bpf_cgroup_storage_type stype;
        struct bpf_cgroup_storage_key key;
        struct bpf_map *map;

        key.cgroup_inode_id = cgroup_id(cgrp);
        key.attach_type = type;

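        /* Reuse a storage already created for this (cgroup, attach type)
         * pair; otherwise allocate a fresh one and track it in
         * @new_storages so it can be freed on error or linked on success.
         */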
        for_each_cgroup_storage_type(stype) {
                map = prog->aux->cgroup_storage[stype];
                if (!map)
                        continue;

                storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
                if (storages[stype])
                        continue;

                storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
                if (IS_ERR(storages[stype])) {
                        bpf_cgroup_storages_free(new_storages);
                        return -ENOMEM;
                }

                new_storages[stype] = storages[stype];
        }

        return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
                                       struct bpf_cgroup_storage *src[])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
                                     struct cgroup *cgrp,
                                     enum bpf_attach_type attach_type)
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
        cgroup_put(link->cgroup);
        link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
        struct cgroup *p, *cgrp = container_of(work, struct cgroup,
                                               bpf.release_work);
        struct bpf_prog_array *old_array;
        struct list_head *storages = &cgrp->bpf.storages;
        struct bpf_cgroup_storage *storage, *stmp;

        unsigned int atype;

        mutex_lock(&cgroup_mutex);

        for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
                struct list_head *progs = &cgrp->bpf.progs[atype];
                struct bpf_prog_list *pl, *pltmp;

                list_for_each_entry_safe(pl, pltmp, progs, node) {
                        list_del(&pl->node);
                        if (pl->prog)
                                bpf_prog_put(pl->prog);
                        if (pl->link)
                                bpf_cgroup_link_auto_detach(pl->link);
                        kfree(pl);
                        static_branch_dec(&cgroup_bpf_enabled_key[atype]);
                }
                old_array = rcu_dereference_protected(
                                cgrp->bpf.effective[atype],
                                lockdep_is_held(&cgroup_mutex));
                bpf_prog_array_free(old_array);
        }

        list_for_each_entry_safe(storage, stmp, storages, list_cg) {
                bpf_cgroup_storage_unlink(storage);
                bpf_cgroup_storage_free(storage);
        }

        mutex_unlock(&cgroup_mutex);

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_put(p);

        percpu_ref_exit(&cgrp->bpf.refcnt);
        cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
        struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

        INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
        queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get the underlying bpf_prog of a bpf_prog_list entry, regardless of
 * whether it's attached through a link or as a direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
        if (pl->prog)
                return pl->prog;
        if (pl->link)
                return pl->link->link.prog;
        return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
        struct bpf_prog_list *pl;
        u32 cnt = 0;

        list_for_each_entry(pl, head, node) {
                if (!prog_list_prog(pl))
                        continue;
                cnt++;
        }
        return cnt;
}

/* if a parent has a non-overridable prog attached,
 * disallow attaching new programs to a descendant cgroup.
 * if a parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
                                    enum cgroup_bpf_attach_type atype)
{
        struct cgroup *p;

        p = cgroup_parent(cgrp);
        if (!p)
                return true;
        do {
                u32 flags = p->bpf.flags[atype];
                u32 cnt;

                if (flags & BPF_F_ALLOW_MULTI)
                        return true;
                cnt = prog_list_length(&p->bpf.progs[atype]);
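                /* without BPF_F_ALLOW_MULTI at most one prog can be attached */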
                WARN_ON_ONCE(cnt > 1);
                if (cnt == 1)
                        return !!(flags & BPF_F_ALLOW_OVERRIDE);
                p = cgroup_parent(p);
        } while (p);
        return true;
}

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
                                   enum cgroup_bpf_attach_type atype,
                                   struct bpf_prog_array **array)
{
        struct bpf_prog_array_item *item;
        struct bpf_prog_array *progs;
        struct bpf_prog_list *pl;
        struct cgroup *p = cgrp;
        int cnt = 0;

        /* count number of effective programs by walking parents */
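        /* Walking up from the cgroup, every level counts until the first
         * programs are found; after that an ancestor's programs are added
         * only if that ancestor was attached with BPF_F_ALLOW_MULTI, i.e.
         * overridable ancestor programs are shadowed.
         */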
        do {
                if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
                        cnt += prog_list_length(&p->bpf.progs[atype]);
                p = cgroup_parent(p);
        } while (p);

        progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
        if (!progs)
                return -ENOMEM;

        /* populate the array with effective progs */
        cnt = 0;
        p = cgrp;
        do {
                if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
                        continue;

                list_for_each_entry(pl, &p->bpf.progs[atype], node) {
                        if (!prog_list_prog(pl))
                                continue;

                        item = &progs->items[cnt];
                        item->prog = prog_list_prog(pl);
                        bpf_cgroup_storages_assign(item->cgroup_storage,
                                                   pl->storage);
                        cnt++;
                }
        } while ((p = cgroup_parent(p)));

        *array = progs;
        return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
                                     enum cgroup_bpf_attach_type atype,
                                     struct bpf_prog_array *old_array)
{
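        /* on entry @old_array holds the new array to activate; after the
         * swap below it holds the previously active one
         */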
        old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
                                        lockdep_is_held(&cgroup_mutex));
        /* free prog array after grace period, since __cgroup_bpf_run_*()
         * might be still walking the array
         */
        bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use a macro instead of a const int, since the compiler
 * otherwise thinks that the array below is variable length
 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
        struct bpf_prog_array *arrays[NR] = {};
        struct cgroup *p;
        int ret, i;

        ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
                              GFP_KERNEL);
        if (ret)
                return ret;

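        /* hold ancestors' bpf state for as long as this cgroup's effective
         * arrays may reference their programs; dropped in cgroup_bpf_release()
         */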
        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_get(p);

        for (i = 0; i < NR; i++)
                INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

        INIT_LIST_HEAD(&cgrp->bpf.storages);

        for (i = 0; i < NR; i++)
                if (compute_effective_progs(cgrp, i, &arrays[i]))
                        goto cleanup;

        for (i = 0; i < NR; i++)
                activate_effective_progs(cgrp, i, arrays[i]);

        return 0;
cleanup:
        for (i = 0; i < NR; i++)
                bpf_prog_array_free(arrays[i]);

        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
                cgroup_bpf_put(p);

        percpu_ref_exit(&cgrp->bpf.refcnt);

        return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
                                  enum cgroup_bpf_attach_type atype)
{
        struct cgroup_subsys_state *css;
        int err;

        /* allocate and recompute effective prog arrays */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

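                /* ref is zero: cgroup_bpf_release() is in flight or done,
                 * skip this descendant
                 */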
                if (percpu_ref_is_zero(&desc->bpf.refcnt))
                        continue;

                err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
                if (err)
                        goto cleanup;
        }

        /* all allocations were successful. Activate all prog arrays */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
                        if (unlikely(desc->bpf.inactive)) {
                                bpf_prog_array_free(desc->bpf.inactive);
                                desc->bpf.inactive = NULL;
                        }
                        continue;
                }

                activate_effective_progs(desc, atype, desc->bpf.inactive);
                desc->bpf.inactive = NULL;
        }

        return 0;

cleanup:
        /* oom while computing effective. Free all computed effective arrays
         * since they were not activated
         */
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                bpf_prog_array_free(desc->bpf.inactive);
                desc->bpf.inactive = NULL;
        }

        return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
                                               struct bpf_prog *prog,
                                               struct bpf_cgroup_link *link,
                                               struct bpf_prog *replace_prog,
                                               bool allow_multi)
{
        struct bpf_prog_list *pl;

        /* single-attach case */
        if (!allow_multi) {
                if (list_empty(progs))
                        return NULL;
                return list_first_entry(progs, typeof(*pl), node);
        }

        list_for_each_entry(pl, progs, node) {
                if (prog && pl->prog == prog && prog != replace_prog)
                        /* disallow attaching the same prog twice */
                        return ERR_PTR(-EINVAL);
                if (link && pl->link == link)
                        /* disallow attaching the same link twice */
                        return ERR_PTR(-EINVAL);
        }

        /* direct prog multi-attach w/ replacement case */
        if (replace_prog) {
                list_for_each_entry(pl, progs, node) {
                        if (pl->prog == replace_prog)
                                /* a match found */
                                return pl;
                }
                /* prog to replace not found for cgroup */
                return ERR_PTR(-ENOENT);
        }

        return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
                               struct bpf_prog *prog, struct bpf_prog *replace_prog,
                               struct bpf_cgroup_link *link,
                               enum bpf_attach_type type, u32 flags)
{
        u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
        struct bpf_prog *old_prog = NULL;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
        enum cgroup_bpf_attach_type atype;
        struct bpf_prog_list *pl;
        struct list_head *progs;
        int err;

        if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
            ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
                /* invalid combination */
                return -EINVAL;
        if (link && (prog || replace_prog))
                /* only either link or prog/replace_prog can be specified */
                return -EINVAL;
        if (!!replace_prog != !!(flags & BPF_F_REPLACE))
                /* replace_prog implies BPF_F_REPLACE, and vice versa */
                return -EINVAL;

        atype = to_cgroup_bpf_attach_type(type);
        if (atype < 0)
                return -EINVAL;

        progs = &cgrp->bpf.progs[atype];

        if (!hierarchy_allows_attach(cgrp, atype))
                return -EPERM;

        if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
                /* Disallow attaching non-overridable on top
                 * of existing overridable in this cgroup.
                 * Disallow attaching multi-prog if overridable or none
                 */
                return -EPERM;

        if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
                return -E2BIG;

        pl = find_attach_entry(progs, prog, link, replace_prog,
                               flags & BPF_F_ALLOW_MULTI);
        if (IS_ERR(pl))
                return PTR_ERR(pl);

        if (bpf_cgroup_storages_alloc(storage, new_storage, type,
                                      prog ? : link->link.prog, cgrp))
                return -ENOMEM;

        if (pl) {
                old_prog = pl->prog;
        } else {
                pl = kmalloc(sizeof(*pl), GFP_KERNEL);
                if (!pl) {
                        bpf_cgroup_storages_free(new_storage);
                        return -ENOMEM;
                }
                list_add_tail(&pl->node, progs);
        }

        pl->prog = prog;
        pl->link = link;
        bpf_cgroup_storages_assign(pl->storage, storage);
        cgrp->bpf.flags[atype] = saved_flags;

        err = update_effective_progs(cgrp, atype);
        if (err)
                goto cleanup;

        if (old_prog)
                bpf_prog_put(old_prog);
        else
                static_branch_inc(&cgroup_bpf_enabled_key[atype]);
        bpf_cgroup_storages_link(new_storage, cgrp, type);
        return 0;

cleanup:
        if (old_prog) {
                pl->prog = old_prog;
                pl->link = NULL;
        }
        bpf_cgroup_storages_free(new_storage);
        if (!old_prog) {
                list_del(&pl->node);
                kfree(pl);
        }
        return err;
}

static int cgroup_bpf_attach(struct cgroup *cgrp,
                             struct bpf_prog *prog, struct bpf_prog *replace_prog,
                             struct bpf_cgroup_link *link,
                             enum bpf_attach_type type,
                             u32 flags)
{
        int ret;

        mutex_lock(&cgroup_mutex);
        ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
                                   enum cgroup_bpf_attach_type atype,
                                   struct bpf_cgroup_link *link)
{
        struct bpf_prog_array_item *item;
        struct cgroup_subsys_state *css;
        struct bpf_prog_array *progs;
        struct bpf_prog_list *pl;
        struct list_head *head;
        struct cgroup *cg;
        int pos;

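        /* for each live descendant, find the link's slot in the effective
         * array (mirroring compute_effective_progs() ordering) and swap the
         * prog pointer in place
         */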
        css_for_each_descendant_pre(css, &cgrp->self) {
                struct cgroup *desc = container_of(css, struct cgroup, self);

                if (percpu_ref_is_zero(&desc->bpf.refcnt))
                        continue;

                /* find position of link in effective progs array */
                for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
                        if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
                                continue;

                        head = &cg->bpf.progs[atype];
                        list_for_each_entry(pl, head, node) {
                                if (!prog_list_prog(pl))
                                        continue;
                                if (pl->link == link)
                                        goto found;
                                pos++;
                        }
                }
found:
                BUG_ON(!cg);
                progs = rcu_dereference_protected(
                                desc->bpf.effective[atype],
                                lockdep_is_held(&cgroup_mutex));
                item = &progs->items[pos];
                WRITE_ONCE(item->prog, link->link.prog);
        }
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: The program to replace the link's current program with
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
                                struct bpf_cgroup_link *link,
                                struct bpf_prog *new_prog)
{
        enum cgroup_bpf_attach_type atype;
        struct bpf_prog *old_prog;
        struct bpf_prog_list *pl;
        struct list_head *progs;
        bool found = false;

        atype = to_cgroup_bpf_attach_type(link->type);
        if (atype < 0)
                return -EINVAL;

        progs = &cgrp->bpf.progs[atype];

        if (link->link.prog->type != new_prog->type)
                return -EINVAL;

        list_for_each_entry(pl, progs, node) {
                if (pl->link == link) {
                        found = true;
                        break;
                }
        }
        if (!found)
                return -ENOENT;

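        /* publish the new prog in the link itself, then patch every
         * effective array that references it
         */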
        old_prog = xchg(&link->link.prog, new_prog);
        replace_effective_prog(cgrp, atype, link);
        bpf_prog_put(old_prog);
        return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
                              struct bpf_prog *old_prog)
{
        struct bpf_cgroup_link *cg_link;
        int ret;

        cg_link = container_of(link, struct bpf_cgroup_link, link);

        mutex_lock(&cgroup_mutex);
        /* link might have been auto-released by dying cgroup, so fail */
        if (!cg_link->cgroup) {
                ret = -ENOLINK;
                goto out_unlock;
        }
        if (old_prog && link->prog != old_prog) {
                ret = -EPERM;
                goto out_unlock;
        }
        ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
        mutex_unlock(&cgroup_mutex);
        return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
                                               struct bpf_prog *prog,
                                               struct bpf_cgroup_link *link,
                                               bool allow_multi)
{
        struct bpf_prog_list *pl;

        if (!allow_multi) {
                if (list_empty(progs))
                        /* report error when trying to detach and nothing is attached */
                        return ERR_PTR(-ENOENT);

                /* to maintain backward compatibility NONE and OVERRIDE cgroups
                 * allow detaching with invalid FD (prog==NULL) in legacy mode
                 */
                return list_first_entry(progs, typeof(*pl), node);
        }

        if (!prog && !link)
                /* to detach MULTI prog the user has to specify valid FD
                 * of the program or link to be detached
                 */
                return ERR_PTR(-EINVAL);

        /* find the prog or link and detach it */
        list_for_each_entry(pl, progs, node) {
                if (pl->prog == prog && pl->link == link)
                        return pl;
        }
        return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                               struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
        enum cgroup_bpf_attach_type atype;
        struct bpf_prog *old_prog;
        struct bpf_prog_list *pl;
        struct list_head *progs;
        u32 flags;
        int err;

        atype = to_cgroup_bpf_attach_type(type);
        if (atype < 0)
                return -EINVAL;

        progs = &cgrp->bpf.progs[atype];
        flags = cgrp->bpf.flags[atype];

        if (prog && link)
                /* only one of prog or link can be specified */
                return -EINVAL;

        pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
        if (IS_ERR(pl))
                return PTR_ERR(pl);

        /* mark it deleted, so it's ignored while recomputing effective */
        old_prog = pl->prog;
        pl->prog = NULL;
        pl->link = NULL;

        err = update_effective_progs(cgrp, atype);
        if (err)
                goto cleanup;

        /* now can actually delete it from this cgroup list */
        list_del(&pl->node);
        kfree(pl);
        if (list_empty(progs))
                /* last program was detached, reset flags to zero */
                cgrp->bpf.flags[atype] = 0;
        if (old_prog)
                bpf_prog_put(old_prog);
        static_branch_dec(&cgroup_bpf_enabled_key[atype]);
        return 0;

cleanup:
        /* restore back prog or link */
        pl->prog = old_prog;
        pl->link = link;
        return err;
}

static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                             enum bpf_attach_type type)
{
        int ret;

        mutex_lock(&cgroup_mutex);
        ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                              union bpf_attr __user *uattr)
{
        __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
        enum bpf_attach_type type = attr->query.attach_type;
        enum cgroup_bpf_attach_type atype;
        struct bpf_prog_array *effective;
        struct list_head *progs;
        struct bpf_prog *prog;
        int cnt, ret = 0, i;
        u32 flags;

        atype = to_cgroup_bpf_attach_type(type);
        if (atype < 0)
                return -EINVAL;

        progs = &cgrp->bpf.progs[atype];
        flags = cgrp->bpf.flags[atype];

        effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
                                              lockdep_is_held(&cgroup_mutex));

        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
                cnt = bpf_prog_array_length(effective);
        else
                cnt = prog_list_length(progs);

        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
                return -EFAULT;
        if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
                return -EFAULT;
        if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
                /* return early if user requested only program count + flags */
                return 0;
        if (attr->query.prog_cnt < cnt) {
                cnt = attr->query.prog_cnt;
                ret = -ENOSPC;
        }

        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
                return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
        } else {
                struct bpf_prog_list *pl;
                u32 id;

                i = 0;
                list_for_each_entry(pl, progs, node) {
                        prog = prog_list_prog(pl);
                        id = prog->aux->id;
                        if (copy_to_user(prog_ids + i, &id, sizeof(id)))
                                return -EFAULT;
                        if (++i == cnt)
                                break;
                }
        }
        return ret;
}

static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                            union bpf_attr __user *uattr)
{
        int ret;

        mutex_lock(&cgroup_mutex);
        ret = __cgroup_bpf_query(cgrp, attr, uattr);
        mutex_unlock(&cgroup_mutex);
        return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog)
{
        struct bpf_prog *replace_prog = NULL;
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
            (attr->attach_flags & BPF_F_REPLACE)) {
                replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
                if (IS_ERR(replace_prog)) {
                        cgroup_put(cgrp);
                        return PTR_ERR(replace_prog);
                }
        }

        ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
                                attr->attach_type, attr->attach_flags);

        if (replace_prog)
                bpf_prog_put(replace_prog);
        cgroup_put(cgrp);
        return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
        struct bpf_prog *prog;
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

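        /* a bogus FD is tolerated here: in legacy (non-multi) mode
         * find_detach_entry() detaches the attached program regardless
         * of @prog
         */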
        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
        if (IS_ERR(prog))
                prog = NULL;

        ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
        if (prog)
                bpf_prog_put(prog);

        cgroup_put(cgrp);
        return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        struct cgroup *cg;

        /* link might have been auto-detached by dying cgroup already,
         * in that case our work is done here
         */
        if (!cg_link->cgroup)
                return;

        mutex_lock(&cgroup_mutex);

        /* re-check cgroup under lock again */
        if (!cg_link->cgroup) {
                mutex_unlock(&cgroup_mutex);
                return;
        }

        WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
                                    cg_link->type));

        cg = cg_link->cgroup;
        cg_link->cgroup = NULL;

        mutex_unlock(&cgroup_mutex);

        cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);

        kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
        bpf_cgroup_link_release(link);

        return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
                                        struct seq_file *seq)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;

        mutex_lock(&cgroup_mutex);
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
        mutex_unlock(&cgroup_mutex);

        seq_printf(seq,
                   "cgroup_id:\t%llu\n"
                   "attach_type:\t%d\n",
                   cg_id,
                   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
                                          struct bpf_link_info *info)
{
        struct bpf_cgroup_link *cg_link =
                container_of(link, struct bpf_cgroup_link, link);
        u64 cg_id = 0;

        mutex_lock(&cgroup_mutex);
        if (cg_link->cgroup)
                cg_id = cgroup_id(cg_link->cgroup);
        mutex_unlock(&cgroup_mutex);

        info->cgroup.cgroup_id = cg_id;
        info->cgroup.attach_type = cg_link->type;
        return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
        .release = bpf_cgroup_link_release,
        .dealloc = bpf_cgroup_link_dealloc,
        .detach = bpf_cgroup_link_detach,
        .update_prog = cgroup_bpf_replace,
        .show_fdinfo = bpf_cgroup_link_show_fdinfo,
        .fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
        struct bpf_link_primer link_primer;
        struct bpf_cgroup_link *link;
        struct cgroup *cgrp;
        int err;

        if (attr->link_create.flags)
                return -EINVAL;

        cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        link = kzalloc(sizeof(*link), GFP_USER);
        if (!link) {
                err = -ENOMEM;
                goto out_put_cgroup;
        }
        bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
                      prog);
        link->cgroup = cgrp;
        link->type = attr->link_create.attach_type;

        err = bpf_link_prime(&link->link, &link_primer);
        if (err) {
                kfree(link);
                goto out_put_cgroup;
        }

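        /* links are always attached with BPF_F_ALLOW_MULTI */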
        err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
                                link->type, BPF_F_ALLOW_MULTI);
        if (err) {
                bpf_link_cleanup(&link_primer);
                goto out_put_cgroup;
        }

        return bpf_link_settle(&link_primer);

out_put_cgroup:
        cgroup_put(cgrp);
        return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
{
        struct cgroup *cgrp;
        int ret;

        cgrp = cgroup_get_from_fd(attr->query.target_fd);
        if (IS_ERR(cgrp))
                return PTR_ERR(cgrp);

        ret = cgroup_bpf_query(cgrp, attr, uattr);

        cgroup_put(cgrp);
        return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)    - continue with packet output
 *   NET_XMIT_DROP       (1)    - drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)    - continue with packet output and notify TCP
 *                                to call cwr
 *   -err                       - drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum cgroup_bpf_attach_type atype)
{
        unsigned int offset = skb->data - skb_network_header(skb);
        struct sock *save_sk;
        void *saved_data_end;
        struct cgroup *cgrp;
        int ret;

        if (!sk || !sk_fullsock(sk))
                return 0;

        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
                return 0;

        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        save_sk = skb->sk;
        skb->sk = sk;
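        /* let the prog see the packet from the network header on;
         * restored by __skb_pull() below
         */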
        __skb_push(skb, offset);

        /* compute pointers for the bpf prog */
        bpf_compute_and_save_data_end(skb, &saved_data_end);

        if (atype == CGROUP_INET_EGRESS) {
                ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
                        cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
        } else {
                ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
                                            __bpf_prog_run_save_cb, 0);
                if (ret && !IS_ERR_VALUE((long)ret))
                        ret = -EFAULT;
        }
        bpf_restore_data_end(skb, saved_data_end);
        __skb_pull(skb, offset);
        skb->sk = save_sk;

        return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum cgroup_bpf_attach_type atype)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

        return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk,
                                     bpf_prog_run, 0);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       the sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum cgroup_bpf_attach_type atype,
                                      void *t_ctx,
                                      u32 *flags)
{
        struct bpf_sock_addr_kern ctx = {
                .sk = sk,
                .uaddr = uaddr,
                .t_ctx = t_ctx,
        };
        struct sockaddr_storage unspec;
        struct cgroup *cgrp;

        /* Check socket family since not all sockets represent network
         * endpoint (e.g. AF_UNIX).
         */
        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
                return 0;

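        /* programs always see a valid sockaddr: substitute a zeroed
         * one when the caller passed NULL
         */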
        if (!ctx.uaddr) {
                memset(&unspec, 0, sizeof(unspec));
                ctx.uaddr = (struct sockaddr *)&unspec;
        }

        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
                                           bpf_prog_run, 0, flags);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum cgroup_bpf_attach_type atype)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

        return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
                                     bpf_prog_run, 0);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum cgroup_bpf_attach_type atype)
{
        struct cgroup *cgrp;
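        /* access_type packs the access flags into the upper 16 bits and
         * the device type into the lower 16
         */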
        struct bpf_cgroup_dev_ctx ctx = {
                .access_type = (access << 16) | dev_type,
                .major = major,
                .minor = minor,
        };
        int ret;

        rcu_read_lock();
        cgrp = task_dfl_cgroup(current);
        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
                                    bpf_prog_run, 0);
        rcu_read_unlock();

        return ret;
}

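/* Helpers exposing the cgroup run context's retval, so programs can read
 * or override the value the hook will eventually return.
 */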
BPF_CALL_0(bpf_get_retval)
{
        struct bpf_cg_run_ctx *ctx =
                container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

        return ctx->retval;
}

static const struct bpf_func_proto bpf_get_retval_proto = {
        .func           = bpf_get_retval,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

BPF_CALL_1(bpf_set_retval, int, retval)
{
        struct bpf_cg_run_ctx *ctx =
                container_of(current->bpf_ctx, struct bpf_cg_run_ctx, run_ctx);

        ctx->retval = retval;
        return 0;
}

static const struct bpf_func_proto bpf_set_retval_proto = {
        .func           = bpf_set_retval,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
};

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        switch (func_id) {
        case BPF_FUNC_get_current_uid_gid:
                return &bpf_get_current_uid_gid_proto;
        case BPF_FUNC_get_local_storage:
                return &bpf_get_local_storage_proto;
        case BPF_FUNC_get_current_cgroup_id:
                return &bpf_get_current_cgroup_id_proto;
        case BPF_FUNC_perf_event_output:
                return &bpf_event_output_data_proto;
        case BPF_FUNC_get_retval:
                return &bpf_get_retval_proto;
        case BPF_FUNC_set_retval:
                return &bpf_set_retval_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
                                       enum bpf_access_type type,
                                       const struct bpf_prog *prog,
                                       struct bpf_insn_access_aux *info)
{
        const int size_default = sizeof(__u32);

        if (type == BPF_WRITE)
                return false;

        if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
                return false;
        /* The verifier guarantees that size > 0. */
        if (off % size != 0)
                return false;

        switch (off) {
        case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
                bpf_ctx_record_field_size(info, size_default);
                if (!bpf_ctx_narrow_access_ok(off, size, size_default))
                        return false;
                break;
        default:
                if (size != size_default)
                        return false;
        }

        return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
        .get_func_proto         = cgroup_dev_func_proto,
        .is_valid_access        = cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *      result is size of @new_buf if program set new value, initial value
 *      otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *      to sysctl is happening, result is new position if program overrode it,
 *      initial value otherwise
 * @atype: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
1326int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
1327                                   struct ctl_table *table, int write,
1328                                   char **buf, size_t *pcount, loff_t *ppos,
1329                                   enum cgroup_bpf_attach_type atype)
1330{
1331        struct bpf_sysctl_kern ctx = {
1332                .head = head,
1333                .table = table,
1334                .write = write,
1335                .ppos = ppos,
1336                .cur_val = NULL,
1337                .cur_len = PAGE_SIZE,
1338                .new_val = NULL,
1339                .new_len = 0,
1340                .new_updated = 0,
1341        };
1342        struct cgroup *cgrp;
1343        loff_t pos = 0;
1344        int ret;
1345
1346        ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
1347        if (!ctx.cur_val ||
1348            table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
1349                /* Let BPF program decide how to proceed. */
1350                ctx.cur_len = 0;
1351        }
1352
1353        if (write && *buf && *pcount) {
1354                /* BPF program should be able to override new value with a
1355                 * buffer bigger than provided by user.
1356                 */
1357                ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
1358                ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
1359                if (ctx.new_val) {
1360                        memcpy(ctx.new_val, *buf, ctx.new_len);
1361                } else {
1362                        /* Let BPF program decide how to proceed. */
1363                        ctx.new_len = 0;
1364                }
1365        }
1366
1367        rcu_read_lock();
1368        cgrp = task_dfl_cgroup(current);
1369        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
1370                                    bpf_prog_run, 0);
1371        rcu_read_unlock();
1372
1373        kfree(ctx.cur_val);
1374
1375        if (ret == 1 && ctx.new_updated) {
1376                kfree(*buf);
1377                *buf = ctx.new_val;
1378                *pcount = ctx.new_len;
1379        } else {
1380                kfree(ctx.new_val);
1381        }
1382
1383        return ret;
1384}
1385
1386#ifdef CONFIG_NET
1387static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
1388                             struct bpf_sockopt_buf *buf)
1389{
1390        if (unlikely(max_optlen < 0))
1391                return -EINVAL;
1392
1393        if (unlikely(max_optlen > PAGE_SIZE)) {
1394                /* We don't expose optvals that are greater than PAGE_SIZE
1395                 * to the BPF program.
1396                 */
1397                max_optlen = PAGE_SIZE;
1398        }
1399
1400        if (max_optlen <= sizeof(buf->data)) {
1401                /* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
1402                 * bytes avoid the cost of kzalloc.
1403                 */
1404                ctx->optval = buf->data;
1405                ctx->optval_end = ctx->optval + max_optlen;
1406                return max_optlen;
1407        }
1408
1409        ctx->optval = kzalloc(max_optlen, GFP_USER);
1410        if (!ctx->optval)
1411                return -ENOMEM;
1412
1413        ctx->optval_end = ctx->optval + max_optlen;
1414
1415        return max_optlen;
1416}
1417
1418static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
1419                             struct bpf_sockopt_buf *buf)
1420{
1421        if (ctx->optval == buf->data)
1422                return;
1423        kfree(ctx->optval);
1424}
1425
1426static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
1427                                  struct bpf_sockopt_buf *buf)
1428{
1429        return ctx->optval != buf->data;
1430}
1431
1432int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
1433                                       int *optname, char __user *optval,
1434                                       int *optlen, char **kernel_optval)
1435{
1436        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1437        struct bpf_sockopt_buf buf = {};
1438        struct bpf_sockopt_kern ctx = {
1439                .sk = sk,
1440                .level = *level,
1441                .optname = *optname,
1442        };
1443        int ret, max_optlen;
1444
1445        /* Allocate a bit more than the initial user buffer for
1446         * BPF program. The canonical use case is overriding
1447         * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
1448         */
1449        max_optlen = max_t(int, 16, *optlen);
1450        max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1451        if (max_optlen < 0)
1452                return max_optlen;
1453
1454        ctx.optlen = *optlen;
1455
1456        if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
1457                ret = -EFAULT;
1458                goto out;
1459        }
1460
1461        lock_sock(sk);
1462        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
1463                                    &ctx, bpf_prog_run, 0);
1464        release_sock(sk);
1465
1466        if (ret)
1467                goto out;
1468
1469        if (ctx.optlen == -1) {
1470                /* optlen set to -1, bypass kernel */
1471                ret = 1;
1472        } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
1473                /* optlen is out of bounds */
1474                ret = -EFAULT;
1475        } else {
1476                /* optlen within bounds, run kernel handler */
1477                ret = 0;
1478
1479                /* export any potential modifications */
1480                *level = ctx.level;
1481                *optname = ctx.optname;
1482
1483                /* optlen == 0 from BPF indicates that we should
1484                 * use original userspace data.
1485                 */
1486                if (ctx.optlen != 0) {
1487                        *optlen = ctx.optlen;
1488                        /* We've used bpf_sockopt_kern->buf as an intermediary
1489                         * storage, but the BPF program indicates that we need
1490                         * to pass this data to the kernel setsockopt handler.
1491                         * No way to export on-stack buf, have to allocate a
1492                         * new buffer.
1493                         */
1494                        if (!sockopt_buf_allocated(&ctx, &buf)) {
1495                                void *p = kmalloc(ctx.optlen, GFP_USER);
1496
1497                                if (!p) {
1498                                        ret = -ENOMEM;
1499                                        goto out;
1500                                }
1501                                memcpy(p, ctx.optval, ctx.optlen);
1502                                *kernel_optval = p;
1503                        } else {
1504                                *kernel_optval = ctx.optval;
1505                        }
1506                        /* export and don't free sockopt buf */
1507                        return 0;
1508                }
1509        }
1510
1511out:
1512        sockopt_free_buf(&ctx, &buf);
1513        return ret;
1514}
1515
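/*
 * Illustrative sketch (editor's addition, not part of cgroup.c): a minimal
 * cgroup/setsockopt BPF program exercising the contract enforced above.
 * It bounds-checks optval against optval_end, rewrites the value in place
 * and returns 1 to let the kernel handler run; returning 0 rejects the
 * call with -EPERM, and setting ctx->optlen = -1 would bypass the kernel
 * handler entirely. Section and helper conventions follow libbpf; this
 * would be compiled separately as a BPF object, never as part of this file.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_TCP		6	/* mirrors netinet/tcp.h */
#define TCP_CONGESTION	13

SEC("cgroup/setsockopt")
int override_cc(struct bpf_sockopt *ctx)
{
        char *optval = ctx->optval;
        char *optval_end = ctx->optval_end;

        if (ctx->level != SOL_TCP || ctx->optname != TCP_CONGESTION)
                return 1;               /* not ours, run the kernel handler */

        if (optval + 5 > optval_end)
                return 0;               /* bounds check failed, reject */

        __builtin_memcpy(optval, "cubic", 5);   /* e.g. nv -> cubic */
        ctx->optlen = 5;
        return 1;                       /* proceed with the kernel handler */
}

char _license[] SEC("license") = "GPL";
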
1516int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1517                                       int optname, char __user *optval,
1518                                       int __user *optlen, int max_optlen,
1519                                       int retval)
1520{
1521        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1522        struct bpf_sockopt_buf buf = {};
1523        struct bpf_sockopt_kern ctx = {
1524                .sk = sk,
1525                .level = level,
1526                .optname = optname,
1527                .current_task = current,
1528        };
1529        int ret;
1530
1531        ctx.optlen = max_optlen;
1532        max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1533        if (max_optlen < 0)
1534                return max_optlen;
1535
1536        if (!retval) {
1537                /* If the kernel getsockopt finished successfully,
1538                 * copy whatever it returned to the user back into
1539                 * our temporary buffer. Also set optlen to the
1540                 * length the kernel returned, so that BPF programs
1541                 * can inspect the value.
1542                 */
1543
1544                if (get_user(ctx.optlen, optlen)) {
1545                        ret = -EFAULT;
1546                        goto out;
1547                }
1548
1549                if (ctx.optlen < 0) {
1550                        ret = -EFAULT;
1551                        goto out;
1552                }
1553
1554                if (copy_from_user(ctx.optval, optval,
1555                                   min(ctx.optlen, max_optlen)) != 0) {
1556                        ret = -EFAULT;
1557                        goto out;
1558                }
1559        }
1560
1561        lock_sock(sk);
1562        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
1563                                    &ctx, bpf_prog_run, retval);
1564        release_sock(sk);
1565
1566        if (ret < 0)
1567                goto out;
1568
1569        if (ctx.optlen > max_optlen || ctx.optlen < 0) {
1570                ret = -EFAULT;
1571                goto out;
1572        }
1573
1574        if (ctx.optlen != 0) {
1575                if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1576                    put_user(ctx.optlen, optlen)) {
1577                        ret = -EFAULT;
1578                        goto out;
1579                }
1580        }
1581
1582out:
1583        sockopt_free_buf(&ctx, &buf);
1584        return ret;
1585}
1586
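/*
 * Illustrative sketch (editor's addition): the getsockopt counterpart.
 * ctx->retval carries the kernel handler's result and may be overridden,
 * optlen/optval may be rewritten within max_optlen, and returning 0
 * rejects the call with -EPERM. SOL_CUSTOM is a hypothetical option level
 * used purely for illustration.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_CUSTOM	0xbeef	/* hypothetical level */

SEC("cgroup/getsockopt")
int synthesize_opt(struct bpf_sockopt *ctx)
{
        __u8 *optval = ctx->optval;
        __u8 *optval_end = ctx->optval_end;

        if (ctx->level != SOL_CUSTOM)
                return 1;               /* not ours, keep the kernel's result */

        if (optval + 1 > optval_end)
                return 0;               /* bounds check failed, reject */

        ctx->retval = 0;                /* the kernel handler failed... */
        optval[0] = 0x55;               /* ...but we can synthesize an answer */
        ctx->optlen = 1;
        return 1;
}

char _license[] SEC("license") = "GPL";
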
1587int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
1588                                            int optname, void *optval,
1589                                            int *optlen, int retval)
1590{
1591        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1592        struct bpf_sockopt_kern ctx = {
1593                .sk = sk,
1594                .level = level,
1595                .optname = optname,
1596                .optlen = *optlen,
1597                .optval = optval,
1598                .optval_end = optval + *optlen,
1599                .current_task = current,
1600        };
1601        int ret;
1602
1603        /* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
1604         * user data back into the BPF buffer when retval != 0. This is
1605         * done as an optimization to avoid an extra copy, assuming the
1606         * kernel won't populate the data in case of an error.
1607         * Here we always pass the data, so memset() should be
1608         * called first if that data shouldn't be "exported".
1609         */
1610
1611        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
1612                                    &ctx, bpf_prog_run, retval);
1613        if (ret < 0)
1614                return ret;
1615
1616        if (ctx.optlen > *optlen)
1617                return -EFAULT;
1618
1619        /* BPF programs can shrink the buffer, export the modifications.
1620         */
1621        if (ctx.optlen != 0)
1622                *optlen = ctx.optlen;
1623
1624        return ret;
1625}
1626#endif
1627
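/* Recursively copy the directory path leading to @dir into *bufp as
 * "a/b/c/", walking parents first so that components come out
 * root-to-leaf. Advances *bufp and shrinks *lenp as it goes; returns the
 * number of bytes copied or a negative error (e.g. -E2BIG from strscpy()
 * when the buffer is too small).
 */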
1628static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1629                              size_t *lenp)
1630{
1631        ssize_t tmp_ret = 0, ret;
1632
1633        if (dir->header.parent) {
1634                tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1635                if (tmp_ret < 0)
1636                        return tmp_ret;
1637        }
1638
1639        ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1640        if (ret < 0)
1641                return ret;
1642        *bufp += ret;
1643        *lenp -= ret;
1644        ret += tmp_ret;
1645
1646        /* Avoid leading slash. */
1647        if (!ret)
1648                return ret;
1649
1650        tmp_ret = strscpy(*bufp, "/", *lenp);
1651        if (tmp_ret < 0)
1652                return tmp_ret;
1653        *bufp += tmp_ret;
1654        *lenp -= tmp_ret;
1655
1656        return ret + tmp_ret;
1657}
1658
1659BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1660           size_t, buf_len, u64, flags)
1661{
1662        ssize_t tmp_ret = 0, ret;
1663
1664        if (!buf)
1665                return -EINVAL;
1666
1667        if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1668                if (!ctx->head)
1669                        return -EINVAL;
1670                tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1671                if (tmp_ret < 0)
1672                        return tmp_ret;
1673        }
1674
1675        ret = strscpy(buf, ctx->table->procname, buf_len);
1676
1677        return ret < 0 ? ret : tmp_ret + ret;
1678}
1679
1680static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
1681        .func           = bpf_sysctl_get_name,
1682        .gpl_only       = false,
1683        .ret_type       = RET_INTEGER,
1684        .arg1_type      = ARG_PTR_TO_CTX,
1685        .arg2_type      = ARG_PTR_TO_MEM,
1686        .arg3_type      = ARG_CONST_SIZE,
1687        .arg4_type      = ARG_ANYTHING,
1688};
1689
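/* Copy @src into @dst, truncating or NUL-padding as needed. Returns
 * @src_len when the whole value fit, -E2BIG when it was truncated (@dst is
 * still NUL-terminated), and -EINVAL when there is no destination or no
 * source value (in the latter case @dst is zeroed).
 */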
1690static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
1691                             size_t src_len)
1692{
1693        if (!dst)
1694                return -EINVAL;
1695
1696        if (!dst_len)
1697                return -E2BIG;
1698
1699        if (!src || !src_len) {
1700                memset(dst, 0, dst_len);
1701                return -EINVAL;
1702        }
1703
1704        memcpy(dst, src, min(dst_len, src_len));
1705
1706        if (dst_len > src_len) {
1707                memset(dst + src_len, '\0', dst_len - src_len);
1708                return src_len;
1709        }
1710
1711        dst[dst_len - 1] = '\0';
1712
1713        return -E2BIG;
1714}
1715
1716BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
1717           char *, buf, size_t, buf_len)
1718{
1719        return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
1720}
1721
1722static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
1723        .func           = bpf_sysctl_get_current_value,
1724        .gpl_only       = false,
1725        .ret_type       = RET_INTEGER,
1726        .arg1_type      = ARG_PTR_TO_CTX,
1727        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1728        .arg3_type      = ARG_CONST_SIZE,
1729};
1730
1731BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
1732           size_t, buf_len)
1733{
1734        if (!ctx->write) {
1735                if (buf && buf_len)
1736                        memset(buf, '\0', buf_len);
1737                return -EINVAL;
1738        }
1739        return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
1740}
1741
1742static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
1743        .func           = bpf_sysctl_get_new_value,
1744        .gpl_only       = false,
1745        .ret_type       = RET_INTEGER,
1746        .arg1_type      = ARG_PTR_TO_CTX,
1747        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1748        .arg3_type      = ARG_CONST_SIZE,
1749};
1750
1751BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
1752           const char *, buf, size_t, buf_len)
1753{
1754        if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
1755                return -EINVAL;
1756
1757        if (buf_len > PAGE_SIZE - 1)
1758                return -E2BIG;
1759
1760        memcpy(ctx->new_val, buf, buf_len);
1761        ctx->new_len = buf_len;
1762        ctx->new_updated = 1;
1763
1764        return 0;
1765}
1766
1767static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
1768        .func           = bpf_sysctl_set_new_value,
1769        .gpl_only       = false,
1770        .ret_type       = RET_INTEGER,
1771        .arg1_type      = ARG_PTR_TO_CTX,
1772        .arg2_type      = ARG_PTR_TO_MEM | MEM_RDONLY,
1773        .arg3_type      = ARG_CONST_SIZE,
1774};
1775
1776static const struct bpf_func_proto *
1777sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1778{
1779        switch (func_id) {
1780        case BPF_FUNC_strtol:
1781                return &bpf_strtol_proto;
1782        case BPF_FUNC_strtoul:
1783                return &bpf_strtoul_proto;
1784        case BPF_FUNC_sysctl_get_name:
1785                return &bpf_sysctl_get_name_proto;
1786        case BPF_FUNC_sysctl_get_current_value:
1787                return &bpf_sysctl_get_current_value_proto;
1788        case BPF_FUNC_sysctl_get_new_value:
1789                return &bpf_sysctl_get_new_value_proto;
1790        case BPF_FUNC_sysctl_set_new_value:
1791                return &bpf_sysctl_set_new_value_proto;
1792        case BPF_FUNC_ktime_get_coarse_ns:
1793                return &bpf_ktime_get_coarse_ns_proto;
1794        default:
1795                return cgroup_base_func_proto(func_id, prog);
1796        }
1797}
1798
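/*
 * Illustrative sketch (editor's addition): a cgroup/sysctl program built on
 * the helpers wired up above. bpf_sysctl_get_name() fetches the full
 * "net/ipv4/..." path, bpf_sysctl_get_new_value() and bpf_strtoul() parse a
 * pending write, and bpf_sysctl_set_new_value() rewrites it. Returning 1
 * allows the access, 0 rejects it; the sysctl chosen and the clamp value
 * are arbitrary for the example.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup/sysctl")
int clamp_syn_retries(struct bpf_sysctl *ctx)
{
        const char tcp_syn[] = "net/ipv4/tcp_syn_retries";
        char name[64] = {};
        char new_val[16] = {};
        unsigned long val;
        int i;

        if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
                return 1;       /* name didn't fit, not one we police */

#pragma unroll
        for (i = 0; i < sizeof(tcp_syn) - 1; i++)
                if (name[i] != tcp_syn[i])
                        return 1;       /* some other sysctl, allow */

        if (!ctx->write)
                return 1;       /* reads are always allowed here */

        if (bpf_sysctl_get_new_value(ctx, new_val, sizeof(new_val)) < 0)
                return 0;

        if (bpf_strtoul(new_val, sizeof(new_val), 0, &val) < 0)
                return 0;       /* not a number, reject the write */

        if (val > 16 && bpf_sysctl_set_new_value(ctx, "16", 2))
                return 0;       /* couldn't clamp, reject */

        return 1;
}

char _license[] SEC("license") = "GPL";
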
1799static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1800                                   const struct bpf_prog *prog,
1801                                   struct bpf_insn_access_aux *info)
1802{
1803        const int size_default = sizeof(__u32);
1804
1805        if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
1806                return false;
1807
1808        switch (off) {
1809        case bpf_ctx_range(struct bpf_sysctl, write):
1810                if (type != BPF_READ)
1811                        return false;
1812                bpf_ctx_record_field_size(info, size_default);
1813                return bpf_ctx_narrow_access_ok(off, size, size_default);
1814        case bpf_ctx_range(struct bpf_sysctl, file_pos):
1815                if (type == BPF_READ) {
1816                        bpf_ctx_record_field_size(info, size_default);
1817                        return bpf_ctx_narrow_access_ok(off, size, size_default);
1818                } else {
1819                        return size == size_default;
1820                }
1821        default:
1822                return false;
1823        }
1824}
1825
1826static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
1827                                     const struct bpf_insn *si,
1828                                     struct bpf_insn *insn_buf,
1829                                     struct bpf_prog *prog, u32 *target_size)
1830{
1831        struct bpf_insn *insn = insn_buf;
1832        u32 read_size;
1833
1834        switch (si->off) {
1835        case offsetof(struct bpf_sysctl, write):
1836                *insn++ = BPF_LDX_MEM(
1837                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
1838                        bpf_target_off(struct bpf_sysctl_kern, write,
1839                                       sizeof_field(struct bpf_sysctl_kern,
1840                                                    write),
1841                                       target_size));
1842                break;
1843        case offsetof(struct bpf_sysctl, file_pos):
1844                /* ppos is a pointer, so it must be accessed via indirect
1845                 * loads and stores. Stores also need an additional
1846                 * temporary register, since neither src_reg nor dst_reg
1847                 * may be clobbered.
1848                 */
1849                if (type == BPF_WRITE) {
1850                        int treg = BPF_REG_9;
1851
1852                        if (si->src_reg == treg || si->dst_reg == treg)
1853                                --treg;
1854                        if (si->src_reg == treg || si->dst_reg == treg)
1855                                --treg;
1856                        *insn++ = BPF_STX_MEM(
1857                                BPF_DW, si->dst_reg, treg,
1858                                offsetof(struct bpf_sysctl_kern, tmp_reg));
1859                        *insn++ = BPF_LDX_MEM(
1860                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1861                                treg, si->dst_reg,
1862                                offsetof(struct bpf_sysctl_kern, ppos));
1863                        *insn++ = BPF_STX_MEM(
1864                                BPF_SIZEOF(u32), treg, si->src_reg,
1865                                bpf_ctx_narrow_access_offset(
1866                                        0, sizeof(u32), sizeof(loff_t)));
1867                        *insn++ = BPF_LDX_MEM(
1868                                BPF_DW, treg, si->dst_reg,
1869                                offsetof(struct bpf_sysctl_kern, tmp_reg));
1870                } else {
1871                        *insn++ = BPF_LDX_MEM(
1872                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1873                                si->dst_reg, si->src_reg,
1874                                offsetof(struct bpf_sysctl_kern, ppos));
1875                        read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
1876                        *insn++ = BPF_LDX_MEM(
1877                                BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
1878                                bpf_ctx_narrow_access_offset(
1879                                        0, read_size, sizeof(loff_t)));
1880                }
1881                *target_size = sizeof(u32);
1882                break;
1883        }
1884
1885        return insn - insn_buf;
1886}
1887
1888const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
1889        .get_func_proto         = sysctl_func_proto,
1890        .is_valid_access        = sysctl_is_valid_access,
1891        .convert_ctx_access     = sysctl_convert_ctx_access,
1892};
1893
1894const struct bpf_prog_ops cg_sysctl_prog_ops = {
1895};
1896
1897#ifdef CONFIG_NET
1898BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
1899{
1900        const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
1901
1902        return net->net_cookie;
1903}
1904
1905static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
1906        .func           = bpf_get_netns_cookie_sockopt,
1907        .gpl_only       = false,
1908        .ret_type       = RET_INTEGER,
1909        .arg1_type      = ARG_PTR_TO_CTX_OR_NULL,
1910};
1911#endif
1912
1913static const struct bpf_func_proto *
1914cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1915{
1916        switch (func_id) {
1917#ifdef CONFIG_NET
1918        case BPF_FUNC_get_netns_cookie:
1919                return &bpf_get_netns_cookie_sockopt_proto;
1920        case BPF_FUNC_sk_storage_get:
1921                return &bpf_sk_storage_get_proto;
1922        case BPF_FUNC_sk_storage_delete:
1923                return &bpf_sk_storage_delete_proto;
1924        case BPF_FUNC_setsockopt:
1925                if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
1926                        return &bpf_sk_setsockopt_proto;
1927                return NULL;
1928        case BPF_FUNC_getsockopt:
1929                if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
1930                        return &bpf_sk_getsockopt_proto;
1931                return NULL;
1932#endif
1933#ifdef CONFIG_INET
1934        case BPF_FUNC_tcp_sock:
1935                return &bpf_tcp_sock_proto;
1936#endif
1937        default:
1938                return cgroup_base_func_proto(func_id, prog);
1939        }
1940}
1941
1942static bool cg_sockopt_is_valid_access(int off, int size,
1943                                       enum bpf_access_type type,
1944                                       const struct bpf_prog *prog,
1945                                       struct bpf_insn_access_aux *info)
1946{
1947        const int size_default = sizeof(__u32);
1948
1949        if (off < 0 || off >= sizeof(struct bpf_sockopt))
1950                return false;
1951
1952        if (off % size != 0)
1953                return false;
1954
1955        if (type == BPF_WRITE) {
1956                switch (off) {
1957                case offsetof(struct bpf_sockopt, retval):
1958                        if (size != size_default)
1959                                return false;
1960                        return prog->expected_attach_type ==
1961                                BPF_CGROUP_GETSOCKOPT;
1962                case offsetof(struct bpf_sockopt, optname):
1963                        fallthrough;
1964                case offsetof(struct bpf_sockopt, level):
1965                        if (size != size_default)
1966                                return false;
1967                        return prog->expected_attach_type ==
1968                                BPF_CGROUP_SETSOCKOPT;
1969                case offsetof(struct bpf_sockopt, optlen):
1970                        return size == size_default;
1971                default:
1972                        return false;
1973                }
1974        }
1975
1976        switch (off) {
1977        case offsetof(struct bpf_sockopt, sk):
1978                if (size != sizeof(__u64))
1979                        return false;
1980                info->reg_type = PTR_TO_SOCKET;
1981                break;
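        /* optval/optval_end are exposed as packet pointers, so programs
         * access the option buffer directly after the usual data/data_end
         * style bounds check.
         */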
1982        case offsetof(struct bpf_sockopt, optval):
1983                if (size != sizeof(__u64))
1984                        return false;
1985                info->reg_type = PTR_TO_PACKET;
1986                break;
1987        case offsetof(struct bpf_sockopt, optval_end):
1988                if (size != sizeof(__u64))
1989                        return false;
1990                info->reg_type = PTR_TO_PACKET_END;
1991                break;
1992        case offsetof(struct bpf_sockopt, retval):
1993                if (size != size_default)
1994                        return false;
1995                return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
1996        default:
1997                if (size != size_default)
1998                        return false;
1999                break;
2000        }
2001        return true;
2002}
2003
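/* Emit a load or store (T is BPF_LDX_MEM or BPF_STX_MEM) of field F in
 * struct bpf_sockopt_kern, sized to match the field.
 */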
2004#define CG_SOCKOPT_ACCESS_FIELD(T, F)                                   \
2005        T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),                 \
2006          si->dst_reg, si->src_reg,                                     \
2007          offsetof(struct bpf_sockopt_kern, F))
2008
2009static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
2010                                         const struct bpf_insn *si,
2011                                         struct bpf_insn *insn_buf,
2012                                         struct bpf_prog *prog,
2013                                         u32 *target_size)
2014{
2015        struct bpf_insn *insn = insn_buf;
2016
2017        switch (si->off) {
2018        case offsetof(struct bpf_sockopt, sk):
2019                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
2020                break;
2021        case offsetof(struct bpf_sockopt, level):
2022                if (type == BPF_WRITE)
2023                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
2024                else
2025                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
2026                break;
2027        case offsetof(struct bpf_sockopt, optname):
2028                if (type == BPF_WRITE)
2029                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
2030                else
2031                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
2032                break;
2033        case offsetof(struct bpf_sockopt, optlen):
2034                if (type == BPF_WRITE)
2035                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
2036                else
2037                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
2038                break;
2039        case offsetof(struct bpf_sockopt, retval):
2040                BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);
2041
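                /* retval lives in the bpf_cg_run_ctx that
                 * BPF_PROG_RUN_ARRAY_CG installs at current->bpf_ctx.
                 * run_ctx is its first member (hence the BUILD_BUG_ON
                 * above), so both the load and the store below chase
                 * current_task->bpf_ctx and then access retval inside
                 * struct bpf_cg_run_ctx.
                 */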
2042                if (type == BPF_WRITE) {
2043                        int treg = BPF_REG_9;
2044
2045                        if (si->src_reg == treg || si->dst_reg == treg)
2046                                --treg;
2047                        if (si->src_reg == treg || si->dst_reg == treg)
2048                                --treg;
2049                        *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
2050                                              offsetof(struct bpf_sockopt_kern, tmp_reg));
2051                        *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2052                                              treg, si->dst_reg,
2053                                              offsetof(struct bpf_sockopt_kern, current_task));
2054                        *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2055                                              treg, treg,
2056                                              offsetof(struct task_struct, bpf_ctx));
2057                        *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2058                                              treg, si->src_reg,
2059                                              offsetof(struct bpf_cg_run_ctx, retval));
2060                        *insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
2061                                              offsetof(struct bpf_sockopt_kern, tmp_reg));
2062                } else {
2063                        *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
2064                                              si->dst_reg, si->src_reg,
2065                                              offsetof(struct bpf_sockopt_kern, current_task));
2066                        *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
2067                                              si->dst_reg, si->dst_reg,
2068                                              offsetof(struct task_struct, bpf_ctx));
2069                        *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
2070                                              si->dst_reg, si->dst_reg,
2071                                              offsetof(struct bpf_cg_run_ctx, retval));
2072                }
2073                break;
2074        case offsetof(struct bpf_sockopt, optval):
2075                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
2076                break;
2077        case offsetof(struct bpf_sockopt, optval_end):
2078                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
2079                break;
2080        }
2081
2082        return insn - insn_buf;
2083}
2084
2085static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2086                                   bool direct_write,
2087                                   const struct bpf_prog *prog)
2088{
2089        /* Nothing to do for the sockopt argument. The data is kzalloc'd
2090         * by the kernel, so the buffer is always safely writable. */
2091        return 0;
2092}
2093
2094const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2095        .get_func_proto         = cg_sockopt_func_proto,
2096        .is_valid_access        = cg_sockopt_is_valid_access,
2097        .convert_ctx_access     = cg_sockopt_convert_ctx_access,
2098        .gen_prologue           = cg_sockopt_get_prologue,
2099};
2100
2101const struct bpf_prog_ops cg_sockopt_prog_ops = {
2102};
2103