linux/kernel/bpf/cgroup.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Functions to manage eBPF programs attached to cgroups
   4 *
   5 * Copyright (c) 2016 Daniel Mack
   6 */
   7
   8#include <linux/kernel.h>
   9#include <linux/atomic.h>
  10#include <linux/cgroup.h>
  11#include <linux/filter.h>
  12#include <linux/slab.h>
  13#include <linux/sysctl.h>
  14#include <linux/string.h>
  15#include <linux/bpf.h>
  16#include <linux/bpf-cgroup.h>
  17#include <net/sock.h>
  18#include <net/bpf_sk_storage.h>
  19
  20#include "../cgroup/cgroup-internal.h"
  21
  22DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
  23EXPORT_SYMBOL(cgroup_bpf_enabled_key);
  24
  25void cgroup_bpf_offline(struct cgroup *cgrp)
  26{
  27        cgroup_get(cgrp);
  28        percpu_ref_kill(&cgrp->bpf.refcnt);
  29}
  30
  31static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
  32{
  33        enum bpf_cgroup_storage_type stype;
  34
  35        for_each_cgroup_storage_type(stype)
  36                bpf_cgroup_storage_free(storages[stype]);
  37}
  38
  39static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
  40                                     struct bpf_cgroup_storage *new_storages[],
  41                                     enum bpf_attach_type type,
  42                                     struct bpf_prog *prog,
  43                                     struct cgroup *cgrp)
  44{
  45        enum bpf_cgroup_storage_type stype;
  46        struct bpf_cgroup_storage_key key;
  47        struct bpf_map *map;
  48
  49        key.cgroup_inode_id = cgroup_id(cgrp);
  50        key.attach_type = type;
  51
  52        for_each_cgroup_storage_type(stype) {
  53                map = prog->aux->cgroup_storage[stype];
  54                if (!map)
  55                        continue;
  56
  57                storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
  58                if (storages[stype])
  59                        continue;
  60
  61                storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
  62                if (IS_ERR(storages[stype])) {
  63                        bpf_cgroup_storages_free(new_storages);
  64                        return -ENOMEM;
  65                }
  66
  67                new_storages[stype] = storages[stype];
  68        }
  69
  70        return 0;
  71}
  72
  73static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
  74                                       struct bpf_cgroup_storage *src[])
  75{
  76        enum bpf_cgroup_storage_type stype;
  77
  78        for_each_cgroup_storage_type(stype)
  79                dst[stype] = src[stype];
  80}
  81
  82static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
  83                                     struct cgroup *cgrp,
  84                                     enum bpf_attach_type attach_type)
  85{
  86        enum bpf_cgroup_storage_type stype;
  87
  88        for_each_cgroup_storage_type(stype)
  89                bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
  90}
  91
   92/* Called when bpf_cgroup_link is auto-detached from dying cgroup. It
   93 * drops the cgroup refcount and marks the bpf_link defunct by clearing
   94 * its cgroup pointer. It doesn't free link memory, which will eventually
   95 * be done by bpf_link's release() callback, when its last FD is closed.
   96 */
  97static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
  98{
  99        cgroup_put(link->cgroup);
 100        link->cgroup = NULL;
 101}
 102
 103/**
 104 * cgroup_bpf_release() - put references of all bpf programs and
 105 *                        release all cgroup bpf data
 106 * @work: work structure embedded into the cgroup to modify
 107 */
 108static void cgroup_bpf_release(struct work_struct *work)
 109{
 110        struct cgroup *p, *cgrp = container_of(work, struct cgroup,
 111                                               bpf.release_work);
 112        struct bpf_prog_array *old_array;
 113        struct list_head *storages = &cgrp->bpf.storages;
 114        struct bpf_cgroup_storage *storage, *stmp;
 115
 116        unsigned int atype;
 117
 118        mutex_lock(&cgroup_mutex);
 119
 120        for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
 121                struct list_head *progs = &cgrp->bpf.progs[atype];
 122                struct bpf_prog_list *pl, *pltmp;
 123
 124                list_for_each_entry_safe(pl, pltmp, progs, node) {
 125                        list_del(&pl->node);
 126                        if (pl->prog)
 127                                bpf_prog_put(pl->prog);
 128                        if (pl->link)
 129                                bpf_cgroup_link_auto_detach(pl->link);
 130                        kfree(pl);
 131                        static_branch_dec(&cgroup_bpf_enabled_key[atype]);
 132                }
 133                old_array = rcu_dereference_protected(
 134                                cgrp->bpf.effective[atype],
 135                                lockdep_is_held(&cgroup_mutex));
 136                bpf_prog_array_free(old_array);
 137        }
 138
 139        list_for_each_entry_safe(storage, stmp, storages, list_cg) {
 140                bpf_cgroup_storage_unlink(storage);
 141                bpf_cgroup_storage_free(storage);
 142        }
 143
 144        mutex_unlock(&cgroup_mutex);
 145
 146        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
 147                cgroup_bpf_put(p);
 148
 149        percpu_ref_exit(&cgrp->bpf.refcnt);
 150        cgroup_put(cgrp);
 151}
 152
 153/**
 154 * cgroup_bpf_release_fn() - callback used to schedule releasing
 155 *                           of bpf cgroup data
 156 * @ref: percpu ref counter structure
 157 */
 158static void cgroup_bpf_release_fn(struct percpu_ref *ref)
 159{
 160        struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);
 161
 162        INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
 163        queue_work(system_wq, &cgrp->bpf.release_work);
 164}
 165
  166/* Get underlying bpf_prog of bpf_prog_list entry, regardless of whether
  167 * it's through a link or a direct prog.
 168 */
 169static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
 170{
 171        if (pl->prog)
 172                return pl->prog;
 173        if (pl->link)
 174                return pl->link->link.prog;
 175        return NULL;
 176}
 177
 178/* count number of elements in the list.
 179 * it's slow but the list cannot be long
 180 */
 181static u32 prog_list_length(struct list_head *head)
 182{
 183        struct bpf_prog_list *pl;
 184        u32 cnt = 0;
 185
 186        list_for_each_entry(pl, head, node) {
 187                if (!prog_list_prog(pl))
 188                        continue;
 189                cnt++;
 190        }
 191        return cnt;
 192}
 193
 194/* if parent has non-overridable prog attached,
  195 * disallow attaching new programs to the descendant cgroup.
 196 * if parent has overridable or multi-prog, allow attaching
 197 */
 198static bool hierarchy_allows_attach(struct cgroup *cgrp,
 199                                    enum cgroup_bpf_attach_type atype)
 200{
 201        struct cgroup *p;
 202
 203        p = cgroup_parent(cgrp);
 204        if (!p)
 205                return true;
 206        do {
 207                u32 flags = p->bpf.flags[atype];
 208                u32 cnt;
 209
 210                if (flags & BPF_F_ALLOW_MULTI)
 211                        return true;
 212                cnt = prog_list_length(&p->bpf.progs[atype]);
 213                WARN_ON_ONCE(cnt > 1);
 214                if (cnt == 1)
 215                        return !!(flags & BPF_F_ALLOW_OVERRIDE);
 216                p = cgroup_parent(p);
 217        } while (p);
 218        return true;
 219}
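/* Example (editor's illustration, not in the original source): with
 * cgroups /A and /A/B, a program attached to /A with no flags blocks any
 * attach in /A/B (-EPERM); attached with BPF_F_ALLOW_OVERRIDE, it lets
 * /A/B attach a single overriding program; attached with
 * BPF_F_ALLOW_MULTI, it lets descendants attach any number of programs.
 */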
 220
 221/* compute a chain of effective programs for a given cgroup:
 222 * start from the list of programs in this cgroup and add
 223 * all parent programs.
  224 * Note that parent's F_ALLOW_OVERRIDE-type program yields to
  225 * programs in this cgroup
 226 */
 227static int compute_effective_progs(struct cgroup *cgrp,
 228                                   enum cgroup_bpf_attach_type atype,
 229                                   struct bpf_prog_array **array)
 230{
 231        struct bpf_prog_array_item *item;
 232        struct bpf_prog_array *progs;
 233        struct bpf_prog_list *pl;
 234        struct cgroup *p = cgrp;
 235        int cnt = 0;
 236
 237        /* count number of effective programs by walking parents */
 238        do {
 239                if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
 240                        cnt += prog_list_length(&p->bpf.progs[atype]);
 241                p = cgroup_parent(p);
 242        } while (p);
 243
 244        progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
 245        if (!progs)
 246                return -ENOMEM;
 247
 248        /* populate the array with effective progs */
 249        cnt = 0;
 250        p = cgrp;
 251        do {
 252                if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
 253                        continue;
 254
 255                list_for_each_entry(pl, &p->bpf.progs[atype], node) {
 256                        if (!prog_list_prog(pl))
 257                                continue;
 258
 259                        item = &progs->items[cnt];
 260                        item->prog = prog_list_prog(pl);
 261                        bpf_cgroup_storages_assign(item->cgroup_storage,
 262                                                   pl->storage);
 263                        cnt++;
 264                }
 265        } while ((p = cgroup_parent(p)));
 266
 267        *array = progs;
 268        return 0;
 269}
 270
 271static void activate_effective_progs(struct cgroup *cgrp,
 272                                     enum cgroup_bpf_attach_type atype,
 273                                     struct bpf_prog_array *old_array)
 274{
 275        old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
 276                                        lockdep_is_held(&cgroup_mutex));
 277        /* free prog array after grace period, since __cgroup_bpf_run_*()
 278         * might be still walking the array
 279         */
 280        bpf_prog_array_free(old_array);
 281}
 282
 283/**
 284 * cgroup_bpf_inherit() - inherit effective programs from parent
 285 * @cgrp: the cgroup to modify
 286 */
 287int cgroup_bpf_inherit(struct cgroup *cgrp)
 288{
  289/* has to use a macro instead of const int, since the compiler thinks
 290 * that array below is variable length
 291 */
 292#define NR ARRAY_SIZE(cgrp->bpf.effective)
 293        struct bpf_prog_array *arrays[NR] = {};
 294        struct cgroup *p;
 295        int ret, i;
 296
 297        ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
 298                              GFP_KERNEL);
 299        if (ret)
 300                return ret;
 301
 302        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
 303                cgroup_bpf_get(p);
 304
 305        for (i = 0; i < NR; i++)
 306                INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
 307
 308        INIT_LIST_HEAD(&cgrp->bpf.storages);
 309
 310        for (i = 0; i < NR; i++)
 311                if (compute_effective_progs(cgrp, i, &arrays[i]))
 312                        goto cleanup;
 313
 314        for (i = 0; i < NR; i++)
 315                activate_effective_progs(cgrp, i, arrays[i]);
 316
 317        return 0;
 318cleanup:
 319        for (i = 0; i < NR; i++)
 320                bpf_prog_array_free(arrays[i]);
 321
 322        for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
 323                cgroup_bpf_put(p);
 324
 325        percpu_ref_exit(&cgrp->bpf.refcnt);
 326
 327        return -ENOMEM;
 328}
 329
 330static int update_effective_progs(struct cgroup *cgrp,
 331                                  enum cgroup_bpf_attach_type atype)
 332{
 333        struct cgroup_subsys_state *css;
 334        int err;
 335
 336        /* allocate and recompute effective prog arrays */
 337        css_for_each_descendant_pre(css, &cgrp->self) {
 338                struct cgroup *desc = container_of(css, struct cgroup, self);
 339
 340                if (percpu_ref_is_zero(&desc->bpf.refcnt))
 341                        continue;
 342
 343                err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
 344                if (err)
 345                        goto cleanup;
 346        }
 347
 348        /* all allocations were successful. Activate all prog arrays */
 349        css_for_each_descendant_pre(css, &cgrp->self) {
 350                struct cgroup *desc = container_of(css, struct cgroup, self);
 351
 352                if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
 353                        if (unlikely(desc->bpf.inactive)) {
 354                                bpf_prog_array_free(desc->bpf.inactive);
 355                                desc->bpf.inactive = NULL;
 356                        }
 357                        continue;
 358                }
 359
 360                activate_effective_progs(desc, atype, desc->bpf.inactive);
 361                desc->bpf.inactive = NULL;
 362        }
 363
 364        return 0;
 365
 366cleanup:
 367        /* oom while computing effective. Free all computed effective arrays
 368         * since they were not activated
 369         */
 370        css_for_each_descendant_pre(css, &cgrp->self) {
 371                struct cgroup *desc = container_of(css, struct cgroup, self);
 372
 373                bpf_prog_array_free(desc->bpf.inactive);
 374                desc->bpf.inactive = NULL;
 375        }
 376
 377        return err;
 378}
 379
 380#define BPF_CGROUP_MAX_PROGS 64
 381
 382static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
 383                                               struct bpf_prog *prog,
 384                                               struct bpf_cgroup_link *link,
 385                                               struct bpf_prog *replace_prog,
 386                                               bool allow_multi)
 387{
 388        struct bpf_prog_list *pl;
 389
 390        /* single-attach case */
 391        if (!allow_multi) {
 392                if (list_empty(progs))
 393                        return NULL;
 394                return list_first_entry(progs, typeof(*pl), node);
 395        }
 396
 397        list_for_each_entry(pl, progs, node) {
 398                if (prog && pl->prog == prog && prog != replace_prog)
 399                        /* disallow attaching the same prog twice */
 400                        return ERR_PTR(-EINVAL);
 401                if (link && pl->link == link)
 402                        /* disallow attaching the same link twice */
 403                        return ERR_PTR(-EINVAL);
 404        }
 405
 406        /* direct prog multi-attach w/ replacement case */
 407        if (replace_prog) {
 408                list_for_each_entry(pl, progs, node) {
 409                        if (pl->prog == replace_prog)
 410                                /* a match found */
 411                                return pl;
 412                }
 413                /* prog to replace not found for cgroup */
 414                return ERR_PTR(-ENOENT);
 415        }
 416
 417        return NULL;
 418}
 419
 420/**
 421 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 422 *                         propagate the change to descendants
  423 * @cgrp: The cgroup whose descendants to traverse
  424 * @prog: A program to attach
  425 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
  426 * @link: A link to attach
 427 * @type: Type of attach operation
 428 * @flags: Option flags
 429 *
 430 * Exactly one of @prog or @link can be non-null.
 431 * Must be called with cgroup_mutex held.
 432 */
 433int __cgroup_bpf_attach(struct cgroup *cgrp,
 434                        struct bpf_prog *prog, struct bpf_prog *replace_prog,
 435                        struct bpf_cgroup_link *link,
 436                        enum bpf_attach_type type, u32 flags)
 437{
 438        u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
 439        struct bpf_prog *old_prog = NULL;
 440        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
 441        struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
 442        enum cgroup_bpf_attach_type atype;
 443        struct bpf_prog_list *pl;
 444        struct list_head *progs;
 445        int err;
 446
 447        if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
 448            ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
 449                /* invalid combination */
 450                return -EINVAL;
 451        if (link && (prog || replace_prog))
 452                /* only either link or prog/replace_prog can be specified */
 453                return -EINVAL;
 454        if (!!replace_prog != !!(flags & BPF_F_REPLACE))
 455                /* replace_prog implies BPF_F_REPLACE, and vice versa */
 456                return -EINVAL;
 457
 458        atype = to_cgroup_bpf_attach_type(type);
 459        if (atype < 0)
 460                return -EINVAL;
 461
 462        progs = &cgrp->bpf.progs[atype];
 463
 464        if (!hierarchy_allows_attach(cgrp, atype))
 465                return -EPERM;
 466
 467        if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
 468                /* Disallow attaching non-overridable on top
 469                 * of existing overridable in this cgroup.
 470                 * Disallow attaching multi-prog if overridable or none
 471                 */
 472                return -EPERM;
 473
 474        if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
 475                return -E2BIG;
 476
 477        pl = find_attach_entry(progs, prog, link, replace_prog,
 478                               flags & BPF_F_ALLOW_MULTI);
 479        if (IS_ERR(pl))
 480                return PTR_ERR(pl);
 481
 482        if (bpf_cgroup_storages_alloc(storage, new_storage, type,
 483                                      prog ? : link->link.prog, cgrp))
 484                return -ENOMEM;
 485
 486        if (pl) {
 487                old_prog = pl->prog;
 488        } else {
 489                pl = kmalloc(sizeof(*pl), GFP_KERNEL);
 490                if (!pl) {
 491                        bpf_cgroup_storages_free(new_storage);
 492                        return -ENOMEM;
 493                }
 494                list_add_tail(&pl->node, progs);
 495        }
 496
 497        pl->prog = prog;
 498        pl->link = link;
 499        bpf_cgroup_storages_assign(pl->storage, storage);
 500        cgrp->bpf.flags[atype] = saved_flags;
 501
 502        err = update_effective_progs(cgrp, atype);
 503        if (err)
 504                goto cleanup;
 505
 506        if (old_prog)
 507                bpf_prog_put(old_prog);
 508        else
 509                static_branch_inc(&cgroup_bpf_enabled_key[atype]);
 510        bpf_cgroup_storages_link(new_storage, cgrp, type);
 511        return 0;
 512
 513cleanup:
 514        if (old_prog) {
 515                pl->prog = old_prog;
 516                pl->link = NULL;
 517        }
 518        bpf_cgroup_storages_free(new_storage);
 519        if (!old_prog) {
 520                list_del(&pl->node);
 521                kfree(pl);
 522        }
 523        return err;
 524}
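/* Example (editor's sketch of a hypothetical userspace helper; not part
 * of this file): __cgroup_bpf_attach() is typically reached from the
 * bpf(2) BPF_PROG_ATTACH command. A minimal multi-mode attach might be:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	static int attach_egress_multi(int cgroup_fd, int prog_fd)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.target_fd = cgroup_fd;	// cgroup directory FD
 *		attr.attach_bpf_fd = prog_fd;	// loaded BPF program FD
 *		attr.attach_type = BPF_CGROUP_INET_EGRESS;
 *		attr.attach_flags = BPF_F_ALLOW_MULTI;
 *		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *	}
 */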
 525
 526/* Swap updated BPF program for given link in effective program arrays across
 527 * all descendant cgroups. This function is guaranteed to succeed.
 528 */
 529static void replace_effective_prog(struct cgroup *cgrp,
 530                                   enum cgroup_bpf_attach_type atype,
 531                                   struct bpf_cgroup_link *link)
 532{
 533        struct bpf_prog_array_item *item;
 534        struct cgroup_subsys_state *css;
 535        struct bpf_prog_array *progs;
 536        struct bpf_prog_list *pl;
 537        struct list_head *head;
 538        struct cgroup *cg;
 539        int pos;
 540
 541        css_for_each_descendant_pre(css, &cgrp->self) {
 542                struct cgroup *desc = container_of(css, struct cgroup, self);
 543
 544                if (percpu_ref_is_zero(&desc->bpf.refcnt))
 545                        continue;
 546
 547                /* find position of link in effective progs array */
 548                for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
 549                        if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
 550                                continue;
 551
 552                        head = &cg->bpf.progs[atype];
 553                        list_for_each_entry(pl, head, node) {
 554                                if (!prog_list_prog(pl))
 555                                        continue;
 556                                if (pl->link == link)
 557                                        goto found;
 558                                pos++;
 559                        }
 560                }
 561found:
 562                BUG_ON(!cg);
 563                progs = rcu_dereference_protected(
 564                                desc->bpf.effective[atype],
 565                                lockdep_is_held(&cgroup_mutex));
 566                item = &progs->items[pos];
 567                WRITE_ONCE(item->prog, link->link.prog);
 568        }
 569}
 570
 571/**
 572 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 573 *                          to descendants
  574 * @cgrp: The cgroup whose descendants to traverse
  575 * @link: A link for which to replace BPF program
  576 * @new_prog: A new program to replace the link's current program with
 577 *
 578 * Must be called with cgroup_mutex held.
 579 */
 580static int __cgroup_bpf_replace(struct cgroup *cgrp,
 581                                struct bpf_cgroup_link *link,
 582                                struct bpf_prog *new_prog)
 583{
 584        enum cgroup_bpf_attach_type atype;
 585        struct bpf_prog *old_prog;
 586        struct bpf_prog_list *pl;
 587        struct list_head *progs;
 588        bool found = false;
 589
 590        atype = to_cgroup_bpf_attach_type(link->type);
 591        if (atype < 0)
 592                return -EINVAL;
 593
 594        progs = &cgrp->bpf.progs[atype];
 595
 596        if (link->link.prog->type != new_prog->type)
 597                return -EINVAL;
 598
 599        list_for_each_entry(pl, progs, node) {
 600                if (pl->link == link) {
 601                        found = true;
 602                        break;
 603                }
 604        }
 605        if (!found)
 606                return -ENOENT;
 607
 608        old_prog = xchg(&link->link.prog, new_prog);
 609        replace_effective_prog(cgrp, atype, link);
 610        bpf_prog_put(old_prog);
 611        return 0;
 612}
 613
 614static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
 615                              struct bpf_prog *old_prog)
 616{
 617        struct bpf_cgroup_link *cg_link;
 618        int ret;
 619
 620        cg_link = container_of(link, struct bpf_cgroup_link, link);
 621
 622        mutex_lock(&cgroup_mutex);
 623        /* link might have been auto-released by dying cgroup, so fail */
 624        if (!cg_link->cgroup) {
 625                ret = -ENOLINK;
 626                goto out_unlock;
 627        }
 628        if (old_prog && link->prog != old_prog) {
 629                ret = -EPERM;
 630                goto out_unlock;
 631        }
 632        ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
 633out_unlock:
 634        mutex_unlock(&cgroup_mutex);
 635        return ret;
 636}
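/* Example (editor's sketch, hypothetical userspace; not part of this
 * file): cgroup_bpf_replace() is reached from the bpf(2) BPF_LINK_UPDATE
 * command. Passing old_prog_fd with BPF_F_REPLACE makes the update fail
 * with -EPERM if the link's current program is not the expected one:
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.link_update.link_fd = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	attr.link_update.flags = BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */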
 637
 638static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
 639                                               struct bpf_prog *prog,
 640                                               struct bpf_cgroup_link *link,
 641                                               bool allow_multi)
 642{
 643        struct bpf_prog_list *pl;
 644
 645        if (!allow_multi) {
 646                if (list_empty(progs))
 647                        /* report error when trying to detach and nothing is attached */
 648                        return ERR_PTR(-ENOENT);
 649
 650                /* to maintain backward compatibility NONE and OVERRIDE cgroups
 651                 * allow detaching with invalid FD (prog==NULL) in legacy mode
 652                 */
 653                return list_first_entry(progs, typeof(*pl), node);
 654        }
 655
 656        if (!prog && !link)
 657                /* to detach MULTI prog the user has to specify valid FD
 658                 * of the program or link to be detached
 659                 */
 660                return ERR_PTR(-EINVAL);
 661
 662        /* find the prog or link and detach it */
 663        list_for_each_entry(pl, progs, node) {
 664                if (pl->prog == prog && pl->link == link)
 665                        return pl;
 666        }
 667        return ERR_PTR(-ENOENT);
 668}
 669
 670/**
 671 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 672 *                         propagate the change to descendants
  673 * @cgrp: The cgroup whose descendants to traverse
  674 * @prog: A program to detach or NULL
  675 * @link: A link to detach or NULL
 676 * @type: Type of detach operation
 677 *
 678 * At most one of @prog or @link can be non-NULL.
 679 * Must be called with cgroup_mutex held.
 680 */
 681int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
 682                        struct bpf_cgroup_link *link, enum bpf_attach_type type)
 683{
 684        enum cgroup_bpf_attach_type atype;
 685        struct bpf_prog *old_prog;
 686        struct bpf_prog_list *pl;
 687        struct list_head *progs;
 688        u32 flags;
 689        int err;
 690
 691        atype = to_cgroup_bpf_attach_type(type);
 692        if (atype < 0)
 693                return -EINVAL;
 694
 695        progs = &cgrp->bpf.progs[atype];
 696        flags = cgrp->bpf.flags[atype];
 697
 698        if (prog && link)
 699                /* only one of prog or link can be specified */
 700                return -EINVAL;
 701
 702        pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
 703        if (IS_ERR(pl))
 704                return PTR_ERR(pl);
 705
 706        /* mark it deleted, so it's ignored while recomputing effective */
 707        old_prog = pl->prog;
 708        pl->prog = NULL;
 709        pl->link = NULL;
 710
 711        err = update_effective_progs(cgrp, atype);
 712        if (err)
 713                goto cleanup;
 714
 715        /* now can actually delete it from this cgroup list */
 716        list_del(&pl->node);
 717        kfree(pl);
 718        if (list_empty(progs))
 719                /* last program was detached, reset flags to zero */
 720                cgrp->bpf.flags[atype] = 0;
 721        if (old_prog)
 722                bpf_prog_put(old_prog);
 723        static_branch_dec(&cgroup_bpf_enabled_key[atype]);
 724        return 0;
 725
 726cleanup:
 727        /* restore back prog or link */
 728        pl->prog = old_prog;
 729        pl->link = link;
 730        return err;
 731}
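/* Example (editor's sketch, hypothetical userspace; not part of this
 * file): detaching a multi-mode program requires the FD of the exact
 * program to remove; legacy single-attach mode ignores it:
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.target_fd = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type = BPF_CGROUP_INET_EGRESS;
 *	err = syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */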
 732
 733/* Must be called with cgroup_mutex held to avoid races. */
 734int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
 735                       union bpf_attr __user *uattr)
 736{
 737        __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
 738        enum bpf_attach_type type = attr->query.attach_type;
 739        enum cgroup_bpf_attach_type atype;
 740        struct bpf_prog_array *effective;
 741        struct list_head *progs;
 742        struct bpf_prog *prog;
 743        int cnt, ret = 0, i;
 744        u32 flags;
 745
 746        atype = to_cgroup_bpf_attach_type(type);
 747        if (atype < 0)
 748                return -EINVAL;
 749
 750        progs = &cgrp->bpf.progs[atype];
 751        flags = cgrp->bpf.flags[atype];
 752
 753        effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
 754                                              lockdep_is_held(&cgroup_mutex));
 755
 756        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
 757                cnt = bpf_prog_array_length(effective);
 758        else
 759                cnt = prog_list_length(progs);
 760
 761        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
 762                return -EFAULT;
 763        if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
 764                return -EFAULT;
 765        if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
 766                /* return early if user requested only program count + flags */
 767                return 0;
 768        if (attr->query.prog_cnt < cnt) {
 769                cnt = attr->query.prog_cnt;
 770                ret = -ENOSPC;
 771        }
 772
 773        if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
 774                return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
 775        } else {
 776                struct bpf_prog_list *pl;
 777                u32 id;
 778
 779                i = 0;
 780                list_for_each_entry(pl, progs, node) {
 781                        prog = prog_list_prog(pl);
 782                        id = prog->aux->id;
 783                        if (copy_to_user(prog_ids + i, &id, sizeof(id)))
 784                                return -EFAULT;
 785                        if (++i == cnt)
 786                                break;
 787                }
 788        }
 789        return ret;
 790}
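/* Example (editor's sketch, hypothetical userspace; not part of this
 * file): querying the effective program IDs for a cgroup. A first call
 * with prog_ids == NULL returns only the count in attr.query.prog_cnt:
 *
 *	__u32 ids[64];
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.query.target_fd = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt = 64;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */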
 791
 792int cgroup_bpf_prog_attach(const union bpf_attr *attr,
 793                           enum bpf_prog_type ptype, struct bpf_prog *prog)
 794{
 795        struct bpf_prog *replace_prog = NULL;
 796        struct cgroup *cgrp;
 797        int ret;
 798
 799        cgrp = cgroup_get_from_fd(attr->target_fd);
 800        if (IS_ERR(cgrp))
 801                return PTR_ERR(cgrp);
 802
 803        if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
 804            (attr->attach_flags & BPF_F_REPLACE)) {
 805                replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
 806                if (IS_ERR(replace_prog)) {
 807                        cgroup_put(cgrp);
 808                        return PTR_ERR(replace_prog);
 809                }
 810        }
 811
 812        ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
 813                                attr->attach_type, attr->attach_flags);
 814
 815        if (replace_prog)
 816                bpf_prog_put(replace_prog);
 817        cgroup_put(cgrp);
 818        return ret;
 819}
 820
 821int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
 822{
 823        struct bpf_prog *prog;
 824        struct cgroup *cgrp;
 825        int ret;
 826
 827        cgrp = cgroup_get_from_fd(attr->target_fd);
 828        if (IS_ERR(cgrp))
 829                return PTR_ERR(cgrp);
 830
 831        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
 832        if (IS_ERR(prog))
 833                prog = NULL;
 834
 835        ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
 836        if (prog)
 837                bpf_prog_put(prog);
 838
 839        cgroup_put(cgrp);
 840        return ret;
 841}
 842
 843static void bpf_cgroup_link_release(struct bpf_link *link)
 844{
 845        struct bpf_cgroup_link *cg_link =
 846                container_of(link, struct bpf_cgroup_link, link);
 847        struct cgroup *cg;
 848
 849        /* link might have been auto-detached by dying cgroup already,
 850         * in that case our work is done here
 851         */
 852        if (!cg_link->cgroup)
 853                return;
 854
 855        mutex_lock(&cgroup_mutex);
 856
 857        /* re-check cgroup under lock again */
 858        if (!cg_link->cgroup) {
 859                mutex_unlock(&cgroup_mutex);
 860                return;
 861        }
 862
 863        WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
 864                                    cg_link->type));
 865
 866        cg = cg_link->cgroup;
 867        cg_link->cgroup = NULL;
 868
 869        mutex_unlock(&cgroup_mutex);
 870
 871        cgroup_put(cg);
 872}
 873
 874static void bpf_cgroup_link_dealloc(struct bpf_link *link)
 875{
 876        struct bpf_cgroup_link *cg_link =
 877                container_of(link, struct bpf_cgroup_link, link);
 878
 879        kfree(cg_link);
 880}
 881
 882static int bpf_cgroup_link_detach(struct bpf_link *link)
 883{
 884        bpf_cgroup_link_release(link);
 885
 886        return 0;
 887}
 888
 889static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
 890                                        struct seq_file *seq)
 891{
 892        struct bpf_cgroup_link *cg_link =
 893                container_of(link, struct bpf_cgroup_link, link);
 894        u64 cg_id = 0;
 895
 896        mutex_lock(&cgroup_mutex);
 897        if (cg_link->cgroup)
 898                cg_id = cgroup_id(cg_link->cgroup);
 899        mutex_unlock(&cgroup_mutex);
 900
 901        seq_printf(seq,
 902                   "cgroup_id:\t%llu\n"
 903                   "attach_type:\t%d\n",
 904                   cg_id,
 905                   cg_link->type);
 906}
 907
 908static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
 909                                          struct bpf_link_info *info)
 910{
 911        struct bpf_cgroup_link *cg_link =
 912                container_of(link, struct bpf_cgroup_link, link);
 913        u64 cg_id = 0;
 914
 915        mutex_lock(&cgroup_mutex);
 916        if (cg_link->cgroup)
 917                cg_id = cgroup_id(cg_link->cgroup);
 918        mutex_unlock(&cgroup_mutex);
 919
 920        info->cgroup.cgroup_id = cg_id;
 921        info->cgroup.attach_type = cg_link->type;
 922        return 0;
 923}
 924
 925static const struct bpf_link_ops bpf_cgroup_link_lops = {
 926        .release = bpf_cgroup_link_release,
 927        .dealloc = bpf_cgroup_link_dealloc,
 928        .detach = bpf_cgroup_link_detach,
 929        .update_prog = cgroup_bpf_replace,
 930        .show_fdinfo = bpf_cgroup_link_show_fdinfo,
 931        .fill_link_info = bpf_cgroup_link_fill_link_info,
 932};
 933
 934int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 935{
 936        struct bpf_link_primer link_primer;
 937        struct bpf_cgroup_link *link;
 938        struct cgroup *cgrp;
 939        int err;
 940
 941        if (attr->link_create.flags)
 942                return -EINVAL;
 943
 944        cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
 945        if (IS_ERR(cgrp))
 946                return PTR_ERR(cgrp);
 947
 948        link = kzalloc(sizeof(*link), GFP_USER);
 949        if (!link) {
 950                err = -ENOMEM;
 951                goto out_put_cgroup;
 952        }
 953        bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
 954                      prog);
 955        link->cgroup = cgrp;
 956        link->type = attr->link_create.attach_type;
 957
 958        err = bpf_link_prime(&link->link, &link_primer);
 959        if (err) {
 960                kfree(link);
 961                goto out_put_cgroup;
 962        }
 963
 964        err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
 965                                link->type, BPF_F_ALLOW_MULTI);
 966        if (err) {
 967                bpf_link_cleanup(&link_primer);
 968                goto out_put_cgroup;
 969        }
 970
 971        return bpf_link_settle(&link_primer);
 972
 973out_put_cgroup:
 974        cgroup_put(cgrp);
 975        return err;
 976}
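/* Example (editor's sketch; not part of this file): the same attach via
 * a bpf_link, using libbpf's wrapper for the BPF_LINK_CREATE command.
 * As seen above, links always use BPF_F_ALLOW_MULTI semantics:
 *
 *	int link_fd = bpf_link_create(prog_fd, cgroup_fd,
 *				      BPF_CGROUP_INET_EGRESS, NULL);
 *	// link_fd < 0 on error; closing link_fd later detaches the program
 */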
 977
 978int cgroup_bpf_prog_query(const union bpf_attr *attr,
 979                          union bpf_attr __user *uattr)
 980{
 981        struct cgroup *cgrp;
 982        int ret;
 983
 984        cgrp = cgroup_get_from_fd(attr->query.target_fd);
 985        if (IS_ERR(cgrp))
 986                return PTR_ERR(cgrp);
 987
 988        ret = cgroup_bpf_query(cgrp, attr, uattr);
 989
 990        cgroup_put(cgrp);
 991        return ret;
 992}
 993
 994/**
 995 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 996 * @sk: The socket sending or receiving traffic
 997 * @skb: The skb that is being sent or received
  998 * @type: The type of program to be executed
 999 *
1000 * If no socket is passed, or the socket is not of type INET or INET6,
1001 * this function does nothing and returns 0.
1002 *
1003 * The program type passed in via @type must be suitable for network
1004 * filtering. No further check is performed to assert that.
1005 *
1006 * For egress packets, this function can return:
1007 *   NET_XMIT_SUCCESS    (0)    - continue with packet output
1008 *   NET_XMIT_DROP       (1)    - drop packet and notify TCP to call cwr
1009 *   NET_XMIT_CN         (2)    - continue with packet output and notify TCP
1010 *                                to call cwr
1011 *   -EPERM                     - drop packet
1012 *
1013 * For ingress packets, this function will return -EPERM if any
 1014 * attached program was found and it returned != 1 during execution.
1015 * Otherwise 0 is returned.
1016 */
1017int __cgroup_bpf_run_filter_skb(struct sock *sk,
1018                                struct sk_buff *skb,
1019                                enum cgroup_bpf_attach_type atype)
1020{
1021        unsigned int offset = skb->data - skb_network_header(skb);
1022        struct sock *save_sk;
1023        void *saved_data_end;
1024        struct cgroup *cgrp;
1025        int ret;
1026
1027        if (!sk || !sk_fullsock(sk))
1028                return 0;
1029
1030        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1031                return 0;
1032
1033        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1034        save_sk = skb->sk;
1035        skb->sk = sk;
1036        __skb_push(skb, offset);
1037
1038        /* compute pointers for the bpf prog */
1039        bpf_compute_and_save_data_end(skb, &saved_data_end);
1040
1041        if (atype == CGROUP_INET_EGRESS) {
1042                ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
1043                        cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
1044        } else {
1045                ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
1046                                            __bpf_prog_run_save_cb);
1047                ret = (ret == 1 ? 0 : -EPERM);
1048        }
1049        bpf_restore_data_end(skb, saved_data_end);
1050        __skb_pull(skb, offset);
1051        skb->sk = save_sk;
1052
1053        return ret;
1054}
1055EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
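/* Example (editor's sketch of a cgroup_skb program in libbpf style; not
 * part of this file): the verdict convention handled above, where 1
 * allows the packet and 0 drops it (-EPERM to the caller):
 *
 *	SEC("cgroup_skb/egress")
 *	int egress_allow(struct __sk_buff *skb)
 *	{
 *		return 1;	// 1 = allow; 0 = drop (-EPERM to the sender)
 *	}
 */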
1056
1057/**
1058 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
1059 * @sk: sock structure to manipulate
 1060 * @type: The type of program to be executed
1061 *
 1062 * The socket passed is expected to be of type INET or INET6.
1063 *
1064 * The program type passed in via @type must be suitable for sock
1065 * filtering. No further check is performed to assert that.
1066 *
 1067 * This function will return %-EPERM if an attached program was found
 1068 * and it returned != 1 during execution. In all other cases, 0 is returned.
1069 */
1070int __cgroup_bpf_run_filter_sk(struct sock *sk,
1071                               enum cgroup_bpf_attach_type atype)
1072{
1073        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1074        int ret;
1075
1076        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
1077        return ret == 1 ? 0 : -EPERM;
1078}
1079EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
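/* Example (editor's sketch, libbpf style; not part of this file): a
 * program run at socket creation, denying e.g. IPv6 sockets; a 0 return
 * makes socket(2) fail with -EPERM for tasks in the cgroup:
 *
 *	SEC("cgroup/sock")
 *	int sock_create(struct bpf_sock *ctx)
 *	{
 *		if (ctx->family == AF_INET6)
 *			return 0;	// deny
 *		return 1;		// allow
 *	}
 */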
1080
1081/**
1082 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
1083 *                                       provided by user sockaddr
1084 * @sk: sock struct that will use sockaddr
1085 * @uaddr: sockaddr struct provided by user
 1086 * @type: The type of program to be executed
1087 * @t_ctx: Pointer to attach type specific context
1088 * @flags: Pointer to u32 which contains higher bits of BPF program
1089 *         return value (OR'ed together).
1090 *
1091 * socket is expected to be of type INET or INET6.
1092 *
1093 * This function will return %-EPERM if an attached program is found and
 1094 * it returned a value != 1 during execution. In all other cases, 0 is returned.
1095 */
1096int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
1097                                      struct sockaddr *uaddr,
1098                                      enum cgroup_bpf_attach_type atype,
1099                                      void *t_ctx,
1100                                      u32 *flags)
1101{
1102        struct bpf_sock_addr_kern ctx = {
1103                .sk = sk,
1104                .uaddr = uaddr,
1105                .t_ctx = t_ctx,
1106        };
1107        struct sockaddr_storage unspec;
1108        struct cgroup *cgrp;
1109        int ret;
1110
 1111        /* Check socket family since not all sockets represent a network
1112         * endpoint (e.g. AF_UNIX).
1113         */
1114        if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
1115                return 0;
1116
1117        if (!ctx.uaddr) {
1118                memset(&unspec, 0, sizeof(unspec));
1119                ctx.uaddr = (struct sockaddr *)&unspec;
1120        }
1121
1122        cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1123        ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
1124                                          bpf_prog_run, flags);
1125
1126        return ret == 1 ? 0 : -EPERM;
1127}
1128EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
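/* Example (editor's sketch, libbpf style; not part of this file): a
 * bind4 program. sock_addr programs may also rewrite ctx->user_ip4 and
 * ctx->user_port (both in network byte order):
 *
 *	SEC("cgroup/bind4")
 *	int bind_v4(struct bpf_sock_addr *ctx)
 *	{
 *		if (ctx->user_port == bpf_htons(111))
 *			return 0;	// forbid binding to port 111
 *		return 1;
 *	}
 */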
1129
1130/**
1131 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
1132 * @sk: socket to get cgroup from
1133 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 1134 * sk with connection information (IP addresses, etc.). May not contain
 1135 * cgroup info if it is a req sock.
 1136 * @type: The type of program to be executed
1137 *
1138 * socket passed is expected to be of type INET or INET6.
1139 *
1140 * The program type passed in via @type must be suitable for sock_ops
1141 * filtering. No further check is performed to assert that.
1142 *
 1143 * This function will return %-EPERM if an attached program was found
 1144 * and it returned != 1 during execution. In all other cases, 0 is returned.
1145 */
1146int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
1147                                     struct bpf_sock_ops_kern *sock_ops,
1148                                     enum cgroup_bpf_attach_type atype)
1149{
1150        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1151        int ret;
1152
1153        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
1154                                    bpf_prog_run);
1155        return ret == 1 ? 0 : -EPERM;
1156}
1157EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);
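/* Example (editor's sketch, libbpf style; not part of this file): a
 * sockops program reacting to one of the TCP callbacks:
 *
 *	SEC("sockops")
 *	int sockops_prog(struct bpf_sock_ops *ctx)
 *	{
 *		if (ctx->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_ops_cb_flags_set(ctx, BPF_SOCK_OPS_STATE_CB_FLAG);
 *		return 1;
 *	}
 */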
1158
1159int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
1160                                      short access, enum cgroup_bpf_attach_type atype)
1161{
1162        struct cgroup *cgrp;
1163        struct bpf_cgroup_dev_ctx ctx = {
1164                .access_type = (access << 16) | dev_type,
1165                .major = major,
1166                .minor = minor,
1167        };
1168        int allow;
1169
1170        rcu_read_lock();
1171        cgrp = task_dfl_cgroup(current);
1172        allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
1173                                      bpf_prog_run);
1174        rcu_read_unlock();
1175
1176        return !allow;
1177}
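/* Example (editor's sketch, libbpf style; not part of this file): a
 * device program matching the ctx layout built above, where the low 16
 * bits of access_type hold the device type and the high bits the access
 * mask:
 *
 *	SEC("cgroup/dev")
 *	int dev_filter(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		// allow only the char device 1:3 (/dev/null)
 *		if ((ctx->access_type & 0xFFFF) == BPF_DEVCG_DEV_CHAR &&
 *		    ctx->major == 1 && ctx->minor == 3)
 *			return 1;
 *		return 0;
 *	}
 */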
1178
1179static const struct bpf_func_proto *
1180cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1181{
1182        switch (func_id) {
1183        case BPF_FUNC_get_current_uid_gid:
1184                return &bpf_get_current_uid_gid_proto;
1185        case BPF_FUNC_get_local_storage:
1186                return &bpf_get_local_storage_proto;
1187        case BPF_FUNC_get_current_cgroup_id:
1188                return &bpf_get_current_cgroup_id_proto;
1189        case BPF_FUNC_perf_event_output:
1190                return &bpf_event_output_data_proto;
1191        default:
1192                return bpf_base_func_proto(func_id);
1193        }
1194}
1195
1196static const struct bpf_func_proto *
1197cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1198{
1199        return cgroup_base_func_proto(func_id, prog);
1200}
1201
1202static bool cgroup_dev_is_valid_access(int off, int size,
1203                                       enum bpf_access_type type,
1204                                       const struct bpf_prog *prog,
1205                                       struct bpf_insn_access_aux *info)
1206{
1207        const int size_default = sizeof(__u32);
1208
1209        if (type == BPF_WRITE)
1210                return false;
1211
1212        if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
1213                return false;
1214        /* The verifier guarantees that size > 0. */
1215        if (off % size != 0)
1216                return false;
1217
1218        switch (off) {
1219        case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
1220                bpf_ctx_record_field_size(info, size_default);
1221                if (!bpf_ctx_narrow_access_ok(off, size, size_default))
1222                        return false;
1223                break;
1224        default:
1225                if (size != size_default)
1226                        return false;
1227        }
1228
1229        return true;
1230}
1231
1232const struct bpf_prog_ops cg_dev_prog_ops = {
1233};
1234
1235const struct bpf_verifier_ops cg_dev_verifier_ops = {
1236        .get_func_proto         = cgroup_dev_func_proto,
1237        .is_valid_access        = cgroup_dev_is_valid_access,
1238};
1239
1240/**
1241 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
1242 *
1243 * @head: sysctl table header
1244 * @table: sysctl table
1245 * @write: sysctl is being read (= 0) or written (= 1)
1246 * @buf: pointer to buffer (in and out)
1247 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 1248 *      result is size of the new value if the program set one, initial value
1249 *      otherwise
1250 * @ppos: value-result argument: value is position at which read from or write
1251 *      to sysctl is happening, result is new position if program overrode it,
1252 *      initial value otherwise
1253 * @type: type of program to be executed
1254 *
1255 * Program is run when sysctl is being accessed, either read or written, and
1256 * can allow or deny such access.
1257 *
1258 * This function will return %-EPERM if an attached program is found and
1259 * returned value != 1 during execution. In all other cases 0 is returned.
1260 */
1261int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
1262                                   struct ctl_table *table, int write,
1263                                   char **buf, size_t *pcount, loff_t *ppos,
1264                                   enum cgroup_bpf_attach_type atype)
1265{
1266        struct bpf_sysctl_kern ctx = {
1267                .head = head,
1268                .table = table,
1269                .write = write,
1270                .ppos = ppos,
1271                .cur_val = NULL,
1272                .cur_len = PAGE_SIZE,
1273                .new_val = NULL,
1274                .new_len = 0,
1275                .new_updated = 0,
1276        };
1277        struct cgroup *cgrp;
1278        loff_t pos = 0;
1279        int ret;
1280
1281        ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
1282        if (!ctx.cur_val ||
1283            table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
1284                /* Let BPF program decide how to proceed. */
1285                ctx.cur_len = 0;
1286        }
1287
1288        if (write && *buf && *pcount) {
 1289                /* BPF program should be able to override the new value with a
 1290                 * buffer bigger than the one provided by the user.
1291                 */
1292                ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
1293                ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
1294                if (ctx.new_val) {
1295                        memcpy(ctx.new_val, *buf, ctx.new_len);
1296                } else {
1297                        /* Let BPF program decide how to proceed. */
1298                        ctx.new_len = 0;
1299                }
1300        }
1301
1302        rcu_read_lock();
1303        cgrp = task_dfl_cgroup(current);
1304        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, bpf_prog_run);
1305        rcu_read_unlock();
1306
1307        kfree(ctx.cur_val);
1308
1309        if (ret == 1 && ctx.new_updated) {
1310                kfree(*buf);
1311                *buf = ctx.new_val;
1312                *pcount = ctx.new_len;
1313        } else {
1314                kfree(ctx.new_val);
1315        }
1316
1317        return ret == 1 ? 0 : -EPERM;
1318}
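/* Example (editor's sketch, libbpf style; not part of this file): a
 * sysctl program that rejects writes but permits reads:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// 0 -> caller gets -EPERM
 *	}
 */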
1319
1320#ifdef CONFIG_NET
1321static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
1322                                             enum cgroup_bpf_attach_type attach_type)
1323{
1324        struct bpf_prog_array *prog_array;
1325        bool empty;
1326
1327        rcu_read_lock();
1328        prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
1329        empty = bpf_prog_array_is_empty(prog_array);
1330        rcu_read_unlock();
1331
1332        return empty;
1333}
1334
1335static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
1336                             struct bpf_sockopt_buf *buf)
1337{
1338        if (unlikely(max_optlen < 0))
1339                return -EINVAL;
1340
1341        if (unlikely(max_optlen > PAGE_SIZE)) {
1342                /* We don't expose optvals that are greater than PAGE_SIZE
1343                 * to the BPF program.
1344                 */
1345                max_optlen = PAGE_SIZE;
1346        }
1347
1348        if (max_optlen <= sizeof(buf->data)) {
1349                /* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
1350                 * bytes avoid the cost of kzalloc.
1351                 */
1352                ctx->optval = buf->data;
1353                ctx->optval_end = ctx->optval + max_optlen;
1354                return max_optlen;
1355        }
1356
1357        ctx->optval = kzalloc(max_optlen, GFP_USER);
1358        if (!ctx->optval)
1359                return -ENOMEM;
1360
1361        ctx->optval_end = ctx->optval + max_optlen;
1362
1363        return max_optlen;
1364}
1365
1366static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
1367                             struct bpf_sockopt_buf *buf)
1368{
1369        if (ctx->optval == buf->data)
1370                return;
1371        kfree(ctx->optval);
1372}
1373
1374static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
1375                                  struct bpf_sockopt_buf *buf)
1376{
1377        return ctx->optval != buf->data;
1378}
1379
1380int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
1381                                       int *optname, char __user *optval,
1382                                       int *optlen, char **kernel_optval)
1383{
1384        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1385        struct bpf_sockopt_buf buf = {};
1386        struct bpf_sockopt_kern ctx = {
1387                .sk = sk,
1388                .level = *level,
1389                .optname = *optname,
1390        };
1391        int ret, max_optlen;
1392
1393        /* Opportunistic check to see whether we have any BPF program
1394         * attached to the hook so we don't waste time allocating
1395         * memory and locking the socket.
1396         */
1397        if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT))
1398                return 0;
1399
1400        /* Allocate a bit more than the initial user buffer for
1401         * BPF program. The canonical use case is overriding
1402         * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
1403         */
1404        max_optlen = max_t(int, 16, *optlen);
1405
1406        max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1407        if (max_optlen < 0)
1408                return max_optlen;
1409
1410        ctx.optlen = *optlen;
1411
1412        if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
1413                ret = -EFAULT;
1414                goto out;
1415        }
1416
1417        lock_sock(sk);
1418        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
1419                                    &ctx, bpf_prog_run);
1420        release_sock(sk);
1421
1422        if (!ret) {
1423                ret = -EPERM;
1424                goto out;
1425        }
1426
1427        if (ctx.optlen == -1) {
1428                /* optlen set to -1, bypass kernel */
1429                ret = 1;
1430        } else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
1431                /* optlen is out of bounds */
1432                ret = -EFAULT;
1433        } else {
1434                /* optlen within bounds, run kernel handler */
1435                ret = 0;
1436
1437                /* export any potential modifications */
1438                *level = ctx.level;
1439                *optname = ctx.optname;
1440
1441                /* optlen == 0 from BPF indicates that we should
1442                 * use original userspace data.
1443                 */
1444                if (ctx.optlen != 0) {
1445                        *optlen = ctx.optlen;
1446                        /* We've used bpf_sockopt_kern->buf as an intermediary
1447                         * storage, but the BPF program indicates that we need
1448                         * to pass this data to the kernel setsockopt handler.
1449                         * No way to export on-stack buf, have to allocate a
1450                         * new buffer.
1451                         */
1452                        if (!sockopt_buf_allocated(&ctx, &buf)) {
1453                                void *p = kmalloc(ctx.optlen, GFP_USER);
1454
1455                                if (!p) {
1456                                        ret = -ENOMEM;
1457                                        goto out;
1458                                }
1459                                memcpy(p, ctx.optval, ctx.optlen);
1460                                *kernel_optval = p;
1461                        } else {
1462                                *kernel_optval = ctx.optval;
1463                        }
1464                        /* export and don't free sockopt buf */
1465                        return 0;
1466                }
1467        }
1468
1469out:
1470        sockopt_free_buf(&ctx, &buf);
1471        return ret;
1472}
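/* Example (editor's sketch, libbpf style; not part of this file): a
 * setsockopt program using the conventions handled above (return 0 to
 * reject with -EPERM; set ctx->optlen = -1 to bypass the kernel handler;
 * leave ctx->optlen at 0 to keep the original user data):
 *
 *	SEC("cgroup/setsockopt")
 *	int setsockopt_hook(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
 *			return 0;	// forbid changing IP_TOS
 *		return 1;		// run the kernel handler as usual
 *	}
 */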
1473
1474int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
1475                                       int optname, char __user *optval,
1476                                       int __user *optlen, int max_optlen,
1477                                       int retval)
1478{
1479        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1480        struct bpf_sockopt_buf buf = {};
1481        struct bpf_sockopt_kern ctx = {
1482                .sk = sk,
1483                .level = level,
1484                .optname = optname,
1485                .retval = retval,
1486        };
1487        int ret;
1488
1489        /* Opportunistic check to see whether we have any BPF program
1490         * attached to the hook so we don't waste time allocating
1491         * memory and locking the socket.
1492         */
1493        if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT))
1494                return retval;
1495
1496        ctx.optlen = max_optlen;
1497
1498        max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
1499        if (max_optlen < 0)
1500                return max_optlen;
1501
1502        if (!retval) {
1503                /* If the kernel getsockopt finished successfully,
1504                 * copy whatever was returned to the user back
1505                 * into our temporary buffer. Also set optlen to
1506                 * the value the kernel returned so that BPF
1507                 * programs can inspect it.
1508                 */
1509
1510                if (get_user(ctx.optlen, optlen)) {
1511                        ret = -EFAULT;
1512                        goto out;
1513                }
1514
1515                if (ctx.optlen < 0) {
1516                        ret = -EFAULT;
1517                        goto out;
1518                }
1519
1520                if (copy_from_user(ctx.optval, optval,
1521                                   min(ctx.optlen, max_optlen)) != 0) {
1522                        ret = -EFAULT;
1523                        goto out;
1524                }
1525        }
1526
1527        lock_sock(sk);
1528        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
1529                                    &ctx, bpf_prog_run);
1530        release_sock(sk);
1531
1532        if (!ret) {
1533                ret = -EPERM;
1534                goto out;
1535        }
1536
1537        if (ctx.optlen > max_optlen || ctx.optlen < 0) {
1538                ret = -EFAULT;
1539                goto out;
1540        }
1541
1542        /* BPF programs are only allowed to set retval to 0, not to
1543         * some arbitrary value.
1544         */
1545        if (ctx.retval != 0 && ctx.retval != retval) {
1546                ret = -EFAULT;
1547                goto out;
1548        }
1549
1550        if (ctx.optlen != 0) {
1551                if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
1552                    put_user(ctx.optlen, optlen)) {
1553                        ret = -EFAULT;
1554                        goto out;
1555                }
1556        }
1557
1558        ret = ctx.retval;
1559
1560out:
1561        sockopt_free_buf(&ctx, &buf);
1562        return ret;
1563}
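/* Editor's illustration (not part of the kernel source): a cgroup/getsockopt
 * program runs after the kernel handler and sees its result in ctx->retval.
 * It may shrink ctx->optlen or rewrite the value within the bounds enforced
 * above, and it may clear retval, but never set an arbitrary error. A sketch,
 * assuming libbpf conventions:
 *
 *	SEC("cgroup/getsockopt")
 *	int inspect_getsockopt(struct bpf_sockopt *ctx)
 *	{
 *		__u8 *optval = ctx->optval;
 *		__u8 *optval_end = ctx->optval_end;
 *
 *		if (ctx->retval)
 *			return 1;	// kernel failed; pass the error through
 *		if (optval + 1 > optval_end)
 *			return 1;	// nothing to inspect
 *		if (optval[0] == 0x42)	// hypothetical policy check
 *			return 0;	// reject: caller gets -EPERM
 *		return 1;
 *	}
 */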
1564
1565int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
1566                                            int optname, void *optval,
1567                                            int *optlen, int retval)
1568{
1569        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
1570        struct bpf_sockopt_kern ctx = {
1571                .sk = sk,
1572                .level = level,
1573                .optname = optname,
1574                .retval = retval,
1575                .optlen = *optlen,
1576                .optval = optval,
1577                .optval_end = optval + *optlen,
1578        };
1579        int ret;
1580
1581        /* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
1582         * user data back into the BPF buffer when retval != 0. This is
1583         * done as an optimization to avoid an extra copy, assuming
1584         * the kernel won't populate the data in case of an error.
1585         * Here we always pass the data, and memset() should
1586         * be called if that data shouldn't be "exported".
1587         */
1588
1589        ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
1590                                    &ctx, bpf_prog_run);
1591        if (!ret)
1592                return -EPERM;
1593
1594        if (ctx.optlen > *optlen)
1595                return -EFAULT;
1596
1597        /* BPF programs are only allowed to set retval to 0, not to
1598         * some arbitrary value.
1599         */
1600        if (ctx.retval != 0 && ctx.retval != retval)
1601                return -EFAULT;
1602
1603        /* BPF programs can shrink the buffer; export the modified length.
1604         */
1605        if (ctx.optlen != 0)
1606                *optlen = ctx.optlen;
1607
1608        return ctx.retval;
1609}
1610#endif
1611
1612static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
1613                              size_t *lenp)
1614{
1615        ssize_t tmp_ret = 0, ret;
1616
1617        if (dir->header.parent) {
1618                tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
1619                if (tmp_ret < 0)
1620                        return tmp_ret;
1621        }
1622
1623        ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
1624        if (ret < 0)
1625                return ret;
1626        *bufp += ret;
1627        *lenp -= ret;
1628        ret += tmp_ret;
1629
1630        /* Avoid leading slash. */
1631        if (!ret)
1632                return ret;
1633
1634        tmp_ret = strscpy(*bufp, "/", *lenp);
1635        if (tmp_ret < 0)
1636                return tmp_ret;
1637        *bufp += tmp_ret;
1638        *lenp -= tmp_ret;
1639
1640        return ret + tmp_ret;
1641}
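/* Editor's note: for a table under /proc/sys/net/ipv4/, for example, the
 * recursion above emits "net/ipv4/" into *bufp (parent directories first,
 * '/'-separated, with the leading slash suppressed by the !ret check); the
 * caller then appends the leaf procname itself.
 */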
1642
1643BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
1644           size_t, buf_len, u64, flags)
1645{
1646        ssize_t tmp_ret = 0, ret;
1647
1648        if (!buf)
1649                return -EINVAL;
1650
1651        if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
1652                if (!ctx->head)
1653                        return -EINVAL;
1654                tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
1655                if (tmp_ret < 0)
1656                        return tmp_ret;
1657        }
1658
1659        ret = strscpy(buf, ctx->table->procname, buf_len);
1660
1661        return ret < 0 ? ret : tmp_ret + ret;
1662}
1663
1664static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
1665        .func           = bpf_sysctl_get_name,
1666        .gpl_only       = false,
1667        .ret_type       = RET_INTEGER,
1668        .arg1_type      = ARG_PTR_TO_CTX,
1669        .arg2_type      = ARG_PTR_TO_MEM,
1670        .arg3_type      = ARG_CONST_SIZE,
1671        .arg4_type      = ARG_ANYTHING,
1672};
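/* Editor's illustration (not part of the kernel source): a cgroup/sysctl
 * program using this helper to key its policy off the sysctl name. A
 * sketch, assuming libbpf conventions; returning 1 allows the access,
 * 0 rejects it with -EPERM:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_by_name(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		// Full path such as "net/ipv4/tcp_mem"; pass
 *		// BPF_F_SYSCTL_BASE_NAME to get just "tcp_mem".
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 0;
 *		return 1;
 *	}
 */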
1673
1674static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
1675                             size_t src_len)
1676{
1677        if (!dst)
1678                return -EINVAL;
1679
1680        if (!dst_len)
1681                return -E2BIG;
1682
1683        if (!src || !src_len) {
1684                memset(dst, 0, dst_len);
1685                return -EINVAL;
1686        }
1687
1688        memcpy(dst, src, min(dst_len, src_len));
1689
1690        if (dst_len > src_len) {
1691                memset(dst + src_len, '\0', dst_len - src_len);
1692                return src_len;
1693        }
1694
1695        dst[dst_len - 1] = '\0';
1696
1697        return -E2BIG;
1698}
1699
1700BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
1701           char *, buf, size_t, buf_len)
1702{
1703        return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
1704}
1705
1706static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
1707        .func           = bpf_sysctl_get_current_value,
1708        .gpl_only       = false,
1709        .ret_type       = RET_INTEGER,
1710        .arg1_type      = ARG_PTR_TO_CTX,
1711        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1712        .arg3_type      = ARG_CONST_SIZE,
1713};
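/* Editor's illustration (not part of the kernel source): reading the
 * current value and parsing it with bpf_strtoul(). A sketch, assuming
 * libbpf conventions and a hypothetical numeric limit:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_check_value(struct bpf_sysctl *ctx)
 *	{
 *		unsigned long val;
 *		char cur[16];
 *
 *		if (bpf_sysctl_get_current_value(ctx, cur, sizeof(cur)) < 0)
 *			return 1;	// couldn't read it; allow
 *		if (bpf_strtoul(cur, sizeof(cur), 0, &val) < 0)
 *			return 1;	// not numeric; allow
 *		return val <= 4096;	// hypothetical policy
 *	}
 */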
1714
1715BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
1716           size_t, buf_len)
1717{
1718        if (!ctx->write) {
1719                if (buf && buf_len)
1720                        memset(buf, '\0', buf_len);
1721                return -EINVAL;
1722        }
1723        return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
1724}
1725
1726static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
1727        .func           = bpf_sysctl_get_new_value,
1728        .gpl_only       = false,
1729        .ret_type       = RET_INTEGER,
1730        .arg1_type      = ARG_PTR_TO_CTX,
1731        .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
1732        .arg3_type      = ARG_CONST_SIZE,
1733};
1734
1735BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
1736           const char *, buf, size_t, buf_len)
1737{
1738        if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
1739                return -EINVAL;
1740
1741        if (buf_len > PAGE_SIZE - 1)
1742                return -E2BIG;
1743
1744        memcpy(ctx->new_val, buf, buf_len);
1745        ctx->new_len = buf_len;
1746        ctx->new_updated = 1;
1747
1748        return 0;
1749}
1750
1751static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
1752        .func           = bpf_sysctl_set_new_value,
1753        .gpl_only       = false,
1754        .ret_type       = RET_INTEGER,
1755        .arg1_type      = ARG_PTR_TO_CTX,
1756        .arg2_type      = ARG_PTR_TO_MEM,
1757        .arg3_type      = ARG_CONST_SIZE,
1758};
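/* Editor's illustration (not part of the kernel source): on a write,
 * replacing whatever value the user supplied with a fixed one. A sketch,
 * assuming libbpf conventions:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_force_value(struct bpf_sysctl *ctx)
 *	{
 *		char pinned[] = "1";
 *
 *		if (ctx->write)
 *			bpf_sysctl_set_new_value(ctx, pinned, sizeof(pinned) - 1);
 *		return 1;
 *	}
 */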
1759
1760static const struct bpf_func_proto *
1761sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1762{
1763        switch (func_id) {
1764        case BPF_FUNC_strtol:
1765                return &bpf_strtol_proto;
1766        case BPF_FUNC_strtoul:
1767                return &bpf_strtoul_proto;
1768        case BPF_FUNC_sysctl_get_name:
1769                return &bpf_sysctl_get_name_proto;
1770        case BPF_FUNC_sysctl_get_current_value:
1771                return &bpf_sysctl_get_current_value_proto;
1772        case BPF_FUNC_sysctl_get_new_value:
1773                return &bpf_sysctl_get_new_value_proto;
1774        case BPF_FUNC_sysctl_set_new_value:
1775                return &bpf_sysctl_set_new_value_proto;
1776        default:
1777                return cgroup_base_func_proto(func_id, prog);
1778        }
1779}
1780
1781static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1782                                   const struct bpf_prog *prog,
1783                                   struct bpf_insn_access_aux *info)
1784{
1785        const int size_default = sizeof(__u32);
1786
1787        if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
1788                return false;
1789
1790        switch (off) {
1791        case bpf_ctx_range(struct bpf_sysctl, write):
1792                if (type != BPF_READ)
1793                        return false;
1794                bpf_ctx_record_field_size(info, size_default);
1795                return bpf_ctx_narrow_access_ok(off, size, size_default);
1796        case bpf_ctx_range(struct bpf_sysctl, file_pos):
1797                if (type == BPF_READ) {
1798                        bpf_ctx_record_field_size(info, size_default);
1799                        return bpf_ctx_narrow_access_ok(off, size, size_default);
1800                } else {
1801                        return size == size_default;
1802                }
1803        default:
1804                return false;
1805        }
1806}
1807
1808static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
1809                                     const struct bpf_insn *si,
1810                                     struct bpf_insn *insn_buf,
1811                                     struct bpf_prog *prog, u32 *target_size)
1812{
1813        struct bpf_insn *insn = insn_buf;
1814        u32 read_size;
1815
1816        switch (si->off) {
1817        case offsetof(struct bpf_sysctl, write):
1818                *insn++ = BPF_LDX_MEM(
1819                        BPF_SIZE(si->code), si->dst_reg, si->src_reg,
1820                        bpf_target_off(struct bpf_sysctl_kern, write,
1821                                       sizeof_field(struct bpf_sysctl_kern,
1822                                                    write),
1823                                       target_size));
1824                break;
1825        case offsetof(struct bpf_sysctl, file_pos):
1826                /* ppos is a pointer, so it must be accessed via indirect
1827                 * loads and stores. For stores, an additional temporary
1828                 * register is used, since neither src_reg nor dst_reg may
1829                 * be overwritten.
1830                 */
1831                if (type == BPF_WRITE) {
1832                        int treg = BPF_REG_9;
1833
1834                        if (si->src_reg == treg || si->dst_reg == treg)
1835                                --treg;
1836                        if (si->src_reg == treg || si->dst_reg == treg)
1837                                --treg;
1838                        *insn++ = BPF_STX_MEM(
1839                                BPF_DW, si->dst_reg, treg,
1840                                offsetof(struct bpf_sysctl_kern, tmp_reg));
1841                        *insn++ = BPF_LDX_MEM(
1842                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1843                                treg, si->dst_reg,
1844                                offsetof(struct bpf_sysctl_kern, ppos));
1845                        *insn++ = BPF_STX_MEM(
1846                                BPF_SIZEOF(u32), treg, si->src_reg,
1847                                bpf_ctx_narrow_access_offset(
1848                                        0, sizeof(u32), sizeof(loff_t)));
1849                        *insn++ = BPF_LDX_MEM(
1850                                BPF_DW, treg, si->dst_reg,
1851                                offsetof(struct bpf_sysctl_kern, tmp_reg));
1852                } else {
1853                        *insn++ = BPF_LDX_MEM(
1854                                BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
1855                                si->dst_reg, si->src_reg,
1856                                offsetof(struct bpf_sysctl_kern, ppos));
1857                        read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
1858                        *insn++ = BPF_LDX_MEM(
1859                                BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
1860                                bpf_ctx_narrow_access_offset(
1861                                        0, read_size, sizeof(loff_t)));
1862                }
1863                *target_size = sizeof(u32);
1864                break;
1865        }
1866
1867        return insn - insn_buf;
1868}
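/* Editor's note: for a read of file_pos, the BPF_READ branch above emits,
 * in effect:
 *
 *	dst_reg = *(u64 *)(src_reg + offsetof(struct bpf_sysctl_kern, ppos));
 *	dst_reg = *(access-size *)(dst_reg + narrow_access_offset);
 *
 * i.e. first load the ppos pointer itself, then dereference it, which is
 * why the comment above calls these indirect loads.
 */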
1869
1870const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
1871        .get_func_proto         = sysctl_func_proto,
1872        .is_valid_access        = sysctl_is_valid_access,
1873        .convert_ctx_access     = sysctl_convert_ctx_access,
1874};
1875
1876const struct bpf_prog_ops cg_sysctl_prog_ops = {
1877};
1878
1879#ifdef CONFIG_NET
1880BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
1881{
1882        const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;
1883
1884        return net->net_cookie;
1885}
1886
1887static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
1888        .func           = bpf_get_netns_cookie_sockopt,
1889        .gpl_only       = false,
1890        .ret_type       = RET_INTEGER,
1891        .arg1_type      = ARG_PTR_TO_CTX_OR_NULL,
1892};
1893#endif
1894
1895static const struct bpf_func_proto *
1896cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
1897{
1898        switch (func_id) {
1899#ifdef CONFIG_NET
1900        case BPF_FUNC_get_netns_cookie:
1901                return &bpf_get_netns_cookie_sockopt_proto;
1902        case BPF_FUNC_sk_storage_get:
1903                return &bpf_sk_storage_get_proto;
1904        case BPF_FUNC_sk_storage_delete:
1905                return &bpf_sk_storage_delete_proto;
1906        case BPF_FUNC_setsockopt:
1907                if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
1908                        return &bpf_sk_setsockopt_proto;
1909                return NULL;
1910        case BPF_FUNC_getsockopt:
1911                if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
1912                        return &bpf_sk_getsockopt_proto;
1913                return NULL;
1914#endif
1915#ifdef CONFIG_INET
1916        case BPF_FUNC_tcp_sock:
1917                return &bpf_tcp_sock_proto;
1918#endif
1919        default:
1920                return cgroup_base_func_proto(func_id, prog);
1921        }
1922}
1923
1924static bool cg_sockopt_is_valid_access(int off, int size,
1925                                       enum bpf_access_type type,
1926                                       const struct bpf_prog *prog,
1927                                       struct bpf_insn_access_aux *info)
1928{
1929        const int size_default = sizeof(__u32);
1930
1931        if (off < 0 || off >= sizeof(struct bpf_sockopt))
1932                return false;
1933
1934        if (off % size != 0)
1935                return false;
1936
1937        if (type == BPF_WRITE) {
1938                switch (off) {
1939                case offsetof(struct bpf_sockopt, retval):
1940                        if (size != size_default)
1941                                return false;
1942                        return prog->expected_attach_type ==
1943                                BPF_CGROUP_GETSOCKOPT;
1944                case offsetof(struct bpf_sockopt, optname):
1945                        fallthrough;
1946                case offsetof(struct bpf_sockopt, level):
1947                        if (size != size_default)
1948                                return false;
1949                        return prog->expected_attach_type ==
1950                                BPF_CGROUP_SETSOCKOPT;
1951                case offsetof(struct bpf_sockopt, optlen):
1952                        return size == size_default;
1953                default:
1954                        return false;
1955                }
1956        }
1957
1958        switch (off) {
1959        case offsetof(struct bpf_sockopt, sk):
1960                if (size != sizeof(__u64))
1961                        return false;
1962                info->reg_type = PTR_TO_SOCKET;
1963                break;
1964        case offsetof(struct bpf_sockopt, optval):
1965                if (size != sizeof(__u64))
1966                        return false;
1967                info->reg_type = PTR_TO_PACKET;
1968                break;
1969        case offsetof(struct bpf_sockopt, optval_end):
1970                if (size != sizeof(__u64))
1971                        return false;
1972                info->reg_type = PTR_TO_PACKET_END;
1973                break;
1974        case offsetof(struct bpf_sockopt, retval):
1975                if (size != size_default)
1976                        return false;
1977                return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
1978        default:
1979                if (size != size_default)
1980                        return false;
1981                break;
1982        }
1983        return true;
1984}
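/* Editor's note: optval/optval_end are exposed as PTR_TO_PACKET and
 * PTR_TO_PACKET_END above, so the verifier demands the same explicit
 * bounds comparison that skb data pointers need before any access.
 * A sketch of the required pattern inside a sockopt program:
 *
 *	__u8 *optval = ctx->optval;
 *	__u8 *optval_end = ctx->optval_end;
 *
 *	if (optval + 4 > optval_end)
 *		return 1;	// out of bounds; don't touch the value
 *	// optval[0..3] may now be read or written
 */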
1985
1986#define CG_SOCKOPT_ACCESS_FIELD(T, F)                                   \
1987        T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),                 \
1988          si->dst_reg, si->src_reg,                                     \
1989          offsetof(struct bpf_sockopt_kern, F))
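/* Editor's note: for instance, CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen)
 * expands (by direct substitution) to
 *
 *	BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, optlen),
 *		    si->dst_reg, si->src_reg,
 *		    offsetof(struct bpf_sockopt_kern, optlen))
 *
 * i.e. a load of bpf_sockopt_kern::optlen into the destination register.
 */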
1990
1991static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
1992                                         const struct bpf_insn *si,
1993                                         struct bpf_insn *insn_buf,
1994                                         struct bpf_prog *prog,
1995                                         u32 *target_size)
1996{
1997        struct bpf_insn *insn = insn_buf;
1998
1999        switch (si->off) {
2000        case offsetof(struct bpf_sockopt, sk):
2001                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
2002                break;
2003        case offsetof(struct bpf_sockopt, level):
2004                if (type == BPF_WRITE)
2005                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
2006                else
2007                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
2008                break;
2009        case offsetof(struct bpf_sockopt, optname):
2010                if (type == BPF_WRITE)
2011                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
2012                else
2013                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
2014                break;
2015        case offsetof(struct bpf_sockopt, optlen):
2016                if (type == BPF_WRITE)
2017                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
2018                else
2019                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
2020                break;
2021        case offsetof(struct bpf_sockopt, retval):
2022                if (type == BPF_WRITE)
2023                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
2024                else
2025                        *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
2026                break;
2027        case offsetof(struct bpf_sockopt, optval):
2028                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
2029                break;
2030        case offsetof(struct bpf_sockopt, optval_end):
2031                *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
2032                break;
2033        }
2034
2035        return insn - insn_buf;
2036}
2037
2038static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
2039                                   bool direct_write,
2040                                   const struct bpf_prog *prog)
2041{
2042        /* Nothing to do for the sockopt argument. The data is kzalloc'ed.
2043         */
2044        return 0;
2045}
2046
2047const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
2048        .get_func_proto         = cg_sockopt_func_proto,
2049        .is_valid_access        = cg_sockopt_is_valid_access,
2050        .convert_ctx_access     = cg_sockopt_convert_ctx_access,
2051        .gen_prologue           = cg_sockopt_get_prologue,
2052};
2053
2054const struct bpf_prog_ops cg_sockopt_prog_ops = {
2055};
2056