/* linux/tools/testing/selftests/cgroup/test_core.c */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2
   3#define _GNU_SOURCE
   4#include <linux/limits.h>
   5#include <linux/sched.h>
   6#include <sys/types.h>
   7#include <sys/mman.h>
   8#include <sys/wait.h>
   9#include <unistd.h>
  10#include <fcntl.h>
  11#include <sched.h>
  12#include <stdio.h>
  13#include <errno.h>
  14#include <signal.h>
  15#include <string.h>
  16#include <pthread.h>
  17
  18#include "../kselftest.h"
  19#include "cgroup_util.h"
  20
  21static int touch_anon(char *buf, size_t size)
  22{
  23        int fd;
  24        char *pos = buf;
  25
  26        fd = open("/dev/urandom", O_RDONLY);
  27        if (fd < 0)
  28                return -1;
  29
  30        while (size > 0) {
  31                ssize_t ret = read(fd, pos, size);
  32
  33                if (ret < 0) {
  34                        if (errno != EINTR) {
  35                                close(fd);
  36                                return -1;
  37                        }
  38                } else {
  39                        pos += ret;
  40                        size -= ret;
  41                }
  42        }
  43        close(fd);
  44
  45        return 0;
  46}
  47
  48static int alloc_and_touch_anon_noexit(const char *cgroup, void *arg)
  49{
  50        int ppid = getppid();
  51        size_t size = (size_t)arg;
  52        void *buf;
  53
  54        buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
  55                   0, 0);
  56        if (buf == MAP_FAILED)
  57                return -1;
  58
  59        if (touch_anon((char *)buf, size)) {
  60                munmap(buf, size);
  61                return -1;
  62        }
  63
  64        while (getppid() == ppid)
  65                sleep(1);
  66
  67        munmap(buf, size);
  68        return 0;
  69}
  70
  71/*
  72 * Create a child process that allocates and touches 100MB, then waits to be
  73 * killed. Wait until the child is attached to the cgroup, kill all processes
  74 * in that cgroup and wait until "cgroup.procs" is empty. At this point try to
  75 * destroy the empty cgroup. The test helps detect race conditions between
  76 * dying processes leaving the cgroup and cgroup destruction path.
  77 */
static int test_cgcore_destroy(const char *root)
{
        int ret = KSFT_FAIL;
        char *cg_test = NULL;
        int child_pid;
        char buf[PAGE_SIZE];

        cg_test = cg_name(root, "cg_test");

        if (!cg_test)
                goto cleanup;

        /* Iterate to widen the window for the dying-process vs rmdir race. */
        for (int i = 0; i < 10; i++) {
                if (cg_create(cg_test))
                        goto cleanup;

                /* Child dirties 100MB of anon memory, then sleeps until killed. */
                child_pid = cg_run_nowait(cg_test, alloc_and_touch_anon_noexit,
                                          (void *) MB(100));

                if (child_pid < 0)
                        goto cleanup;

                /* wait for the child to enter cgroup */
                if (cg_wait_for_proc_count(cg_test, 1))
                        goto cleanup;

                if (cg_killall(cg_test))
                        goto cleanup;

                /* wait for cgroup to be empty */
                while (1) {
                        if (cg_read(cg_test, "cgroup.procs", buf, sizeof(buf)))
                                goto cleanup;
                        if (buf[0] == '\0')
                                break;
                        usleep(1000);
                }

                /*
                 * NOTE(review): plain rmdir() rather than cg_destroy() —
                 * presumably so a failure caused by a still-dying process is
                 * reported directly instead of masked; confirm against
                 * cgroup_util.c.
                 */
                if (rmdir(cg_test))
                        goto cleanup;

                /* Reap the SIGKILLed child so it doesn't linger as a zombie. */
                if (waitpid(child_pid, NULL, 0) < 0)
                        goto cleanup;
        }
        ret = KSFT_PASS;
cleanup:
        if (cg_test)
                cg_destroy(cg_test);
        free(cg_test);
        return ret;
}
 129
 130/*
 131 * A(0) - B(0) - C(1)
 132 *        \ D(0)
 133 *
 134 * A, B and C's "populated" fields would be 1 while D's 0.
 135 * test that after the one process in C is moved to root,
 136 * A,B and C's "populated" fields would flip to "0" and file
 137 * modified events will be generated on the
 138 * "cgroup.events" files of both cgroups.
 139 */
static int test_cgcore_populated(const char *root)
{
        int ret = KSFT_FAIL;
        int err;
        char *cg_test_a = NULL, *cg_test_b = NULL;
        char *cg_test_c = NULL, *cg_test_d = NULL;
        int cgroup_fd = -EBADF;
        pid_t pid;

        /* Hierarchy: A/B with C and D as sibling leaves under B. */
        cg_test_a = cg_name(root, "cg_test_a");
        cg_test_b = cg_name(root, "cg_test_a/cg_test_b");
        cg_test_c = cg_name(root, "cg_test_a/cg_test_b/cg_test_c");
        cg_test_d = cg_name(root, "cg_test_a/cg_test_b/cg_test_d");

        if (!cg_test_a || !cg_test_b || !cg_test_c || !cg_test_d)
                goto cleanup;

        if (cg_create(cg_test_a))
                goto cleanup;

        if (cg_create(cg_test_b))
                goto cleanup;

        if (cg_create(cg_test_c))
                goto cleanup;

        if (cg_create(cg_test_d))
                goto cleanup;

        /* Put the current process into C... */
        if (cg_enter_current(cg_test_c))
                goto cleanup;

        /* ...so C and all its ancestors must report "populated 1"... */
        if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 1\n"))
                goto cleanup;

        if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 1\n"))
                goto cleanup;

        if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 1\n"))
                goto cleanup;

        /* ...while the empty sibling D stays at "populated 0". */
        if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
                goto cleanup;

        /* Move back to the root; every cgroup should flip to 0. */
        if (cg_enter_current(root))
                goto cleanup;

        if (cg_read_strcmp(cg_test_a, "cgroup.events", "populated 0\n"))
                goto cleanup;

        if (cg_read_strcmp(cg_test_b, "cgroup.events", "populated 0\n"))
                goto cleanup;

        if (cg_read_strcmp(cg_test_c, "cgroup.events", "populated 0\n"))
                goto cleanup;

        if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
                goto cleanup;

        /* Test that we can directly clone into a new cgroup. */
        cgroup_fd = dirfd_open_opath(cg_test_d);
        if (cgroup_fd < 0)
                goto cleanup;

        pid = clone_into_cgroup(cgroup_fd);
        if (pid < 0) {
                /* clone3()/CLONE_INTO_CGROUP unsupported: pass, don't fail. */
                if (errno == ENOSYS)
                        goto cleanup_pass;
                goto cleanup;
        }

        /* Child: stop itself so the parent can sample "populated" first. */
        if (pid == 0) {
                if (raise(SIGSTOP))
                        exit(EXIT_FAILURE);
                exit(EXIT_SUCCESS);
        }

        /* Sample now, but defer the verdict until the child is reaped. */
        err = cg_read_strcmp(cg_test_d, "cgroup.events", "populated 1\n");

        /* Wait for the SIGSTOP, resume the child, then reap its exit. */
        (void)clone_reap(pid, WSTOPPED);
        (void)kill(pid, SIGCONT);
        (void)clone_reap(pid, WEXITED);

        if (err)
                goto cleanup;

        /* Child gone: D must be empty again. */
        if (cg_read_strcmp(cg_test_d, "cgroup.events", "populated 0\n"))
                goto cleanup;

        /* Remove cgroup. */
        if (cg_test_d) {
                cg_destroy(cg_test_d);
                free(cg_test_d);
                cg_test_d = NULL;
        }

        /* Cloning into the fd of a removed cgroup must now fail. */
        pid = clone_into_cgroup(cgroup_fd);
        if (pid < 0)
                goto cleanup_pass;
        /* A successful clone here is a test failure. */
        if (pid == 0)
                exit(EXIT_SUCCESS);
        (void)clone_reap(pid, WEXITED);
        goto cleanup;

cleanup_pass:
        ret = KSFT_PASS;

cleanup:
        if (cg_test_d)
                cg_destroy(cg_test_d);
        if (cg_test_c)
                cg_destroy(cg_test_c);
        if (cg_test_b)
                cg_destroy(cg_test_b);
        if (cg_test_a)
                cg_destroy(cg_test_a);
        free(cg_test_d);
        free(cg_test_c);
        free(cg_test_b);
        free(cg_test_a);
        if (cgroup_fd >= 0)
                close(cgroup_fd);
        return ret;
}
 264
 265/*
 266 * A (domain threaded) - B (threaded) - C (domain)
 267 *
 268 * test that C can't be used until it is turned into a
 269 * threaded cgroup.  "cgroup.type" file will report "domain (invalid)" in
 270 * these cases. Operations which fail due to invalid topology use
 271 * EOPNOTSUPP as the errno.
 272 */
static int test_cgcore_invalid_domain(const char *root)
{
        int ret = KSFT_FAIL;
        char *grandparent = NULL, *parent = NULL, *child = NULL;

        grandparent = cg_name(root, "cg_test_grandparent");
        parent = cg_name(root, "cg_test_grandparent/cg_test_parent");
        child = cg_name(root, "cg_test_grandparent/cg_test_parent/cg_test_child");
        if (!parent || !child || !grandparent)
                goto cleanup;

        if (cg_create(grandparent))
                goto cleanup;

        if (cg_create(parent))
                goto cleanup;

        if (cg_create(child))
                goto cleanup;

        /* Making the parent threaded invalidates its domain child. */
        if (cg_write(parent, "cgroup.type", "threaded"))
                goto cleanup;

        if (cg_read_strcmp(child, "cgroup.type", "domain invalid\n"))
                goto cleanup;

        /* Attaching to an invalid domain must fail... */
        if (!cg_enter_current(child))
                goto cleanup;

        /* ...specifically with EOPNOTSUPP (invalid topology). */
        if (errno != EOPNOTSUPP)
                goto cleanup;

        /* Same expectation via clone3(CLONE_INTO_CGROUP)... */
        if (!clone_into_cgroup_run_wait(child))
                goto cleanup;

        /* ...unless clone3() itself is unavailable — then just pass. */
        if (errno == ENOSYS)
                goto cleanup_pass;

        if (errno != EOPNOTSUPP)
                goto cleanup;

cleanup_pass:
        ret = KSFT_PASS;

cleanup:
        cg_enter_current(root);
        if (child)
                cg_destroy(child);
        if (parent)
                cg_destroy(parent);
        if (grandparent)
                cg_destroy(grandparent);
        free(child);
        free(parent);
        free(grandparent);
        return ret;
}
 330
 331/*
 332 * Test that when a child becomes threaded
 333 * the parent type becomes domain threaded.
 334 */
 335static int test_cgcore_parent_becomes_threaded(const char *root)
 336{
 337        int ret = KSFT_FAIL;
 338        char *parent = NULL, *child = NULL;
 339
 340        parent = cg_name(root, "cg_test_parent");
 341        child = cg_name(root, "cg_test_parent/cg_test_child");
 342        if (!parent || !child)
 343                goto cleanup;
 344
 345        if (cg_create(parent))
 346                goto cleanup;
 347
 348        if (cg_create(child))
 349                goto cleanup;
 350
 351        if (cg_write(child, "cgroup.type", "threaded"))
 352                goto cleanup;
 353
 354        if (cg_read_strcmp(parent, "cgroup.type", "domain threaded\n"))
 355                goto cleanup;
 356
 357        ret = KSFT_PASS;
 358
 359cleanup:
 360        if (child)
 361                cg_destroy(child);
 362        if (parent)
 363                cg_destroy(parent);
 364        free(child);
 365        free(parent);
 366        return ret;
 367
 368}
 369
 370/*
 371 * Test that there's no internal process constrain on threaded cgroups.
 372 * You can add threads/processes on a parent with a controller enabled.
 373 */
 374static int test_cgcore_no_internal_process_constraint_on_threads(const char *root)
 375{
 376        int ret = KSFT_FAIL;
 377        char *parent = NULL, *child = NULL;
 378
 379        if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
 380            cg_write(root, "cgroup.subtree_control", "+cpu")) {
 381                ret = KSFT_SKIP;
 382                goto cleanup;
 383        }
 384
 385        parent = cg_name(root, "cg_test_parent");
 386        child = cg_name(root, "cg_test_parent/cg_test_child");
 387        if (!parent || !child)
 388                goto cleanup;
 389
 390        if (cg_create(parent))
 391                goto cleanup;
 392
 393        if (cg_create(child))
 394                goto cleanup;
 395
 396        if (cg_write(parent, "cgroup.type", "threaded"))
 397                goto cleanup;
 398
 399        if (cg_write(child, "cgroup.type", "threaded"))
 400                goto cleanup;
 401
 402        if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
 403                goto cleanup;
 404
 405        if (cg_enter_current(parent))
 406                goto cleanup;
 407
 408        ret = KSFT_PASS;
 409
 410cleanup:
 411        cg_enter_current(root);
 412        cg_enter_current(root);
 413        if (child)
 414                cg_destroy(child);
 415        if (parent)
 416                cg_destroy(parent);
 417        free(child);
 418        free(parent);
 419        return ret;
 420}
 421
 422/*
 423 * Test that you can't enable a controller on a child if it's not enabled
 424 * on the parent.
 425 */
 426static int test_cgcore_top_down_constraint_enable(const char *root)
 427{
 428        int ret = KSFT_FAIL;
 429        char *parent = NULL, *child = NULL;
 430
 431        parent = cg_name(root, "cg_test_parent");
 432        child = cg_name(root, "cg_test_parent/cg_test_child");
 433        if (!parent || !child)
 434                goto cleanup;
 435
 436        if (cg_create(parent))
 437                goto cleanup;
 438
 439        if (cg_create(child))
 440                goto cleanup;
 441
 442        if (!cg_write(child, "cgroup.subtree_control", "+memory"))
 443                goto cleanup;
 444
 445        ret = KSFT_PASS;
 446
 447cleanup:
 448        if (child)
 449                cg_destroy(child);
 450        if (parent)
 451                cg_destroy(parent);
 452        free(child);
 453        free(parent);
 454        return ret;
 455}
 456
 457/*
 458 * Test that you can't disable a controller on a parent
 459 * if it's enabled in a child.
 460 */
 461static int test_cgcore_top_down_constraint_disable(const char *root)
 462{
 463        int ret = KSFT_FAIL;
 464        char *parent = NULL, *child = NULL;
 465
 466        parent = cg_name(root, "cg_test_parent");
 467        child = cg_name(root, "cg_test_parent/cg_test_child");
 468        if (!parent || !child)
 469                goto cleanup;
 470
 471        if (cg_create(parent))
 472                goto cleanup;
 473
 474        if (cg_create(child))
 475                goto cleanup;
 476
 477        if (cg_write(parent, "cgroup.subtree_control", "+memory"))
 478                goto cleanup;
 479
 480        if (cg_write(child, "cgroup.subtree_control", "+memory"))
 481                goto cleanup;
 482
 483        if (!cg_write(parent, "cgroup.subtree_control", "-memory"))
 484                goto cleanup;
 485
 486        ret = KSFT_PASS;
 487
 488cleanup:
 489        if (child)
 490                cg_destroy(child);
 491        if (parent)
 492                cg_destroy(parent);
 493        free(child);
 494        free(parent);
 495        return ret;
 496}
 497
 498/*
 499 * Test internal process constraint.
 500 * You can't add a pid to a domain parent if a controller is enabled.
 501 */
 502static int test_cgcore_internal_process_constraint(const char *root)
 503{
 504        int ret = KSFT_FAIL;
 505        char *parent = NULL, *child = NULL;
 506
 507        parent = cg_name(root, "cg_test_parent");
 508        child = cg_name(root, "cg_test_parent/cg_test_child");
 509        if (!parent || !child)
 510                goto cleanup;
 511
 512        if (cg_create(parent))
 513                goto cleanup;
 514
 515        if (cg_create(child))
 516                goto cleanup;
 517
 518        if (cg_write(parent, "cgroup.subtree_control", "+memory"))
 519                goto cleanup;
 520
 521        if (!cg_enter_current(parent))
 522                goto cleanup;
 523
 524        if (!clone_into_cgroup_run_wait(parent))
 525                goto cleanup;
 526
 527        ret = KSFT_PASS;
 528
 529cleanup:
 530        if (child)
 531                cg_destroy(child);
 532        if (parent)
 533                cg_destroy(parent);
 534        free(child);
 535        free(parent);
 536        return ret;
 537}
 538
 539static void *dummy_thread_fn(void *arg)
 540{
 541        return (void *)(size_t)pause();
 542}
 543
 544/*
 545 * Test threadgroup migration.
 546 * All threads of a process are migrated together.
 547 */
static int test_cgcore_proc_migration(const char *root)
{
        int ret = KSFT_FAIL;
        int t, c_threads = 0, n_threads = 13;
        char *src = NULL, *dst = NULL;
        pthread_t threads[n_threads];

        src = cg_name(root, "cg_src");
        dst = cg_name(root, "cg_dst");
        if (!src || !dst)
                goto cleanup;

        if (cg_create(src))
                goto cleanup;
        if (cg_create(dst))
                goto cleanup;

        if (cg_enter_current(src))
                goto cleanup;

        /* Spawn helpers while the process sits in src; each one blocks
         * in pause() (dummy_thread_fn). c_threads tracks how many were
         * actually created so cleanup only touches valid entries. */
        for (c_threads = 0; c_threads < n_threads; ++c_threads) {
                if (pthread_create(&threads[c_threads], NULL, dummy_thread_fn, NULL))
                        goto cleanup;
        }

        /* Process-level migration must carry every thread along:
         * dst's cgroup.threads should list main + n_threads entries. */
        cg_enter_current(dst);
        if (cg_read_lc(dst, "cgroup.threads") != n_threads + 1)
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        /* Cancel first, then join: pause() is a cancellation point, so
         * blocked threads terminate promptly. */
        for (t = 0; t < c_threads; ++t) {
                pthread_cancel(threads[t]);
        }

        for (t = 0; t < c_threads; ++t) {
                pthread_join(threads[t], NULL);
        }

        cg_enter_current(root);

        if (dst)
                cg_destroy(dst);
        if (src)
                cg_destroy(src);
        free(dst);
        free(src);
        return ret;
}
 598
/*
 * Thread body for test_cgcore_thread_migration(): bounce this thread
 * between grps[1] and grps[2] and verify the move took effect each time.
 * Returns NULL on success, (void *)-1 on a mismatch.
 */
static void *migrating_thread_fn(void *arg)
{
        int g, i, n_iterations = 1000;
        char **grps = arg;
        char lines[3][PATH_MAX];

        /* Precompute the expected "0::<path>" lines with the root prefix
         * (grps[0]) stripped; lines[0] is intentionally unused. */
        for (g = 1; g < 3; ++g)
                snprintf(lines[g], sizeof(lines[g]), "0::%s", grps[g] + strlen(grps[0]));

        for (i = 0; i < n_iterations; ++i) {
                /* Alternate between the two threaded sibling cgroups. */
                cg_enter_current_thread(grps[(i % 2) + 1]);

                /* NOTE(review): args (0, 1) presumably mean current
                 * process/thread — confirm against cgroup_util.c. */
                if (proc_read_strstr(0, 1, "cgroup", lines[(i % 2) + 1]))
                        return (void *)-1;
        }
        return NULL;
}
 616
 617/*
 618 * Test single thread migration.
 619 * Threaded cgroups allow successful migration of a thread.
 620 */
static int test_cgcore_thread_migration(const char *root)
{
        int ret = KSFT_FAIL;
        char *dom = NULL;
        char line[PATH_MAX];
        /* grps[0] = root prefix; grps[1]/grps[2] = threaded src/dst. */
        char *grps[3] = { (char *)root, NULL, NULL };
        pthread_t thr;
        void *retval;

        dom = cg_name(root, "cg_dom");
        grps[1] = cg_name(root, "cg_dom/cg_src");
        grps[2] = cg_name(root, "cg_dom/cg_dst");
        if (!grps[1] || !grps[2] || !dom)
                goto cleanup;

        if (cg_create(dom))
                goto cleanup;
        if (cg_create(grps[1]))
                goto cleanup;
        if (cg_create(grps[2]))
                goto cleanup;

        /* Both leaves threaded so single threads may migrate between them. */
        if (cg_write(grps[1], "cgroup.type", "threaded"))
                goto cleanup;
        if (cg_write(grps[2], "cgroup.type", "threaded"))
                goto cleanup;

        if (cg_enter_current(grps[1]))
                goto cleanup;

        /* The worker bounces itself between grps[1] and grps[2]. */
        if (pthread_create(&thr, NULL, migrating_thread_fn, grps))
                goto cleanup;

        if (pthread_join(thr, &retval))
                goto cleanup;

        /* Non-NULL retval means a migration check failed in the worker. */
        if (retval)
                goto cleanup;

        /* The main thread must have stayed in grps[1] the whole time. */
        snprintf(line, sizeof(line), "0::%s", grps[1] + strlen(grps[0]));
        if (proc_read_strstr(0, 1, "cgroup", line))
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_enter_current(root);
        if (grps[2])
                cg_destroy(grps[2]);
        if (grps[1])
                cg_destroy(grps[1]);
        if (dom)
                cg_destroy(dom);
        free(grps[2]);
        free(grps[1]);
        free(dom);
        return ret;
}
 679
 680/*
 681 * cgroup migration permission check should be performed based on the
 682 * credentials at the time of open instead of write.
 683 */
static int test_cgcore_lesser_euid_open(const char *root)
{
        const uid_t test_euid = 65534;  /* usually nobody, any !root is fine */
        int ret = KSFT_FAIL;
        char *cg_test_a = NULL, *cg_test_b = NULL;
        char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
        int cg_test_b_procs_fd = -1;
        uid_t saved_uid;

        cg_test_a = cg_name(root, "cg_test_a");
        cg_test_b = cg_name(root, "cg_test_b");

        if (!cg_test_a || !cg_test_b)
                goto cleanup;

        cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
        cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

        if (!cg_test_a_procs || !cg_test_b_procs)
                goto cleanup;

        if (cg_create(cg_test_a) || cg_create(cg_test_b))
                goto cleanup;

        if (cg_enter_current(cg_test_a))
                goto cleanup;

        /* Make the unprivileged euid pass the plain open() permission check. */
        if (chown(cg_test_a_procs, test_euid, -1) ||
            chown(cg_test_b_procs, test_euid, -1))
                goto cleanup;

        /* Temporarily drop to the lesser euid for the open()... */
        saved_uid = geteuid();
        if (seteuid(test_euid))
                goto cleanup;

        cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR);

        /* ...and restore full privileges before writing. */
        if (seteuid(saved_uid))
                goto cleanup;

        if (cg_test_b_procs_fd < 0)
                goto cleanup;

        /* The write runs privileged, yet must fail with EACCES: the
         * migration permission check uses the open()-time credentials. */
        if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES)
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_enter_current(root);
        if (cg_test_b_procs_fd >= 0)
                close(cg_test_b_procs_fd);
        if (cg_test_b)
                cg_destroy(cg_test_b);
        if (cg_test_a)
                cg_destroy(cg_test_a);
        free(cg_test_b_procs);
        free(cg_test_a_procs);
        free(cg_test_b);
        free(cg_test_a);
        return ret;
}
 746
/* Shared argument block for lesser_ns_open_thread_fn() (clone()d child). */
struct lesser_ns_open_thread_arg {
        const char      *path;  /* in: cgroup.procs path to open */
        int             fd;     /* out: resulting fd, -1 on failure */
        int             err;    /* out: errno captured right after open() */
};
 752
/*
 * clone()d child entry: open targ->path O_RDWR from inside a fresh
 * cgroup namespace (CLONE_NEWCGROUP). The fd is visible to the parent
 * via the shared file table (CLONE_FILES); results travel back through
 * the shared address space (CLONE_VM).
 */
static int lesser_ns_open_thread_fn(void *arg)
{
        struct lesser_ns_open_thread_arg *targ = arg;

        targ->fd = open(targ->path, O_RDWR);
        /* err is only meaningful when fd < 0. */
        targ->err = errno;
        return 0;
}
 761
 762/*
 763 * cgroup migration permission check should be performed based on the cgroup
 764 * namespace at the time of open instead of write.
 765 */
static int test_cgcore_lesser_ns_open(const char *root)
{
        static char stack[65536];
        const uid_t test_euid = 65534;  /* usually nobody, any !root is fine */
        int ret = KSFT_FAIL;
        char *cg_test_a = NULL, *cg_test_b = NULL;
        char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL;
        int cg_test_b_procs_fd = -1;
        struct lesser_ns_open_thread_arg targ = { .fd = -1 };
        pid_t pid;
        int status;

        cg_test_a = cg_name(root, "cg_test_a");
        cg_test_b = cg_name(root, "cg_test_b");

        if (!cg_test_a || !cg_test_b)
                goto cleanup;

        cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs");
        cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs");

        if (!cg_test_a_procs || !cg_test_b_procs)
                goto cleanup;

        if (cg_create(cg_test_a) || cg_create(cg_test_b))
                goto cleanup;

        /* Enter b so the child's new cgroup ns is rooted at b. */
        if (cg_enter_current(cg_test_b))
                goto cleanup;

        if (chown(cg_test_a_procs, test_euid, -1) ||
            chown(cg_test_b_procs, test_euid, -1))
                goto cleanup;

        /* Child opens b's cgroup.procs from within a fresh cgroup ns.
         * Stack grows down, so pass the top of the static buffer. */
        targ.path = cg_test_b_procs;
        pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack),
                    CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD,
                    &targ);
        if (pid < 0)
                goto cleanup;

        if (waitpid(pid, &status, 0) < 0)
                goto cleanup;

        if (!WIFEXITED(status))
                goto cleanup;

        /* fd was produced in the child but is shared via CLONE_FILES. */
        cg_test_b_procs_fd = targ.fd;
        if (cg_test_b_procs_fd < 0)
                goto cleanup;

        /* Move ourselves out of b so a write would be a real migration. */
        if (cg_enter_current(cg_test_a))
                goto cleanup;

        /* Must fail with ENOENT: the migration check is bound to the
         * cgroup namespace captured at open() time, not at write(). */
        if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT)
                goto cleanup;

        ret = KSFT_PASS;

cleanup:
        cg_enter_current(root);
        if (cg_test_b_procs_fd >= 0)
                close(cg_test_b_procs_fd);
        if (cg_test_b)
                cg_destroy(cg_test_b);
        if (cg_test_a)
                cg_destroy(cg_test_a);
        free(cg_test_b_procs);
        free(cg_test_a_procs);
        free(cg_test_b);
        free(cg_test_a);
        return ret;
}
 839
/* T() pairs each test function with its stringified name for reporting. */
#define T(x) { x, #x }
struct corecg_test {
        int (*fn)(const char *root);    /* test body; returns a KSFT_* code */
        const char *name;               /* human-readable test name */
} tests[] = {
        T(test_cgcore_internal_process_constraint),
        T(test_cgcore_top_down_constraint_enable),
        T(test_cgcore_top_down_constraint_disable),
        T(test_cgcore_no_internal_process_constraint_on_threads),
        T(test_cgcore_parent_becomes_threaded),
        T(test_cgcore_invalid_domain),
        T(test_cgcore_populated),
        T(test_cgcore_proc_migration),
        T(test_cgcore_thread_migration),
        T(test_cgcore_destroy),
        T(test_cgcore_lesser_euid_open),
        T(test_cgcore_lesser_ns_open),
};
#undef T
 859
 860int main(int argc, char *argv[])
 861{
 862        char root[PATH_MAX];
 863        int i, ret = EXIT_SUCCESS;
 864
 865        if (cg_find_unified_root(root, sizeof(root)))
 866                ksft_exit_skip("cgroup v2 isn't mounted\n");
 867
 868        if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
 869                if (cg_write(root, "cgroup.subtree_control", "+memory"))
 870                        ksft_exit_skip("Failed to set memory controller\n");
 871
 872        for (i = 0; i < ARRAY_SIZE(tests); i++) {
 873                switch (tests[i].fn(root)) {
 874                case KSFT_PASS:
 875                        ksft_test_result_pass("%s\n", tests[i].name);
 876                        break;
 877                case KSFT_SKIP:
 878                        ksft_test_result_skip("%s\n", tests[i].name);
 879                        break;
 880                default:
 881                        ret = EXIT_FAILURE;
 882                        ksft_test_result_fail("%s\n", tests[i].name);
 883                        break;
 884                }
 885        }
 886
 887        return ret;
 888}
 889