linux/drivers/base/arch_topology.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
        return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
        return cpufreq_supports_freq_invariance() ||
               supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
        if (scale_freq_invariant == status)
                return;

        /*
         * Task scheduler behavior depends on frequency invariance support,
         * either cpufreq or counter driven. If the support status changes as
         * a result of counter initialisation and use, retrigger the build of
         * scheduling domains to ensure the information is propagated properly.
         */
        if (topology_scale_freq_invariant() == status) {
                scale_freq_invariant = status;
                rebuild_sched_domains_energy();
        }
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
                                    const struct cpumask *cpus)
{
        struct scale_freq_data *sfd;
        int cpu;

        /*
         * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
         * supported by cpufreq.
         */
        if (cpumask_empty(&scale_freq_counters_mask))
                scale_freq_invariant = topology_scale_freq_invariant();

        rcu_read_lock();

        for_each_cpu(cpu, cpus) {
                sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

                /* Use ARCH provided counters whenever possible */
                if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
                        rcu_assign_pointer(per_cpu(sft_data, cpu), data);
                        cpumask_set_cpu(cpu, &scale_freq_counters_mask);
                }
        }

        rcu_read_unlock();

        update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);

void topology_clear_scale_freq_source(enum scale_freq_source source,
                                      const struct cpumask *cpus)
{
        struct scale_freq_data *sfd;
        int cpu;

        rcu_read_lock();

        for_each_cpu(cpu, cpus) {
                sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

                if (sfd && sfd->source == source) {
                        rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
                        cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
                }
        }

        rcu_read_unlock();

        /*
         * Make sure all references to previous sft_data are dropped to avoid
         * use-after-free races.
         */
        synchronize_rcu();

        update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);
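
/*
 * Illustration (not part of this file): an architecture driver that exposes
 * per-CPU activity counters would typically pair the two calls above roughly
 * as sketched below. The amu_* names are made up for this example; on arm64
 * the real hook lives in the AMU support code.
 *
 *        static void amu_set_freq_scale(void)
 *        {
 *                // derive delivered vs. maximum performance from the
 *                // counters and update this CPU's arch_freq_scale
 *        }
 *
 *        static struct scale_freq_data amu_sfd = {
 *                .source         = SCALE_FREQ_SOURCE_ARCH,
 *                .set_freq_scale = amu_set_freq_scale,
 *        };
 *
 *        // when the counters are found usable on a set of CPUs:
 *        topology_set_scale_freq_source(&amu_sfd, cpus);
 *        // and if they later become unusable again:
 *        topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus);
 */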

void topology_scale_freq_tick(void)
{
        struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

        if (sfd)
                sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
                             unsigned long max_freq)
{
        unsigned long scale;
        int i;

        if (WARN_ON_ONCE(!cur_freq || !max_freq))
                return;

        /*
         * If the use of counters for FIE is enabled, just return as we don't
         * want to update the scale factor with information from CPUFREQ.
         * Instead the scale factor will be updated from arch_scale_freq_tick.
         */
        if (supports_scale_freq_counters(cpus))
                return;

        scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

        for_each_cpu(i, cpus)
                per_cpu(arch_freq_scale, i) = scale;
}
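
/*
 * Worked example (numbers illustrative): with SCHED_CAPACITY_SHIFT == 10, a
 * CPU reported by cpufreq at cur_freq = 1500000 kHz out of
 * max_freq = 3000000 kHz gets scale = (1500000 << 10) / 3000000 = 512, i.e.
 * half of SCHED_CAPACITY_SCALE (1024), which the scheduler then uses to
 * scale that CPU's apparent capacity.
 */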

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

void topology_set_thermal_pressure(const struct cpumask *cpus,
                               unsigned long th_pressure)
{
        int cpu;

        for_each_cpu(cpu, cpus)
                WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
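
/*
 * Illustration (numbers made up): a cooling device capping a 1024-capacity
 * CPU to 75% of its maximum frequency would typically report
 * th_pressure = 1024 - (1024 * 75 / 100) = 256, i.e. the capacity currently
 * lost to the thermal cap; the scheduler reads it back through
 * arch_scale_thermal_pressure().
 */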

static ssize_t cpu_capacity_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
        int i;
        struct device *cpu;

        for_each_possible_cpu(i) {
                cpu = get_cpu_device(i);
                if (!cpu) {
                        pr_err("%s: too early to get CPU%d device!\n",
                               __func__, i);
                        continue;
                }
                device_create_file(cpu, &dev_attr_cpu_capacity);
        }

        return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
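
/*
 * The attribute registered above ends up as a read-only sysfs file per CPU,
 * e.g. (value shown is illustrative):
 *
 *        $ cat /sys/devices/system/cpu/cpu0/cpu_capacity
 *        446
 */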

static int update_topology;

int topology_update_cpu_topology(void)
{
        return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
        update_topology = 1;
        rebuild_sched_domains();
        pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
        update_topology = 0;
}

static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
        kfree(raw_capacity);
        raw_capacity = NULL;

        return 0;
}

void topology_normalize_cpu_scale(void)
{
        u64 capacity;
        u64 capacity_scale;
        int cpu;

        if (!raw_capacity)
                return;

        capacity_scale = 1;
        for_each_possible_cpu(cpu) {
                capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
                capacity_scale = max(capacity, capacity_scale);
        }

        pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
        for_each_possible_cpu(cpu) {
                capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
                capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
                        capacity_scale);
                topology_set_cpu_scale(cpu, capacity);
                pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                        cpu, topology_get_cpu_scale(cpu));
        }
}
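
/*
 * Worked example (numbers made up): a big CPU with capacity-dmips-mhz = 1024
 * and freq_factor = 2000 (2 GHz) yields a raw capacity of 2048000; a little
 * CPU with capacity-dmips-mhz = 512 and freq_factor = 1000 (1 GHz) yields
 * 512000. capacity_scale becomes 2048000, so after normalization the big CPU
 * is set to 1024 (SCHED_CAPACITY_SCALE) and the little one to
 * (512000 << 10) / 2048000 = 256.
 */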

bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
        struct clk *cpu_clk;
        static bool cap_parsing_failed;
        int ret;
        u32 cpu_capacity;

        if (cap_parsing_failed)
                return false;

        ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
                                   &cpu_capacity);
        if (!ret) {
                if (!raw_capacity) {
                        raw_capacity = kcalloc(num_possible_cpus(),
                                               sizeof(*raw_capacity),
                                               GFP_KERNEL);
                        if (!raw_capacity) {
                                cap_parsing_failed = true;
                                return false;
                        }
                }
                raw_capacity[cpu] = cpu_capacity;
                pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
                        cpu_node, raw_capacity[cpu]);
                /*
                 * Update freq_factor for calculating early boot CPU capacities.
                 * For CPUs whose DVFS mechanism is not clk based there is no
                 * way to get the frequency value at this point, so assume they
                 * all run at the same frequency (by keeping the initial
                 * freq_factor value).
                 */
                cpu_clk = of_clk_get(cpu_node, 0);
                if (!PTR_ERR_OR_ZERO(cpu_clk)) {
                        per_cpu(freq_factor, cpu) =
                                clk_get_rate(cpu_clk) / 1000;
                        clk_put(cpu_clk);
                }
        } else {
                if (raw_capacity) {
                        pr_err("cpu_capacity: missing %pOF raw capacity\n",
                                cpu_node);
                        pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
                }
                cap_parsing_failed = true;
                free_raw_capacity();
        }

        return !ret;
}
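
/*
 * For illustration, a device-tree cpu node carrying the property parsed
 * above could look like this (values and compatible string made up):
 *
 *        cpu0: cpu@0 {
 *                device_type = "cpu";
 *                compatible = "arm,cortex-a53";
 *                reg = <0x0>;
 *                capacity-dmips-mhz = <578>;
 *        };
 */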

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
{
        struct cpufreq_policy *policy = data;
        int cpu;

        if (!raw_capacity)
                return 0;

        if (val != CPUFREQ_CREATE_POLICY)
                return 0;

        pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
                 cpumask_pr_args(policy->related_cpus),
                 cpumask_pr_args(cpus_to_visit));

        cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

        if (cpumask_empty(cpus_to_visit)) {
                topology_normalize_cpu_scale();
                schedule_work(&update_topology_flags_work);
                free_raw_capacity();
                pr_debug("cpu_capacity: parsing done\n");
                schedule_work(&parsing_done_work);
        }

        return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
        int ret;

        /*
         * on ACPI-based systems we need to use the default cpu capacity
         * until we have the necessary code to parse the cpu capacity, so
         * skip registering cpufreq notifier.
         */
        if (!acpi_disabled || !raw_capacity)
                return -EINVAL;

        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(cpus_to_visit, cpu_possible_mask);

        ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                        CPUFREQ_POLICY_NOTIFIER);

        if (ret)
                free_cpumask_var(cpus_to_visit);

        return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                         CPUFREQ_POLICY_NOTIFIER);
        free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT but
 * there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of
 * CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        cpu = of_cpu_node_to_id(cpu_node);
        if (cpu >= 0)
                topology_parse_cpu_capacity(cpu_node, cpu);
        else
                pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
                        cpu_node, cpumask_pr_args(cpu_possible_mask));

        of_node_put(cpu_node);
        return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
                             int core_id)
{
        char name[20];
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].package_id = package_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else if (cpu != -ENODEV) {
                                pr_err("%pOF: Can't get CPU for thread\n", t);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%pOF: Core has both threads and CPU\n",
                               core);
                        return -EINVAL;
                }

                cpu_topology[cpu].package_id = package_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf && cpu != -ENODEV) {
                pr_err("%pOF: Can't get CPU for leaf core\n", core);
                return -EINVAL;
        }

        return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[20];
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int package_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%pOF: cpu-map children should be clusters\n",
                                       c);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, package_id, core_id++);
                        } else {
                                pr_err("%pOF: Non-leaf cluster with core %s\n",
                                       cluster, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%pOF: empty cluster\n", cluster);

        if (leaf)
                package_id++;

        return 0;
}
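
/*
 * For illustration, parse_cluster()/parse_core() walk a /cpus/cpu-map node
 * shaped roughly like the (made up) dual-cluster example below; threadN
 * subnodes would appear under coreN on SMT systems:
 *
 *        cpu-map {
 *                cluster0 {
 *                        core0 { cpu = <&cpu0>; };
 *                        core1 { cpu = <&cpu1>; };
 *                };
 *                cluster1 {
 *                        core0 { cpu = <&cpu2>; };
 *                        core1 { cpu = <&cpu3>; };
 *                };
 *        };
 */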

static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided, cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map)
                goto out;

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        topology_normalize_cpu_scale();

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu)
                if (cpu_topology[cpu].package_id == -1)
                        ret = -EINVAL;

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
        const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

        /* Find the smaller of NUMA, core or LLC siblings */
        if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
                /* not NUMA in package, let's use the package siblings */
                core_mask = &cpu_topology[cpu].core_sibling;
        }
        if (cpu_topology[cpu].llc_id != -1) {
                if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
                        core_mask = &cpu_topology[cpu].llc_sibling;
        }

        return core_mask;
}

void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_online_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->llc_id == cpu_topo->llc_id) {
                        cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
                        cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                }

                if (cpuid_topo->package_id != cpu_topo->package_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}

static void clear_cpu_topology(int cpu)
{
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];

        cpumask_clear(&cpu_topo->llc_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

        cpumask_clear(&cpu_topo->core_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
        cpumask_clear(&cpu_topo->thread_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->package_id = -1;
                cpu_topo->llc_id = -1;

                clear_cpu_topology(cpu);
        }
}

void remove_cpu_topology(unsigned int cpu)
{
        int sibling;

        for_each_cpu(sibling, topology_core_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
        for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
        for_each_cpu(sibling, topology_llc_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

        clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
        return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (parse_acpi_topology())
                reset_cpu_topology();
        else if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
}
#endif