linux/arch/s390/kernel/topology.c
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL  (0UL)
#define PTF_VERTICAL    (1UL)
#define PTF_CHECK       (2UL)

struct mask_info {
        struct mask_info *next;
        unsigned char id;
        cpumask_t mask;
};

static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static int topology_enabled = 1;
static DECLARE_WORK(topology_work, topology_work_fn);

/* topology_lock protects the socket and book linked lists */
static DEFINE_SPINLOCK(topology_lock);
static struct mask_info socket_info;
static struct mask_info book_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

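/*
 * Return the cpumask of the topology container (socket or book) that
 * contains @cpu. If topology is disabled or not available, the returned
 * mask contains only @cpu itself.
 */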
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
        cpumask_t mask;

        cpumask_copy(&mask, cpumask_of(cpu));
        if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
                return mask;
        for (; info; info = info->next) {
                if (cpumask_test_cpu(cpu, &info->mask))
                        return info->mask;
        }
        return mask;
}

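/*
 * Add all CPUs described by a topology CPU entry to the given book and
 * socket masks and record their core, socket and book IDs. With
 * @one_socket_per_cpu each core is assigned its own socket.
 */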
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
                                          struct mask_info *book,
                                          struct mask_info *socket,
                                          int one_socket_per_cpu)
{
        unsigned int cpu;

        for_each_set_bit(cpu, &tl_cpu->mask[0], TOPOLOGY_CPU_BITS) {
                unsigned int rcpu;
                int lcpu;

                rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
                lcpu = smp_find_processor_id(rcpu);
                if (lcpu < 0)
                        continue;
                cpumask_set_cpu(lcpu, &book->mask);
                cpu_topology[lcpu].book_id = book->id;
                cpumask_set_cpu(lcpu, &socket->mask);
                cpu_topology[lcpu].core_id = rcpu;
                if (one_socket_per_cpu) {
                        cpu_topology[lcpu].socket_id = rcpu;
                        socket = socket->next;
                } else {
                        cpu_topology[lcpu].socket_id = socket->id;
                }
                smp_cpu_set_polarization(lcpu, tl_cpu->pp);
        }
        return socket;
}

static void clear_masks(void)
{
        struct mask_info *info;

        info = &socket_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
        }
        info = &book_info;
        while (info) {
                cpumask_clear(&info->mask);
                info = info->next;
        }
}

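/*
 * Topology list entries have different sizes: nesting level 0 entries
 * describe CPUs, all higher levels describe containers.
 */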
static union topology_entry *next_tle(union topology_entry *tle)
{
        if (!tle->nl)
                return (union topology_entry *)((struct topology_cpu *)tle + 1);
        return (union topology_entry *)((struct topology_container *)tle + 1);
}

static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
        struct mask_info *socket = &socket_info;
        struct mask_info *book = &book_info;
        union topology_entry *tle, *end;

        tle = info->tle;
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
                case 2:
                        book = book->next;
                        book->id = tle->container.id;
                        break;
                case 1:
                        socket = socket->next;
                        socket->id = tle->container.id;
                        break;
                case 0:
                        add_cpus_to_mask(&tle->cpu, book, socket, 0);
                        break;
                default:
                        clear_masks();
                        return;
                }
                tle = next_tle(tle);
        }
}

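/*
 * Two-level variant used for z10 machine types: level 1 entries are
 * books, level 0 entries are CPUs, and each core is assigned its own
 * socket.
 */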
static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
        struct mask_info *socket = &socket_info;
        struct mask_info *book = &book_info;
        union topology_entry *tle, *end;

        tle = info->tle;
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {
                case 1:
                        book = book->next;
                        book->id = tle->container.id;
                        break;
                case 0:
                        socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
                        break;
                default:
                        clear_masks();
                        return;
                }
                tle = next_tle(tle);
        }
}

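/*
 * Rebuild the socket and book masks from the topology information.
 * Machine types 0x2097 and 0x2098 (z10) use the two-level variant.
 */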
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
        struct cpuid cpu_id;

        spin_lock_irq(&topology_lock);
        get_cpu_id(&cpu_id);
        clear_masks();
        switch (cpu_id.machine) {
        case 0x2097:
        case 0x2098:
                __tl_to_masks_z10(info);
                break;
        default:
                __tl_to_masks_generic(info);
        }
        spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
        int cpu;

        mutex_lock(&smp_cpu_state_mutex);
        for_each_possible_cpu(cpu)
                smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
        mutex_unlock(&smp_cpu_state_mutex);
}

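/*
 * Execute the PTF (Perform Topology Function) instruction with the
 * given function code and return its condition code.
 */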
static int ptf(unsigned long fc)
{
        int rc;

        asm volatile(
                "       .insn   rre,0xb9a20000,%1,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (rc)
                : "d" (fc)  : "cc");
        return rc;
}

int topology_set_cpu_management(int fc)
{
        int cpu, rc;

        if (!MACHINE_HAS_TOPOLOGY)
                return -EOPNOTSUPP;
        if (fc)
                rc = ptf(PTF_VERTICAL);
        else
                rc = ptf(PTF_HORIZONTAL);
        if (rc)
                return -EBUSY;
        for_each_possible_cpu(cpu)
                smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
        return rc;
}

static void update_cpu_masks(void)
{
        unsigned long flags;
        int cpu;

        spin_lock_irqsave(&topology_lock, flags);
        for_each_possible_cpu(cpu) {
                cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
                cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
                if (!MACHINE_HAS_TOPOLOGY) {
                        cpu_topology[cpu].core_id = cpu;
                        cpu_topology[cpu].socket_id = cpu;
                        cpu_topology[cpu].book_id = cpu;
                }
        }
        spin_unlock_irqrestore(&topology_lock, flags);
}

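/*
 * Store the CPU topology information (SYSIB 15.1.x) into @info, using
 * the deepest nesting level supported (3 if available, otherwise 2).
 */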
void store_topology(struct sysinfo_15_1_x *info)
{
        if (topology_max_mnest >= 3)
                stsi(info, 15, 1, 3);
        else
                stsi(info, 15, 1, 2);
}

int arch_update_cpu_topology(void)
{
        struct sysinfo_15_1_x *info = tl_info;
        struct device *dev;
        int cpu;

        if (!MACHINE_HAS_TOPOLOGY) {
                update_cpu_masks();
                topology_update_polarization_simple();
                return 0;
        }
        store_topology(info);
        tl_to_masks(info);
        update_cpu_masks();
        for_each_online_cpu(cpu) {
                dev = get_cpu_device(cpu);
                kobject_uevent(&dev->kobj, KOBJ_CHANGE);
        }
        return 1;
}

static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}

void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
        if (ptf(PTF_CHECK))
                topology_schedule_update();
        set_topology_timer();
}

static struct timer_list topology_timer =
        TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

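/*
 * Poll every 100ms while a topology change is expected (topology_poll
 * counter > 0), otherwise fall back to polling once a minute.
 */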
static void set_topology_timer(void)
{
        if (atomic_add_unless(&topology_poll, -1, 0))
                mod_timer(&topology_timer, jiffies + HZ / 10);
        else
                mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
        if (!MACHINE_HAS_TOPOLOGY)
                return;
        /* This is racy, but it doesn't matter since it is just a heuristic.
         * Worst case is that we poll in a higher frequency for a bit longer.
         */
        if (atomic_read(&topology_poll) > 60)
                return;
        atomic_add(60, &topology_poll);
        set_topology_timer();
}

static int __init early_parse_topology(char *p)
{
        if (strncmp(p, "off", 3))
                return 0;
        topology_enabled = 0;
        return 0;
}
early_param("topology", early_parse_topology);

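/*
 * Allocate one mask_info per container that can exist at the given
 * nesting level (offset 1 = sockets, offset 2 = books), based on the
 * magnitude values reported in SYSIB 15.1.x.
 */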
static void __init alloc_masks(struct sysinfo_15_1_x *info,
                               struct mask_info *mask, int offset)
{
        int i, nr_masks;

        nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
        for (i = 0; i < info->mnest - offset; i++)
                nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
        nr_masks = max(nr_masks, 1);
        for (i = 0; i < nr_masks; i++) {
                mask->next = alloc_bootmem(sizeof(struct mask_info));
                mask = mask->next;
        }
}

void __init s390_init_cpu_topology(void)
{
        struct sysinfo_15_1_x *info;
        int i;

        if (!MACHINE_HAS_TOPOLOGY)
                return;
        tl_info = alloc_bootmem_pages(PAGE_SIZE);
        info = tl_info;
        store_topology(info);
        pr_info("The CPU configuration topology of the machine is:");
        for (i = 0; i < TOPOLOGY_NR_MAG; i++)
                printk(KERN_CONT " %d", info->mag[i]);
        printk(KERN_CONT " / %d\n", info->mnest);
        alloc_masks(info, &socket_info, 1);
        alloc_masks(info, &book_info, 2);
}

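/*
 * The "dispatching" sysfs attribute selects horizontal (0) or vertical
 * (1) CPU polarization via the PTF instruction.
 */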
static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        count = sprintf(buf, "%d\n", cpu_management);
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}

static ssize_t dispatching_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        int val, rc;
        char delim;

        if (sscanf(buf, "%d %c", &val, &delim) != 1)
                return -EINVAL;
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
        get_online_cpus();
        mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
        rc = topology_set_cpu_management(val);
        if (rc)
                goto out;
        cpu_management = val;
        topology_expect_change();
out:
        mutex_unlock(&smp_cpu_state_mutex);
        put_online_cpus();
        return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
                         dispatching_store);

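/*
 * The per-cpu "polarization" sysfs attribute reports the current
 * polarization of a CPU: horizontal or vertical:{low,medium,high}.
 */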
static ssize_t cpu_polarization_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int cpu = dev->id;
        ssize_t count;

        mutex_lock(&smp_cpu_state_mutex);
        switch (smp_cpu_get_polarization(cpu)) {
        case POLARIZATION_HRZ:
                count = sprintf(buf, "horizontal\n");
                break;
        case POLARIZATION_VL:
                count = sprintf(buf, "vertical:low\n");
                break;
        case POLARIZATION_VM:
                count = sprintf(buf, "vertical:medium\n");
                break;
        case POLARIZATION_VH:
                count = sprintf(buf, "vertical:high\n");
                break;
        default:
                count = sprintf(buf, "unknown\n");
                break;
        }
        mutex_unlock(&smp_cpu_state_mutex);
        return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
        &dev_attr_polarization.attr,
        NULL,
};

static struct attribute_group topology_cpu_attr_group = {
        .attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
        return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static int __init topology_init(void)
{
        if (!MACHINE_HAS_TOPOLOGY) {
                topology_update_polarization_simple();
                goto out;
        }
        set_topology_timer();
out:
        update_cpu_masks();
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);