/* linux/arch/s390/kernel/topology.c */
   1/*
   2 *    Copyright IBM Corp. 2007
   3 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
   4 */
   5
   6#define KMSG_COMPONENT "cpu"
   7#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
   8
   9#include <linux/kernel.h>
  10#include <linux/mm.h>
  11#include <linux/init.h>
  12#include <linux/device.h>
  13#include <linux/bootmem.h>
  14#include <linux/sched.h>
  15#include <linux/workqueue.h>
  16#include <linux/cpu.h>
  17#include <linux/smp.h>
  18#include <linux/cpuset.h>
  19#include <asm/delay.h>
  20
/* Function codes for the Perform Topology Function instruction (see ptf()). */
#define PTF_HORIZONTAL  (0UL)
#define PTF_VERTICAL    (1UL)
#define PTF_CHECK       (2UL)

/*
 * One node in a singly linked list of cpumasks; one list exists per
 * topology level (core and, optionally, book).  id holds the container
 * id taken from the corresponding SYSIB 15.1.x entry.
 */
struct mask_info {
        struct mask_info *next;
        unsigned char id;
        cpumask_t mask;
};
  30
/* Topology support is on by default; the "topology=off" parameter clears it. */
static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;  /* SYSIB 15.1.x buffer (one page) */
static struct timer_list topology_timer;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

/* Head of the core mask list plus per-cpu core map/id, rebuilt from SYSIB. */
static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

#ifdef CONFIG_SCHED_BOOK
/* Book level above cores — only present with the book scheduling domain. */
static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];
#endif
  49
  50static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
  51{
  52        cpumask_t mask;
  53
  54        cpumask_clear(&mask);
  55        if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
  56                cpumask_copy(&mask, cpumask_of(cpu));
  57                return mask;
  58        }
  59        while (info) {
  60                if (cpumask_test_cpu(cpu, &info->mask)) {
  61                        mask = info->mask;
  62                        break;
  63                }
  64                info = info->next;
  65        }
  66        if (cpumask_empty(&mask))
  67                cpumask_copy(&mask, cpumask_of(cpu));
  68        return mask;
  69}
  70
/*
 * Add every CPU listed in one topology CPU entry (@tl_cpu) to the given
 * book and core mask_info nodes, record the matching book/core ids and
 * the entry's polarization for each logical CPU.
 */
static void add_cpus_to_mask(struct topology_cpu *tl_cpu,
                             struct mask_info *book, struct mask_info *core)
{
        unsigned int cpu;

        /* Walk all bits set in the entry's CPU bit mask. */
        for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
             cpu < TOPOLOGY_CPU_BITS;
             cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
        {
                unsigned int rcpu, lcpu;

                /*
                 * Derive the physical (remote) CPU address from the bit
                 * position: the mask appears to be stored mirrored, so
                 * mirror the bit index and add the entry's origin —
                 * NOTE(review): confirm bit ordering against the SYSIB
                 * 15.1.x format description.
                 */
                rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
                /* Map the physical address back to a logical CPU number. */
                for_each_present_cpu(lcpu) {
                        if (cpu_logical_map(lcpu) != rcpu)
                                continue;
#ifdef CONFIG_SCHED_BOOK
                        cpumask_set_cpu(lcpu, &book->mask);
                        cpu_book_id[lcpu] = book->id;
#endif
                        cpumask_set_cpu(lcpu, &core->mask);
                        cpu_core_id[lcpu] = core->id;
                        smp_cpu_polarization[lcpu] = tl_cpu->pp;
                }
        }
}
  96
  97static void clear_masks(void)
  98{
  99        struct mask_info *info;
 100
 101        info = &core_info;
 102        while (info) {
 103                cpumask_clear(&info->mask);
 104                info = info->next;
 105        }
 106#ifdef CONFIG_SCHED_BOOK
 107        info = &book_info;
 108        while (info) {
 109                cpumask_clear(&info->mask);
 110                info = info->next;
 111        }
 112#endif
 113}
 114
 115static union topology_entry *next_tle(union topology_entry *tle)
 116{
 117        if (!tle->nl)
 118                return (union topology_entry *)((struct topology_cpu *)tle + 1);
 119        return (union topology_entry *)((struct topology_container *)tle + 1);
 120}
 121
/*
 * Parse the SYSIB 15.1.x topology block @info and rebuild the core (and,
 * if configured, book) mask lists.  Holds topology_lock with interrupts
 * disabled so readers never see a half-built list.
 */
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
#ifdef CONFIG_SCHED_BOOK
        struct mask_info *book = &book_info;
#else
        struct mask_info *book = NULL;
#endif
        struct mask_info *core = &core_info;
        union topology_entry *tle, *end;


        spin_lock_irq(&topology_lock);
        clear_masks();
        tle = info->tle;
        /* Entries run from info->tle up to info->length bytes into the block. */
        end = (union topology_entry *)((unsigned long)info + info->length);
        while (tle < end) {
                switch (tle->nl) {      /* nesting level of this entry */
#ifdef CONFIG_SCHED_BOOK
                case 2:         /* book container: move to the next book node */
                        book = book->next;
                        book->id = tle->container.id;
                        break;
#endif
                case 1:         /* core container: move to the next core node */
                        core = core->next;
                        core->id = tle->container.id;
                        break;
                case 0:         /* leaf: CPUs belonging to the current book/core */
                        add_cpus_to_mask(&tle->cpu, book, core);
                        break;
                default:
                        /* Unknown nesting level: discard the partial result. */
                        clear_masks();
                        goto out;
                }
                tle = next_tle(tle);
        }
out:
        spin_unlock_irq(&topology_lock);
}
 161
 162static void topology_update_polarization_simple(void)
 163{
 164        int cpu;
 165
 166        mutex_lock(&smp_cpu_state_mutex);
 167        for_each_possible_cpu(cpu)
 168                smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
 169        mutex_unlock(&smp_cpu_state_mutex);
 170}
 171
/*
 * Execute the Perform Topology Function instruction with function code
 * @fc (PTF_HORIZONTAL, PTF_VERTICAL or PTF_CHECK) and return its
 * condition code (0 on success).
 */
static int ptf(unsigned long fc)
{
        int rc;

        asm volatile(
                "       .insn   rre,0xb9a20000,%1,%1\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (rc)
                : "d" (fc)  : "cc");
        return rc;
}
 184
 185int topology_set_cpu_management(int fc)
 186{
 187        int cpu;
 188        int rc;
 189
 190        if (!MACHINE_HAS_TOPOLOGY)
 191                return -EOPNOTSUPP;
 192        if (fc)
 193                rc = ptf(PTF_VERTICAL);
 194        else
 195                rc = ptf(PTF_HORIZONTAL);
 196        if (rc)
 197                return -EBUSY;
 198        for_each_possible_cpu(cpu)
 199                smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
 200        return rc;
 201}
 202
 203static void update_cpu_core_map(void)
 204{
 205        unsigned long flags;
 206        int cpu;
 207
 208        spin_lock_irqsave(&topology_lock, flags);
 209        for_each_possible_cpu(cpu) {
 210                cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
 211#ifdef CONFIG_SCHED_BOOK
 212                cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
 213#endif
 214        }
 215        spin_unlock_irqrestore(&topology_lock, flags);
 216}
 217
 218void store_topology(struct sysinfo_15_1_x *info)
 219{
 220#ifdef CONFIG_SCHED_BOOK
 221        int rc;
 222
 223        rc = stsi(info, 15, 1, 3);
 224        if (rc != -ENOSYS)
 225                return;
 226#endif
 227        stsi(info, 15, 1, 2);
 228}
 229
/*
 * Arch hook called by the scheduler: refresh the topology maps and notify
 * user space with a change uevent on every online CPU's sysdev.  Returns
 * 1 when the scheduler domains must be rebuilt, 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
        struct sysinfo_15_1_x *info = tl_info;
        struct sys_device *sysdev;
        int cpu;

        if (!MACHINE_HAS_TOPOLOGY) {
                /* No hardware data: flat map, simple horizontal polarization. */
                update_cpu_core_map();
                topology_update_polarization_simple();
                return 0;
        }
        store_topology(info);
        tl_to_cores(info);
        update_cpu_core_map();
        for_each_online_cpu(cpu) {
                sysdev = get_cpu_sysdev(cpu);
                kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
        }
        return 1;
}
 250
/* Deferred work: rebuild the scheduler domains after a topology change. */
static void topology_work_fn(struct work_struct *work)
{
        rebuild_sched_domains();
}
 255
/* Queue an asynchronous scheduler-domain rebuild. */
void topology_schedule_update(void)
{
        schedule_work(&topology_work);
}
 260
/*
 * Periodic timer callback: when PTF_CHECK reports a pending topology
 * change, schedule an update; in any case re-arm the timer.
 */
static void topology_timer_fn(unsigned long ignored)
{
        if (ptf(PTF_CHECK))
                topology_schedule_update();
        set_topology_timer();
}
 267
 268static void set_topology_timer(void)
 269{
 270        topology_timer.function = topology_timer_fn;
 271        topology_timer.data = 0;
 272        topology_timer.expires = jiffies + 60 * HZ;
 273        add_timer(&topology_timer);
 274}
 275
 276static int __init early_parse_topology(char *p)
 277{
 278        if (strncmp(p, "off", 3))
 279                return 0;
 280        topology_enabled = 0;
 281        return 0;
 282}
 283early_param("topology", early_parse_topology);
 284
 285static int __init init_topology_update(void)
 286{
 287        int rc;
 288
 289        rc = 0;
 290        if (!MACHINE_HAS_TOPOLOGY) {
 291                topology_update_polarization_simple();
 292                goto out;
 293        }
 294        init_timer_deferrable(&topology_timer);
 295        set_topology_timer();
 296out:
 297        update_cpu_core_map();
 298        return rc;
 299}
 300__initcall(init_topology_update);
 301
 302static void alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask,
 303                        int offset)
 304{
 305        int i, nr_masks;
 306
 307        nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
 308        for (i = 0; i < info->mnest - offset; i++)
 309                nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
 310        nr_masks = max(nr_masks, 1);
 311        for (i = 0; i < nr_masks; i++) {
 312                mask->next = alloc_bootmem(sizeof(struct mask_info));
 313                mask = mask->next;
 314        }
 315}
 316
/*
 * Early boot setup: allocate the SYSIB buffer, read the machine topology,
 * log the magnitude vector and pre-allocate the per-level mask lists.
 */
void __init s390_init_cpu_topology(void)
{
        struct sysinfo_15_1_x *info;
        int i;

        if (!MACHINE_HAS_TOPOLOGY)
                return;
        /* One page is enough for the SYSIB 15.1.x block. */
        tl_info = alloc_bootmem_pages(PAGE_SIZE);
        info = tl_info;
        store_topology(info);
        pr_info("The CPU configuration topology of the machine is:");
        for (i = 0; i < TOPOLOGY_NR_MAG; i++)
                printk(" %d", info->mag[i]);
        printk(" / %d\n", info->mnest);
        /* Offsets 2/3 select the core and book nesting levels. */
        alloc_masks(info, &core_info, 2);
#ifdef CONFIG_SCHED_BOOK
        alloc_masks(info, &book_info, 3);
#endif
}
 336