linux/arch/powerpc/platforms/85xx/smp.c
/*
 * Author: Andy Fleming <afleming@freescale.com>
 *         Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/fsl_guts.h>
#include <asm/code-patching.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

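/*
 * ePAPR spin table entry: secondary cores spin in the boot loader until
 * addr_h/addr_l is updated with a release address. smp_85xx_kick_cpu()
 * writes the core's PIR and entry point here to start the core.
 */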
struct epapr_spin_table {
        u32     addr_h;
        u32     addr_l;
        u32     r3_h;
        u32     r3_l;
        u32     reserved;
        u32     pir;
};

static struct ccsr_guts __iomem *guts;
static u64 timebase;
static int tb_req;
static int tb_valid;

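/*
 * Freeze or unfreeze the timebase on all cores by toggling the
 * timebase-disable bits in the guts DEVDISR register; the read-back
 * ensures the update has been posted before we continue.
 */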
static void mpc85xx_timebase_freeze(int freeze)
{
        uint32_t mask;

        mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
        if (freeze)
                setbits32(&guts->devdisr, mask);
        else
                clrbits32(&guts->devdisr, mask);

        in_be32(&guts->devdisr);
}

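/*
 * Timebase synchronization handshake used when a core comes online:
 * the boot CPU waits for the new CPU to set tb_req, freezes the
 * timebase, publishes its value via 'timebase' and 'tb_valid', and
 * unfreezes once the new CPU has copied it and cleared tb_valid.
 */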
static void mpc85xx_give_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        while (!tb_req)
                barrier();
        tb_req = 0;

        mpc85xx_timebase_freeze(1);
        timebase = get_tb();
        mb();
        tb_valid = 1;

        while (tb_valid)
                barrier();

        mpc85xx_timebase_freeze(0);

        local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        tb_req = 1;
        while (!tb_valid)
                barrier();

        set_tb(timebase >> 32, timebase & 0xffffffff);
        isync();
        tb_valid = 0;

        local_irq_restore(flags);
}

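/*
 * Offline path for CPU hotplug: mark the CPU dead, disable its timer
 * interrupts (TCR), flush and disable the L1 cache, then park the core
 * in NAP mode (HID0[NAP] + MSR[WE]) until it is reset by a later kick.
 */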
#ifdef CONFIG_HOTPLUG_CPU
static void smp_85xx_mach_cpu_die(void)
{
        unsigned int cpu = smp_processor_id();
        u32 tmp;

        local_irq_disable();
        idle_task_exit();
        generic_set_cpu_dead(cpu);
        mb();

        mtspr(SPRN_TCR, 0);

        __flush_disable_L1();
        tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
        mtspr(SPRN_HID0, tmp);
        isync();

        /* Enter NAP mode. */
        tmp = mfmsr();
        tmp |= MSR_WE;
        mb();
        mtmsr(tmp);
        isync();

        while (1)
                ;
}
#endif

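/*
 * The boot loader may map the spin table cache-inhibited, so flush the
 * cache lines covering it around every access to avoid acting on (or
 * leaving behind) stale data.
 */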
static inline void flush_spin_table(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
        return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

static int smp_85xx_kick_cpu(int nr)
{
        unsigned long flags;
        const u64 *cpu_rel_addr;
        struct epapr_spin_table __iomem *spin_table;
        struct device_node *np;
        int hw_cpu = get_hard_smp_processor_id(nr);
        int ioremappable;
        int ret = 0;

        WARN_ON(nr < 0 || nr >= NR_CPUS);
        WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

        pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

        np = of_get_cpu_node(nr, NULL);
        cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

        if (cpu_rel_addr == NULL) {
                printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
                return -ENOENT;
        }

        /*
         * A secondary core could be in a spinloop in the bootpage
         * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
         * The bootpage and highmem can be accessed via ioremap(), but
         * we need to access the spinloop directly if it's in lowmem.
         */
        ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

        /* Map the spin table */
        if (ioremappable)
                spin_table = ioremap_prot(*cpu_rel_addr,
                        sizeof(struct epapr_spin_table), _PAGE_COHERENT);
        else
                spin_table = phys_to_virt(*cpu_rel_addr);

        local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
        /* Corresponding to generic_set_cpu_dead() */
        generic_set_cpu_up(nr);

        if (system_state == SYSTEM_RUNNING) {
                /*
                 * To stay compatible with old boot programs that use a
                 * cache-inhibited spin table, we need to flush the cache
                 * before accessing the spin table to invalidate any stale
                 * data, and flush again after writing to push the update
                 * out to memory.
                 */
                flush_spin_table(spin_table);
                out_be32(&spin_table->addr_l, 0);
                flush_spin_table(spin_table);

                /*
                 * We don't set the BPTR register here since it already points
                 * to the boot page properly.
                 */
                mpic_reset_core(nr);

                /*
                 * Wait until the core is ready. We need to invalidate the
                 * stale data in case the boot loader uses a cache-inhibited
                 * spin table.
                 */
                if (!spin_event_timeout(
                                read_spin_table_addr_l(spin_table) == 1,
                                10000, 100)) {
                        pr_err("%s: timeout waiting for core %d to reset\n",
                                                        __func__, hw_cpu);
                        ret = -ENOENT;
                        goto out;
                }

                /* clear the acknowledge status */
                __secondary_hold_acknowledge = -1;
        }
#endif
        flush_spin_table(spin_table);
        out_be32(&spin_table->pir, hw_cpu);
        out_be32(&spin_table->addr_l, __pa(__early_start));
        flush_spin_table(spin_table);

        /* Wait a bit for the CPU to ack. */
        if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
                                        10000, 100)) {
                pr_err("%s: timeout waiting for core %d to ack\n",
                                                __func__, hw_cpu);
                ret = -ENOENT;
                goto out;
        }
out:
#else
        smp_generic_kick_cpu(nr);

        flush_spin_table(spin_table);
        out_be32(&spin_table->pir, hw_cpu);
        out_be64((u64 *)(&spin_table->addr_h),
                __pa(ppc_function_entry(generic_secondary_smp_init)));
        flush_spin_table(spin_table);
#endif

        local_irq_restore(flags);

        if (ioremappable)
                iounmap(spin_table);

        return ret;
}

struct smp_ops_t smp_85xx_ops = {
        .kick_cpu = smp_85xx_kick_cpu,
        .cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable    = generic_cpu_disable,
        .cpu_die        = generic_cpu_die,
#endif
#ifdef CONFIG_KEXEC
        .give_timebase  = smp_generic_give_timebase,
        .take_timebase  = smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
        local_irq_disable();

        if (secondary) {
                atomic_inc(&kexec_down_cpus);
                /* loop forever */
                while (1);
        }
}

static void mpc85xx_smp_kexec_down(void *arg)
{
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(0, 1);
}

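/* Map a physical page (which may be in highmem) and flush it from the dcache. */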
static void map_and_flush(unsigned long paddr)
{
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned long kaddr = (unsigned long)kmap(page);

        flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
        kunmap(page);
}

/*
 * Before we reset the other cores, we need to flush the relevant caches
 * out to memory so nothing gets corrupted. Some of these flushes are
 * performed out of an overabundance of caution, as interrupts are not
 * disabled yet and we can still be migrated to another core.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        unsigned long paddr;
        int i;

        if (image->type == KEXEC_TYPE_DEFAULT) {
                /* normal kexec images are stored in temporary pages */
                for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
                     ptr = (entry & IND_INDIRECTION) ?
                                phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
                        if (!(entry & IND_DESTINATION)) {
                                map_and_flush(entry);
                        }
                }
                /* flush out last IND_DONE page */
                map_and_flush(entry);
        } else {
                /* crash type kexec images are copied to the crash region */
                for (i = 0; i < image->nr_segments; i++) {
                        struct kexec_segment *seg = &image->segment[i];
                        for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
                             paddr += PAGE_SIZE) {
                                map_and_flush(paddr);
                        }
                }
        }

        /* also flush the kimage struct that will be passed in */
        flush_dcache_range((unsigned long)image,
                           (unsigned long)image + sizeof(*image));
}

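/*
 * Flush everything the new kernel will need, wait for the secondary
 * CPUs to park themselves in mpc85xx_smp_kexec_cpu_down(), reset them
 * via the MPIC, and hand over to the generic kexec path.
 */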
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
        int timeout = INT_MAX;
        int i, num_cpus = num_present_cpus();

        mpc85xx_smp_flush_dcache_kexec(image);

        if (image->type == KEXEC_TYPE_DEFAULT)
                smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

        while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
               (timeout > 0))
                timeout--;

        if (!timeout)
                printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

        for_each_online_cpu(i) {
                if (i == smp_processor_id())
                        continue;
                mpic_reset_core(i);
        }

        default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */

static void smp_85xx_setup_cpu(int cpu_nr)
{
        if (smp_85xx_ops.probe == smp_mpic_probe)
                mpic_setup_this_cpu();

        if (cpu_has_feature(CPU_FTR_DBELL))
                doorbell_setup_this_cpu();
}

static const struct of_device_id mpc85xx_smp_guts_ids[] = {
        { .compatible = "fsl,mpc8572-guts", },
        { .compatible = "fsl,p1020-guts", },
        { .compatible = "fsl,p1021-guts", },
        { .compatible = "fsl,p1022-guts", },
        { .compatible = "fsl,p1023-guts", },
        { .compatible = "fsl,p2020-guts", },
        {},
};

void __init mpc85xx_smp_init(void)
{
        struct device_node *np;

        smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;

        np = of_find_node_by_type(NULL, "open-pic");
        if (np) {
                smp_85xx_ops.probe = smp_mpic_probe;
                smp_85xx_ops.message_pass = smp_mpic_message_pass;
        }

        if (cpu_has_feature(CPU_FTR_DBELL)) {
                /*
                 * If left NULL, .message_pass defaults to
                 * smp_muxed_ipi_message_pass
                 */
                smp_85xx_ops.message_pass = NULL;
                smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
        }

        np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
        if (np) {
                guts = of_iomap(np, 0);
                of_node_put(np);
                if (!guts) {
                        pr_err("%s: Could not map guts node address\n",
                                                                __func__);
                        return;
                }
                smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
                smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
                ppc_md.cpu_die = smp_85xx_mach_cpu_die;
#endif
        }

        smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
        ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
        ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}