linux/arch/mips/kernel/smtc.c
   1/*
   2 * This program is free software; you can redistribute it and/or
   3 * modify it under the terms of the GNU General Public License
   4 * as published by the Free Software Foundation; either version 2
   5 * of the License, or (at your option) any later version.
   6 *
   7 * This program is distributed in the hope that it will be useful,
   8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
   9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  10 * GNU General Public License for more details.
  11 *
  12 * You should have received a copy of the GNU General Public License
  13 * along with this program; if not, write to the Free Software
  14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  15 *
  16 * Copyright (C) 2004 Mips Technologies, Inc
  17 * Copyright (C) 2008 Kevin D. Kissell
  18 */
  19
  20#include <linux/clockchips.h>
  21#include <linux/kernel.h>
  22#include <linux/sched.h>
  23#include <linux/smp.h>
  24#include <linux/cpumask.h>
  25#include <linux/interrupt.h>
  26#include <linux/kernel_stat.h>
  27#include <linux/module.h>
  28
  29#include <asm/cpu.h>
  30#include <asm/processor.h>
  31#include <asm/atomic.h>
  32#include <asm/system.h>
  33#include <asm/hardirq.h>
  34#include <asm/hazards.h>
  35#include <asm/irq.h>
  36#include <asm/mmu_context.h>
  37#include <asm/mipsregs.h>
  38#include <asm/cacheflush.h>
  39#include <asm/time.h>
  40#include <asm/addrspace.h>
  41#include <asm/smtc.h>
  42#include <asm/smtc_proc.h>
  43
  44/*
  45 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
  46 * in do_IRQ. These are passed in setup_irq_smtc() and stored
  47 * in this table.
  48 */
  49unsigned long irq_hwmask[NR_IRQS];
  50
  51#define LOCK_MT_PRA() \
  52        local_irq_save(flags); \
  53        mtflags = dmt()
  54
  55#define UNLOCK_MT_PRA() \
  56        emt(mtflags); \
  57        local_irq_restore(flags)
  58
  59#define LOCK_CORE_PRA() \
  60        local_irq_save(flags); \
  61        mtflags = dvpe()
  62
  63#define UNLOCK_CORE_PRA() \
  64        evpe(mtflags); \
  65        local_irq_restore(flags)
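/*
 * Usage sketch (modeled on smtc_boot_secondary() and ipi_interrupt()
 * below): callers must declare "unsigned long flags;" and an
 * int/unsigned int "mtflags;" in the enclosing scope, roughly:
 *
 *	unsigned long flags;
 *	int mtflags;
 *
 *	LOCK_MT_PRA();
 *	settc(cpu_data[cpu].tc_id);
 *	... MFTR/MTTR accesses to the target TC ...
 *	UNLOCK_MT_PRA();
 *
 * LOCK_MT_PRA() only inhibits the other threads of the local VPE (dmt),
 * while LOCK_CORE_PRA() also stops the other VPEs of the core (dvpe).
 */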
  66
  67/*
  68 * Data structures purely associated with SMTC parallelism
  69 */
  70
  71
  72/*
  73 * Table for tracking ASIDs whose lifetime is prolonged.
  74 */
  75
  76asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
  77
  78/*
  79 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
  80 */
  81
  82#define IPIBUF_PER_CPU 4
  83
  84struct smtc_ipi_q IPIQ[NR_CPUS];
  85static struct smtc_ipi_q freeIPIq;
  86
  87
  88/* Forward declarations */
  89
  90void ipi_decode(struct smtc_ipi *);
  91static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
  92static void setup_cross_vpe_interrupts(unsigned int nvpe);
  93void init_smtc_stats(void);
  94
  95/* Global SMTC Status */
  96
  97unsigned int smtc_status;
  98
  99/* Boot command line configuration overrides */
 100
 101static int vpe0limit;
 102static int ipibuffers;
 103static int nostlb;
 104static int asidmask;
 105unsigned long smtc_asid_mask = 0xff;
 106
 107static int __init vpe0tcs(char *str)
 108{
 109        get_option(&str, &vpe0limit);
 110
 111        return 1;
 112}
 113
 114static int __init ipibufs(char *str)
 115{
 116        get_option(&str, &ipibuffers);
 117        return 1;
 118}
 119
 120static int __init stlb_disable(char *s)
 121{
 122        nostlb = 1;
 123        return 1;
 124}
 125
 126static int __init asidmask_set(char *str)
 127{
 128        get_option(&str, &asidmask);
 129        switch (asidmask) {
 130        case 0x1:
 131        case 0x3:
 132        case 0x7:
 133        case 0xf:
 134        case 0x1f:
 135        case 0x3f:
 136        case 0x7f:
 137        case 0xff:
 138                smtc_asid_mask = (unsigned long)asidmask;
 139                break;
 140        default:
 141                printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
 142        }
 143        return 1;
 144}
 145
 146__setup("vpe0tcs=", vpe0tcs);
 147__setup("ipibufs=", ipibufs);
 148__setup("nostlb", stlb_disable);
 149__setup("asidmask=", asidmask_set);
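/*
 * Example (hypothetical values): booting with
 *
 *	vpe0tcs=1 ipibufs=32 nostlb asidmask=0x3f
 *
 * would bind a single TC to VPE 0, allocate 32 IPI message buffers in
 * total, inhibit shared-TLB configuration, and restrict ASID allocation
 * to a 6-bit mask.
 */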
 150
 151#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 152
 153static int hang_trig;
 154
 155static int __init hangtrig_enable(char *s)
 156{
 157        hang_trig = 1;
 158        return 1;
 159}
 160
 161
 162__setup("hangtrig", hangtrig_enable);
 163
 164#define DEFAULT_BLOCKED_IPI_LIMIT 32
 165
 166static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
 167
 168static int __init tintq(char *str)
 169{
 170        get_option(&str, &timerq_limit);
 171        return 1;
 172}
 173
 174__setup("tintq=", tintq);
 175
 176static int imstuckcount[2][8];
 177/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
 178static int vpemask[2][8] = {
 179        {0, 0, 1, 0, 0, 0, 0, 1},
 180        {0, 0, 0, 0, 0, 0, 0, 1}
 181};
 182int tcnoprog[NR_CPUS];
 183static atomic_t idle_hook_initialized = {0};
 184static int clock_hang_reported[NR_CPUS];
 185
 186#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 187
 188/*
 189 * Configure shared TLB - VPC configuration bit must be set by caller
 190 */
 191
 192static void smtc_configure_tlb(void)
 193{
 194        int i, tlbsiz, vpes;
 195        unsigned long mvpconf0;
 196        unsigned long config1val;
 197
 198        /* Set up ASID preservation table */
 199        for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
 200            for(i = 0; i < MAX_SMTC_ASIDS; i++) {
 201                smtc_live_asid[vpes][i] = 0;
 202            }
 203        }
 204        mvpconf0 = read_c0_mvpconf0();
 205
 206        if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
 207                        >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
 208            /* If we have multiple VPEs, try to share the TLB */
 209            if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
 210                /*
 211                 * If TLB sizing is programmable, shared TLB
 212                 * size is the total available complement.
 213                 * Otherwise, we have to take the sum of all
 214                 * static VPE TLB entries.
 215                 */
 216                if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
 217                                >> MVPCONF0_PTLBE_SHIFT)) == 0) {
 218                    /*
 219                     * If there's more than one VPE, there had better
 220                     * be more than one TC, because we need one to bind
 221                     * to each VPE in turn to be able to read
 222                     * its configuration state!
 223                     */
 224                    settc(1);
 225                    /* Stop the TC from doing anything foolish */
 226                    write_tc_c0_tchalt(TCHALT_H);
 227                    mips_ihb();
 228                    /* No need to un-Halt - that happens later anyway */
 229                    for (i=0; i < vpes; i++) {
 230                        write_tc_c0_tcbind(i);
 231                        /*
 232                         * To be 100% sure we're really getting the right
 233                         * information, we exit the configuration state
 234                         * and do an IHB after each rebinding.
 235                         */
 236                        write_c0_mvpcontrol(
 237                                read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
 238                        mips_ihb();
 239                        /*
 240                         * Only count if the MMU Type indicated is TLB
 241                         */
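                        /*
                         * (Config.MT, bits 9:7, value 1 indicates a standard
                         *  TLB MMU; the Config1 MMU Size field, bits 30:25,
                         *  holds the number of TLB entries minus one, hence
                         *  the "+ 1" below.)
                         */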
 242                        if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
 243                                config1val = read_vpe_c0_config1();
 244                                tlbsiz += ((config1val >> 25) & 0x3f) + 1;
 245                        }
 246
 247                        /* Put core back in configuration state */
 248                        write_c0_mvpcontrol(
 249                                read_c0_mvpcontrol() | MVPCONTROL_VPC );
 250                        mips_ihb();
 251                    }
 252                }
 253                write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
 254                ehb();
 255
 256                /*
 257                 * Setup kernel data structures to use software total,
 258                 * rather than read the per-VPE Config1 value. The values
  259                 * for "CPU 0" get copied to all the other CPUs as part
 260                 * of their initialization in smtc_cpu_setup().
 261                 */
 262
 263                /* MIPS32 limits TLB indices to 64 */
 264                if (tlbsiz > 64)
 265                        tlbsiz = 64;
 266                cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
 267                smtc_status |= SMTC_TLB_SHARED;
 268                local_flush_tlb_all();
 269
 270                printk("TLB of %d entry pairs shared by %d VPEs\n",
 271                        tlbsiz, vpes);
 272            } else {
 273                printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
 274            }
 275        }
 276}
 277
 278
 279/*
 280 * Incrementally build the CPU map out of constituent MIPS MT cores,
  281 * using the specified available VPEs and TCs.  Platform code needs
 282 * to ensure that each MIPS MT core invokes this routine on reset,
 283 * one at a time(!).
 284 *
 285 * This version of the build_cpu_map and prepare_cpus routines assumes
 286 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 287 * they will be spread across *all* available VPEs (to minimise the
 288 * loss of efficiency due to exception service serialization).
 289 * An improved version would pick up configuration information and
 290 * possibly leave some TCs/VPEs as "slave" processors.
 291 *
 292 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 293 * cpu_possible_map and the logical/physical mappings.
 294 */
 295
 296int __init smtc_build_cpu_map(int start_cpu_slot)
 297{
 298        int i, ntcs;
 299
 300        /*
 301         * The CPU map isn't actually used for anything at this point,
 302         * so it's not clear what else we should do apart from set
 303         * everything up so that "logical" = "physical".
 304         */
 305        ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 306        for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
 307                set_cpu_possible(i, true);
 308                __cpu_number_map[i] = i;
 309                __cpu_logical_map[i] = i;
 310        }
 311#ifdef CONFIG_MIPS_MT_FPAFF
 312        /* Initialize map of CPUs with FPUs */
 313        cpus_clear(mt_fpu_cpumask);
 314#endif
 315
  316        /* One of those TCs is the one booting, and not a secondary... */
 317        printk("%i available secondary CPU TC(s)\n", i - 1);
 318
 319        return i;
 320}
 321
 322/*
 323 * Common setup before any secondaries are started
  324 * Make sure all CPUs are in a sensible state before we boot any of the
 325 * secondaries.
 326 *
 327 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 328 * as possible across the available VPEs.
 329 */
 330
 331static void smtc_tc_setup(int vpe, int tc, int cpu)
 332{
 333        settc(tc);
 334        write_tc_c0_tchalt(TCHALT_H);
 335        mips_ihb();
 336        write_tc_c0_tcstatus((read_tc_c0_tcstatus()
 337                        & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
 338                        | TCSTATUS_A);
 339        /*
 340         * TCContext gets an offset from the base of the IPIQ array
 341         * to be used in low-level code to detect the presence of
 342         * an active IPI queue
 343         */
 344        write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
 345        /* Bind tc to vpe */
 346        write_tc_c0_tcbind(vpe);
 347        /* In general, all TCs should have the same cpu_data indications */
 348        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
 349        /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
 350        if (cpu_data[0].cputype == CPU_34K ||
 351            cpu_data[0].cputype == CPU_1004K)
 352                cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 353        cpu_data[cpu].vpe_id = vpe;
 354        cpu_data[cpu].tc_id = tc;
 355        /* Multi-core SMTC hasn't been tested, but be prepared */
 356        cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
 357}
 358
 359/*
  360 * Tweak to get Count registers in as close a sync as possible.
 361 * Value seems good for 34K-class cores.
 362 */
 363
 364#define CP0_SKEW 8
 365
 366void smtc_prepare_cpus(int cpus)
 367{
 368        int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
 369        unsigned long flags;
 370        unsigned long val;
 371        int nipi;
 372        struct smtc_ipi *pipi;
 373
 374        /* disable interrupts so we can disable MT */
 375        local_irq_save(flags);
 376        /* disable MT so we can configure */
 377        dvpe();
 378        dmt();
 379
 380        spin_lock_init(&freeIPIq.lock);
 381
 382        /*
 383         * We probably don't have as many VPEs as we do SMP "CPUs",
 384         * but it's possible - and in any case we'll never use more!
 385         */
 386        for (i=0; i<NR_CPUS; i++) {
 387                IPIQ[i].head = IPIQ[i].tail = NULL;
 388                spin_lock_init(&IPIQ[i].lock);
 389                IPIQ[i].depth = 0;
 390                IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
 391        }
 392
 393        /* cpu_data index starts at zero */
 394        cpu = 0;
 395        cpu_data[cpu].vpe_id = 0;
 396        cpu_data[cpu].tc_id = 0;
 397        cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
 398        cpu++;
 399
 400        /* Report on boot-time options */
 401        mips_mt_set_cpuoptions();
 402        if (vpelimit > 0)
 403                printk("Limit of %d VPEs set\n", vpelimit);
 404        if (tclimit > 0)
 405                printk("Limit of %d TCs set\n", tclimit);
 406        if (nostlb) {
 407                printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
 408        }
 409        if (asidmask)
 410                printk("ASID mask value override to 0x%x\n", asidmask);
 411
 412        /* Temporary */
 413#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 414        if (hang_trig)
 415                printk("Logic Analyser Trigger on suspected TC hang\n");
 416#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 417
 418        /* Put MVPE's into 'configuration state' */
 419        write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
 420
 421        val = read_c0_mvpconf0();
 422        nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 423        if (vpelimit > 0 && nvpe > vpelimit)
 424                nvpe = vpelimit;
 425        ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 426        if (ntc > NR_CPUS)
 427                ntc = NR_CPUS;
 428        if (tclimit > 0 && ntc > tclimit)
 429                ntc = tclimit;
 430        slop = ntc % nvpe;
 431        for (i = 0; i < nvpe; i++) {
 432                tcpervpe[i] = ntc / nvpe;
 433                if (slop) {
 434                        if((slop - i) > 0) tcpervpe[i]++;
 435                }
 436        }
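        /*
         * Example: ntc = 5 TCs spread over nvpe = 2 VPEs yields
         * tcpervpe[] = { 3, 2 }; the slop of one leftover TC goes
         * to the lowest-numbered VPE(s).
         */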
 437        /* Handle command line override for VPE0 */
 438        if (vpe0limit > ntc) vpe0limit = ntc;
 439        if (vpe0limit > 0) {
 440                int slopslop;
 441                if (vpe0limit < tcpervpe[0]) {
 442                    /* Reducing TC count - distribute to others */
 443                    slop = tcpervpe[0] - vpe0limit;
 444                    slopslop = slop % (nvpe - 1);
 445                    tcpervpe[0] = vpe0limit;
 446                    for (i = 1; i < nvpe; i++) {
 447                        tcpervpe[i] += slop / (nvpe - 1);
 448                        if(slopslop && ((slopslop - (i - 1) > 0)))
 449                                tcpervpe[i]++;
 450                    }
 451                } else if (vpe0limit > tcpervpe[0]) {
 452                    /* Increasing TC count - steal from others */
 453                    slop = vpe0limit - tcpervpe[0];
 454                    slopslop = slop % (nvpe - 1);
 455                    tcpervpe[0] = vpe0limit;
 456                    for (i = 1; i < nvpe; i++) {
 457                        tcpervpe[i] -= slop / (nvpe - 1);
 458                        if(slopslop && ((slopslop - (i - 1) > 0)))
 459                                tcpervpe[i]--;
 460                    }
 461                }
 462        }
 463
 464        /* Set up shared TLB */
 465        smtc_configure_tlb();
 466
 467        for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
 468                if (tcpervpe[vpe] == 0)
 469                        continue;
 470                if (vpe != 0)
 471                        printk(", ");
 472                printk("VPE %d: TC", vpe);
 473                for (i = 0; i < tcpervpe[vpe]; i++) {
 474                        /*
 475                         * TC 0 is bound to VPE 0 at reset,
 476                         * and is presumably executing this
 477                         * code.  Leave it alone!
 478                         */
 479                        if (tc != 0) {
 480                                smtc_tc_setup(vpe, tc, cpu);
 481                                cpu++;
 482                        }
 483                        printk(" %d", tc);
 484                        tc++;
 485                }
 486                if (vpe != 0) {
 487                        /*
 488                         * Allow this VPE to control others.
 489                         */
 490                        write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
 491                                              VPECONF0_MVP);
 492
 493                        /*
 494                         * Clear any stale software interrupts from VPE's Cause
 495                         */
 496                        write_vpe_c0_cause(0);
 497
 498                        /*
 499                         * Clear ERL/EXL of VPEs other than 0
 500                         * and set restricted interrupt enable/mask.
 501                         */
 502                        write_vpe_c0_status((read_vpe_c0_status()
 503                                & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
 504                                | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
 505                                | ST0_IE));
 506                        /*
 507                         * set config to be the same as vpe0,
 508                         *  particularly kseg0 coherency alg
 509                         */
 510                        write_vpe_c0_config(read_c0_config());
 511                        /* Clear any pending timer interrupt */
 512                        write_vpe_c0_compare(0);
 513                        /* Propagate Config7 */
 514                        write_vpe_c0_config7(read_c0_config7());
 515                        write_vpe_c0_count(read_c0_count() + CP0_SKEW);
 516                        ehb();
 517                }
 518                /* enable multi-threading within VPE */
 519                write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
 520                /* enable the VPE */
 521                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
 522        }
 523
 524        /*
 525         * Pull any physically present but unused TCs out of circulation.
 526         */
 527        while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
 528                set_cpu_possible(tc, false);
 529                set_cpu_present(tc, false);
 530                tc++;
 531        }
 532
 533        /* release config state */
 534        write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
 535
 536        printk("\n");
 537
 538        /* Set up coprocessor affinity CPU mask(s) */
 539
 540#ifdef CONFIG_MIPS_MT_FPAFF
 541        for (tc = 0; tc < ntc; tc++) {
 542                if (cpu_data[tc].options & MIPS_CPU_FPU)
 543                        cpu_set(tc, mt_fpu_cpumask);
 544        }
 545#endif
 546
 547        /* set up ipi interrupts... */
 548
 549        /* If we have multiple VPEs running, set up the cross-VPE interrupt */
 550
 551        setup_cross_vpe_interrupts(nvpe);
 552
 553        /* Set up queue of free IPI "messages". */
 554        nipi = NR_CPUS * IPIBUF_PER_CPU;
 555        if (ipibuffers > 0)
 556                nipi = ipibuffers;
 557
 558        pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
 559        if (pipi == NULL)
 560                panic("kmalloc of IPI message buffers failed\n");
 561        else
 562                printk("IPI buffer pool of %d buffers\n", nipi);
 563        for (i = 0; i < nipi; i++) {
 564                smtc_ipi_nq(&freeIPIq, pipi);
 565                pipi++;
 566        }
 567
 568        /* Arm multithreading and enable other VPEs - but all TCs are Halted */
 569        emt(EMT_ENABLE);
 570        evpe(EVPE_ENABLE);
 571        local_irq_restore(flags);
 572        /* Initialize SMTC /proc statistics/diagnostics */
 573        init_smtc_stats();
 574}
 575
 576
 577/*
 578 * Setup the PC, SP, and GP of a secondary processor and start it
 579 * running!
 580 * smp_bootstrap is the place to resume from
 581 * __KSTK_TOS(idle) is apparently the stack pointer
  582 * (unsigned long)idle->thread_info is the gp
 583 *
 584 */
 585void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 586{
 587        extern u32 kernelsp[NR_CPUS];
 588        unsigned long flags;
 589        int mtflags;
 590
 591        LOCK_MT_PRA();
 592        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 593                dvpe();
 594        }
 595        settc(cpu_data[cpu].tc_id);
 596
 597        /* pc */
 598        write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
 599
 600        /* stack pointer */
 601        kernelsp[cpu] = __KSTK_TOS(idle);
 602        write_tc_gpr_sp(__KSTK_TOS(idle));
 603
 604        /* global pointer */
 605        write_tc_gpr_gp((unsigned long)task_thread_info(idle));
 606
 607        smtc_status |= SMTC_MTC_ACTIVE;
 608        write_tc_c0_tchalt(0);
 609        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 610                evpe(EVPE_ENABLE);
 611        }
 612        UNLOCK_MT_PRA();
 613}
 614
 615void smtc_init_secondary(void)
 616{
 617        local_irq_enable();
 618}
 619
 620void smtc_smp_finish(void)
 621{
 622        int cpu = smp_processor_id();
 623
 624        /*
 625         * Lowest-numbered CPU per VPE starts a clock tick.
 626         * Like per_cpu_trap_init() hack, this assumes that
  627         * SMTC init code assigns TCs consecutively and
 628         * in ascending order across available VPEs.
 629         */
 630        if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
 631                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
 632
 633        printk("TC %d going on-line as CPU %d\n",
 634                cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 635}
 636
 637void smtc_cpus_done(void)
 638{
 639}
 640
 641/*
 642 * Support for SMTC-optimized driver IRQ registration
 643 */
 644
 645/*
 646 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 647 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 648 * in this table.
 649 */
 650
 651int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 652                        unsigned long hwmask)
 653{
 654#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 655        unsigned int vpe = current_cpu_data.vpe_id;
 656
 657        vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
 658#endif
 659        irq_hwmask[irq] = hwmask;
 660
 661        return setup_irq(irq, new);
 662}
 663
 664#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 665/*
 666 * Support for IRQ affinity to TCs
 667 */
 668
 669void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 670{
 671        /*
 672         * If a "fast path" cache of quickly decodable affinity state
 673         * is maintained, this is where it gets done, on a call up
 674         * from the platform affinity code.
 675         */
 676}
 677
 678void smtc_forward_irq(unsigned int irq)
 679{
 680        int target;
 681
 682        /*
 683         * OK wise guy, now figure out how to get the IRQ
 684         * to be serviced on an authorized "CPU".
 685         *
 686         * Ideally, to handle the situation where an IRQ has multiple
 687         * eligible CPUS, we would maintain state per IRQ that would
 688         * allow a fair distribution of service requests.  Since the
 689         * expected use model is any-or-only-one, for simplicity
 690         * and efficiency, we just pick the easiest one to find.
 691         */
 692
 693        target = cpumask_first(irq_desc[irq].affinity);
 694
 695        /*
 696         * We depend on the platform code to have correctly processed
 697         * IRQ affinity change requests to ensure that the IRQ affinity
 698         * mask has been purged of bits corresponding to nonexistent and
 699         * offline "CPUs", and to TCs bound to VPEs other than the VPE
 700         * connected to the physical interrupt input for the interrupt
 701         * in question.  Otherwise we have a nasty problem with interrupt
 702         * mask management.  This is best handled in non-performance-critical
 703         * platform IRQ affinity setting code,  to minimize interrupt-time
 704         * checks.
 705         */
 706
 707        /* If no one is eligible, service locally */
 708        if (target >= NR_CPUS) {
 709                do_IRQ_no_affinity(irq);
 710                return;
 711        }
 712
 713        smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
 714}
 715
 716#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 717
 718/*
 719 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 720 * Within a VPE one TC can interrupt another by different approaches.
 721 * The easiest to get right would probably be to make all TCs except
 722 * the target IXMT and set a software interrupt, but an IXMT-based
 723 * scheme requires that a handler must run before a new IPI could
 724 * be sent, which would break the "broadcast" loops in MIPS MT.
 725 * A more gonzo approach within a VPE is to halt the TC, extract
 726 * its Restart, Status, and a couple of GPRs, and program the Restart
 727 * address to emulate an interrupt.
 728 *
 729 * Within a VPE, one can be confident that the target TC isn't in
 730 * a critical EXL state when halted, since the write to the Halt
 731 * register could not have issued on the writing thread if the
 732 * halting thread had EXL set. So k0 and k1 of the target TC
 733 * can be used by the injection code.  Across VPEs, one can't
 734 * be certain that the target TC isn't in a critical exception
 735 * state. So we try a two-step process of sending a software
 736 * interrupt to the target VPE, which either handles the event
 737 * itself (if it was the target) or injects the event within
 738 * the VPE.
 739 */
 740
 741static void smtc_ipi_qdump(void)
 742{
 743        int i;
 744        struct smtc_ipi *temp;
 745
 746        for (i = 0; i < NR_CPUS ;i++) {
 747                pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
 748                        i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
 749                        IPIQ[i].depth);
 750                temp = IPIQ[i].head;
 751
 752                while (temp != IPIQ[i].tail) {
 753                        pr_debug("%d %d %d: ", temp->type, temp->dest,
 754                               (int)temp->arg);
 755#ifdef  SMTC_IPI_DEBUG
 756                    pr_debug("%u %lu\n", temp->sender, temp->stamp);
 757#else
 758                    pr_debug("\n");
 759#endif
 760                    temp = temp->flink;
 761                }
 762        }
 763}
 764
 765/*
 766 * The standard atomic.h primitives don't quite do what we want
 767 * here: We need an atomic add-and-return-previous-value (which
 768 * could be done with atomic_add_return and a decrement) and an
 769 * atomic set/zero-and-return-previous-value (which can't really
 770 * be done with the atomic.h primitives). And since this is
 771 * MIPS MT, we can assume that we have LL/SC.
 772 */
 773static inline int atomic_postincrement(atomic_t *v)
 774{
 775        unsigned long result;
 776
 777        unsigned long temp;
 778
 779        __asm__ __volatile__(
 780        "1:     ll      %0, %2                                  \n"
 781        "       addu    %1, %0, 1                               \n"
 782        "       sc      %1, %2                                  \n"
 783        "       beqz    %1, 1b                                  \n"
 784        __WEAK_LLSC_MB
 785        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
 786        : "m" (v->counter)
 787        : "memory");
 788
 789        return result;
 790}
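/*
 * In other words (a sketch of the semantics, not replacement code):
 *
 *	old = v->counter; v->counter = old + 1; return old;
 *
 * done atomically via LL/SC, i.e. a fetch-and-increment that returns
 * the value held *before* the increment.
 */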
 791
 792void smtc_send_ipi(int cpu, int type, unsigned int action)
 793{
 794        int tcstatus;
 795        struct smtc_ipi *pipi;
 796        unsigned long flags;
 797        int mtflags;
 798        unsigned long tcrestart;
 799        extern void r4k_wait_irqoff(void), __pastwait(void);
 800        int set_resched_flag = (type == LINUX_SMP_IPI &&
 801                                action == SMP_RESCHEDULE_YOURSELF);
 802
 803        if (cpu == smp_processor_id()) {
 804                printk("Cannot Send IPI to self!\n");
 805                return;
 806        }
 807        if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
 808                return; /* There is a reschedule queued already */
 809
 810        /* Set up a descriptor, to be delivered either promptly or queued */
 811        pipi = smtc_ipi_dq(&freeIPIq);
 812        if (pipi == NULL) {
 813                bust_spinlocks(1);
 814                mips_mt_regdump(dvpe());
 815                panic("IPI Msg. Buffers Depleted\n");
 816        }
 817        pipi->type = type;
 818        pipi->arg = (void *)action;
 819        pipi->dest = cpu;
 820        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 821                /* If not on same VPE, enqueue and send cross-VPE interrupt */
 822                IPIQ[cpu].resched_flag |= set_resched_flag;
 823                smtc_ipi_nq(&IPIQ[cpu], pipi);
 824                LOCK_CORE_PRA();
 825                settc(cpu_data[cpu].tc_id);
 826                write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
 827                UNLOCK_CORE_PRA();
 828        } else {
 829                /*
 830                 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
 831                 * since ASID shootdown on the other VPE may
 832                 * collide with this operation.
 833                 */
 834                LOCK_CORE_PRA();
 835                settc(cpu_data[cpu].tc_id);
 836                /* Halt the targeted TC */
 837                write_tc_c0_tchalt(TCHALT_H);
 838                mips_ihb();
 839
 840                /*
 841                 * Inspect TCStatus - if IXMT is set, we have to queue
 842                 * a message. Otherwise, we set up the "interrupt"
 843                 * of the other TC
 844                 */
 845                tcstatus = read_tc_c0_tcstatus();
 846
 847                if ((tcstatus & TCSTATUS_IXMT) != 0) {
 848                        /*
  849                         * If we're in the irq-off version of the wait
 850                         * loop, we need to force exit from the wait and
 851                         * do a direct post of the IPI.
 852                         */
 853                        if (cpu_wait == r4k_wait_irqoff) {
 854                                tcrestart = read_tc_c0_tcrestart();
 855                                if (tcrestart >= (unsigned long)r4k_wait_irqoff
 856                                    && tcrestart < (unsigned long)__pastwait) {
 857                                        write_tc_c0_tcrestart(__pastwait);
 858                                        tcstatus &= ~TCSTATUS_IXMT;
 859                                        write_tc_c0_tcstatus(tcstatus);
 860                                        goto postdirect;
 861                                }
 862                        }
 863                        /*
 864                         * Otherwise we queue the message for the target TC
  865                         * to pick up when it does a local_irq_restore()
 866                         */
 867                        write_tc_c0_tchalt(0);
 868                        UNLOCK_CORE_PRA();
 869                        IPIQ[cpu].resched_flag |= set_resched_flag;
 870                        smtc_ipi_nq(&IPIQ[cpu], pipi);
 871                } else {
 872postdirect:
 873                        post_direct_ipi(cpu, pipi);
 874                        write_tc_c0_tchalt(0);
 875                        UNLOCK_CORE_PRA();
 876                }
 877        }
 878}
 879
 880/*
 881 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 882 */
 883static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
 884{
 885        struct pt_regs *kstack;
 886        unsigned long tcstatus;
 887        unsigned long tcrestart;
 888        extern u32 kernelsp[NR_CPUS];
 889        extern void __smtc_ipi_vector(void);
 890//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
 891
 892        /* Extract Status, EPC from halted TC */
 893        tcstatus = read_tc_c0_tcstatus();
 894        tcrestart = read_tc_c0_tcrestart();
 895        /* If TCRestart indicates a WAIT instruction, advance the PC */
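        /*
         * (The high-bit test checks that TCRestart lies in kernel address
         *  space before the instruction word is dereferenced; under the
         *  mask 0xfe00003f, 0x42000020 matches the WAIT opcode: COP0 major
         *  opcode with the CO bit set and function field 0x20, ignoring
         *  the implementation-defined code field.)
         */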
 896        if ((tcrestart & 0x80000000)
 897            && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
 898                tcrestart += 4;
 899        }
 900        /*
 901         * Save on TC's future kernel stack
 902         *
  903         * CU bit of Status is an indicator that TC was
 904         * already running on a kernel stack...
 905         */
 906        if (tcstatus & ST0_CU0)  {
 907                /* Note that this "- 1" is pointer arithmetic */
 908                kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
 909        } else {
 910                kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
 911        }
 912
 913        kstack->cp0_epc = (long)tcrestart;
 914        /* Save TCStatus */
 915        kstack->cp0_tcstatus = tcstatus;
  916        /* Pass token of operation to be performed in kernel stack pad area */
 917        kstack->pad0[4] = (unsigned long)pipi;
 918        /* Pass address of function to be called likewise */
 919        kstack->pad0[5] = (unsigned long)&ipi_decode;
 920        /* Set interrupt exempt and kernel mode */
 921        tcstatus |= TCSTATUS_IXMT;
 922        tcstatus &= ~TCSTATUS_TKSU;
 923        write_tc_c0_tcstatus(tcstatus);
 924        ehb();
 925        /* Set TC Restart address to be SMTC IPI vector */
 926        write_tc_c0_tcrestart(__smtc_ipi_vector);
 927}
 928
 929static void ipi_resched_interrupt(void)
 930{
 931        /* Return from interrupt should be enough to cause scheduler check */
 932}
 933
 934static void ipi_call_interrupt(void)
 935{
 936        /* Invoke generic function invocation code in smp.c */
 937        smp_call_function_interrupt();
 938}
 939
 940DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 941
 942void ipi_decode(struct smtc_ipi *pipi)
 943{
 944        unsigned int cpu = smp_processor_id();
 945        struct clock_event_device *cd;
 946        void *arg_copy = pipi->arg;
 947        int type_copy = pipi->type;
 948        int irq = MIPS_CPU_IRQ_BASE + 1;
 949
 950        smtc_ipi_nq(&freeIPIq, pipi);
 951
 952        switch (type_copy) {
 953        case SMTC_CLOCK_TICK:
 954                irq_enter();
 955                kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 956                cd = &per_cpu(mips_clockevent_device, cpu);
 957                cd->event_handler(cd);
 958                irq_exit();
 959                break;
 960
 961        case LINUX_SMP_IPI:
 962                switch ((int)arg_copy) {
 963                case SMP_RESCHEDULE_YOURSELF:
 964                        ipi_resched_interrupt();
 965                        break;
 966                case SMP_CALL_FUNCTION:
 967                        ipi_call_interrupt();
 968                        break;
 969                default:
 970                        printk("Impossible SMTC IPI Argument 0x%x\n",
 971                                (int)arg_copy);
 972                        break;
 973                }
 974                break;
 975#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 976        case IRQ_AFFINITY_IPI:
 977                /*
 978                 * Accept a "forwarded" interrupt that was initially
  979                 * taken by a TC that doesn't have affinity for the IRQ.
 980                 */
 981                do_IRQ_no_affinity((int)arg_copy);
 982                break;
 983#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 984        default:
 985                printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
 986                break;
 987        }
 988}
 989
 990/*
 991 * Similar to smtc_ipi_replay(), but invoked from context restore,
  992 * so it reuses the current exception frame rather than setting up a
 993 * new one with self_ipi.
 994 */
 995
 996void deferred_smtc_ipi(void)
 997{
 998        int cpu = smp_processor_id();
 999
1000        /*
1001         * Test is not atomic, but much faster than a dequeue,
1002         * and the vast majority of invocations will have a null queue.
 1003         * If irqs were disabled when this was called, then any IPIs queued
 1004         * after our last test will be taken on the next irq_enable/restore.
1005         * If interrupts were enabled, then any IPIs added after the
1006         * last test will be taken directly.
1007         */
1008
1009        while (IPIQ[cpu].head != NULL) {
1010                struct smtc_ipi_q *q = &IPIQ[cpu];
1011                struct smtc_ipi *pipi;
1012                unsigned long flags;
1013
1014                /*
1015                 * It may be possible we'll come in with interrupts
1016                 * already enabled.
1017                 */
1018                local_irq_save(flags);
1019                spin_lock(&q->lock);
1020                pipi = __smtc_ipi_dq(q);
1021                spin_unlock(&q->lock);
1022                if (pipi != NULL) {
1023                        if (pipi->type == LINUX_SMP_IPI &&
1024                            (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
1025                                IPIQ[cpu].resched_flag = 0;
1026                        ipi_decode(pipi);
1027                }
1028                /*
1029                 * The use of the __raw_local restore isn't
1030                 * as obviously necessary here as in smtc_ipi_replay(),
1031                 * but it's more efficient, given that we're already
1032                 * running down the IPI queue.
1033                 */
1034                __raw_local_irq_restore(flags);
1035        }
1036}
1037
1038/*
1039 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
1040 * set via cross-VPE MTTR manipulation of the Cause register. It would be
1041 * in some regards preferable to have external logic for "doorbell" hardware
1042 * interrupts.
1043 */
1044
1045static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
1046
1047static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
1048{
1049        int my_vpe = cpu_data[smp_processor_id()].vpe_id;
1050        int my_tc = cpu_data[smp_processor_id()].tc_id;
1051        int cpu;
1052        struct smtc_ipi *pipi;
1053        unsigned long tcstatus;
1054        int sent;
1055        unsigned long flags;
1056        unsigned int mtflags;
1057        unsigned int vpflags;
1058
1059        /*
1060         * So long as cross-VPE interrupts are done via
1061         * MFTR/MTTR read-modify-writes of Cause, we need
1062         * to stop other VPEs whenever the local VPE does
1063         * anything similar.
1064         */
1065        local_irq_save(flags);
1066        vpflags = dvpe();
1067        clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
1068        set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
1069        irq_enable_hazard();
1070        evpe(vpflags);
1071        local_irq_restore(flags);
1072
1073        /*
1074         * Cross-VPE Interrupt handler: Try to directly deliver IPIs
1075         * queued for TCs on this VPE other than the current one.
1076         * Return-from-interrupt should cause us to drain the queue
1077         * for the current TC, so we ought not to have to do it explicitly here.
1078         */
1079
1080        for_each_online_cpu(cpu) {
1081                if (cpu_data[cpu].vpe_id != my_vpe)
1082                        continue;
1083
1084                pipi = smtc_ipi_dq(&IPIQ[cpu]);
1085                if (pipi != NULL) {
1086                        if (cpu_data[cpu].tc_id != my_tc) {
1087                                sent = 0;
1088                                LOCK_MT_PRA();
1089                                settc(cpu_data[cpu].tc_id);
1090                                write_tc_c0_tchalt(TCHALT_H);
1091                                mips_ihb();
1092                                tcstatus = read_tc_c0_tcstatus();
1093                                if ((tcstatus & TCSTATUS_IXMT) == 0) {
1094                                        post_direct_ipi(cpu, pipi);
1095                                        sent = 1;
1096                                }
1097                                write_tc_c0_tchalt(0);
1098                                UNLOCK_MT_PRA();
1099                                if (!sent) {
1100                                        smtc_ipi_req(&IPIQ[cpu], pipi);
1101                                }
1102                        } else {
1103                                /*
1104                                 * ipi_decode() should be called
1105                                 * with interrupts off
1106                                 */
1107                                local_irq_save(flags);
1108                                if (pipi->type == LINUX_SMP_IPI &&
1109                                    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
1110                                        IPIQ[cpu].resched_flag = 0;
1111                                ipi_decode(pipi);
1112                                local_irq_restore(flags);
1113                        }
1114                }
1115        }
1116
1117        return IRQ_HANDLED;
1118}
1119
1120static void ipi_irq_dispatch(void)
1121{
1122        do_IRQ(cpu_ipi_irq);
1123}
1124
1125static struct irqaction irq_ipi = {
1126        .handler        = ipi_interrupt,
1127        .flags          = IRQF_DISABLED | IRQF_PERCPU,
1128        .name           = "SMTC_IPI"
1129};
1130
1131static void setup_cross_vpe_interrupts(unsigned int nvpe)
1132{
1133        if (nvpe < 1)
1134                return;
1135
1136        if (!cpu_has_vint)
1137                panic("SMTC Kernel requires Vectored Interrupt support");
1138
1139        set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
1140
1141        setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1142
1143        set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
1144}
1145
1146/*
1147 * SMTC-specific hacks invoked from elsewhere in the kernel.
1148 */
1149
1150 /*
1151  * smtc_ipi_replay is called from raw_local_irq_restore
1152  */
1153
1154void smtc_ipi_replay(void)
1155{
1156        unsigned int cpu = smp_processor_id();
1157
1158        /*
1159         * To the extent that we've ever turned interrupts off,
 1160         * we may have accumulated deferred IPIs.  This is subtle, but
 1161         * we should be OK: if we pick up something and dispatch
1162         * it here, that's great. If we see nothing, but concurrent
1163         * with this operation, another TC sends us an IPI, IXMT
1164         * is clear, and we'll handle it as a real pseudo-interrupt
1165         * and not a pseudo-pseudo interrupt.  The important thing
1166         * is to do the last check for queued message *after* the
1167         * re-enabling of interrupts.
1168         */
1169        while (IPIQ[cpu].head != NULL) {
1170                struct smtc_ipi_q *q = &IPIQ[cpu];
1171                struct smtc_ipi *pipi;
1172                unsigned long flags;
1173
1174                /*
1175                 * It's just possible we'll come in with interrupts
1176                 * already enabled.
1177                 */
1178                local_irq_save(flags);
1179
1180                spin_lock(&q->lock);
1181                pipi = __smtc_ipi_dq(q);
1182                spin_unlock(&q->lock);
1183                /*
 1184                 * But use a raw restore here to avoid recursion.
1185                 */
1186                __raw_local_irq_restore(flags);
1187
1188                if (pipi) {
1189                        self_ipi(pipi);
1190                        smtc_cpu_stats[cpu].selfipis++;
1191                }
1192        }
1193}
1194
1195EXPORT_SYMBOL(smtc_ipi_replay);
1196
1197void smtc_idle_loop_hook(void)
1198{
1199#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
1200        int im;
1201        int flags;
1202        int mtflags;
1203        int bit;
1204        int vpe;
1205        int tc;
1206        int hook_ntcs;
1207        /*
1208         * printk within DMT-protected regions can deadlock,
1209         * so buffer diagnostic messages for later output.
1210         */
1211        char *pdb_msg;
1212        char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1213
1214        if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1215                if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1216                        int mvpconf0;
1217                        /* Tedious stuff to just do once */
1218                        mvpconf0 = read_c0_mvpconf0();
1219                        hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1220                        if (hook_ntcs > NR_CPUS)
1221                                hook_ntcs = NR_CPUS;
1222                        for (tc = 0; tc < hook_ntcs; tc++) {
1223                                tcnoprog[tc] = 0;
1224                                clock_hang_reported[tc] = 0;
1225                        }
1226                        for (vpe = 0; vpe < 2; vpe++)
1227                                for (im = 0; im < 8; im++)
1228                                        imstuckcount[vpe][im] = 0;
1229                        printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1230                        atomic_set(&idle_hook_initialized, 1000);
1231                } else {
1232                        /* Someone else is initializing in parallel - let 'em finish */
1233                        while (atomic_read(&idle_hook_initialized) < 1000)
1234                                ;
1235                }
1236        }
1237
1238        /* Have we stupidly left IXMT set somewhere? */
1239        if (read_c0_tcstatus() & 0x400) {
1240                write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1241                ehb();
1242                printk("Dangling IXMT in cpu_idle()\n");
1243        }
1244
1245        /* Have we stupidly left an IM bit turned off? */
1246#define IM_LIMIT 2000
1247        local_irq_save(flags);
1248        mtflags = dmt();
1249        pdb_msg = &id_ho_db_msg[0];
1250        im = read_c0_status();
1251        vpe = current_cpu_data.vpe_id;
1252        for (bit = 0; bit < 8; bit++) {
1253                /*
1254                 * In current prototype, I/O interrupts
1255                 * are masked for VPE > 0
1256                 */
1257                if (vpemask[vpe][bit]) {
1258                        if (!(im & (0x100 << bit)))
1259                                imstuckcount[vpe][bit]++;
1260                        else
1261                                imstuckcount[vpe][bit] = 0;
1262                        if (imstuckcount[vpe][bit] > IM_LIMIT) {
1263                                set_c0_status(0x100 << bit);
1264                                ehb();
1265                                imstuckcount[vpe][bit] = 0;
1266                                pdb_msg += sprintf(pdb_msg,
1267                                        "Dangling IM %d fixed for VPE %d\n", bit,
1268                                        vpe);
1269                        }
1270                }
1271        }
1272
1273        emt(mtflags);
1274        local_irq_restore(flags);
1275        if (pdb_msg != &id_ho_db_msg[0])
1276                printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1277#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
1278
1279        smtc_ipi_replay();
1280}
1281
1282void smtc_soft_dump(void)
1283{
1284        int i;
1285
1286        printk("Counter Interrupts taken per CPU (TC)\n");
1287        for (i=0; i < NR_CPUS; i++) {
1288                printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1289        }
1290        printk("Self-IPI invocations:\n");
1291        for (i=0; i < NR_CPUS; i++) {
1292                printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1293        }
1294        smtc_ipi_qdump();
1295        printk("%d Recoveries of \"stolen\" FPU\n",
1296               atomic_read(&smtc_fpu_recoveries));
1297}
1298
1299
1300/*
1301 * TLB management routines special to SMTC
1302 */
1303
1304void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1305{
1306        unsigned long flags, mtflags, tcstat, prevhalt, asid;
1307        int tlb, i;
1308
1309        /*
1310         * It would be nice to be able to use a spinlock here,
1311         * but this is invoked from within TLB flush routines
1312         * that protect themselves with DVPE, so if a lock is
1313         * held by another TC, it'll never be freed.
1314         *
1315         * DVPE/DMT must not be done with interrupts enabled,
 1316         * so even though most callers will already have disabled
1317         * them, let's be really careful...
1318         */
1319
1320        local_irq_save(flags);
1321        if (smtc_status & SMTC_TLB_SHARED) {
1322                mtflags = dvpe();
1323                tlb = 0;
1324        } else {
1325                mtflags = dmt();
1326                tlb = cpu_data[cpu].vpe_id;
1327        }
1328        asid = asid_cache(cpu);
1329
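        /*
         * Advance the ASID until we find one that is not marked live for
         * any other TC sharing this TLB.  On a generation rollover (the
         * ASID field wraps to zero) we snapshot the ASIDs currently
         * resident in the other TCs into smtc_live_asid[], flush the TLB,
         * and start a new ASID cycle.
         */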
1330        do {
1331                if (!((asid += ASID_INC) & ASID_MASK) ) {
1332                        if (cpu_has_vtag_icache)
1333                                flush_icache_all();
 1334                        /* Traverse all online CPUs (hack requires contiguous range) */
1335                        for_each_online_cpu(i) {
1336                                /*
1337                                 * We don't need to worry about our own CPU, nor those of
1338                                 * CPUs who don't share our TLB.
1339                                 */
1340                                if ((i != smp_processor_id()) &&
1341                                    ((smtc_status & SMTC_TLB_SHARED) ||
1342                                     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1343                                        settc(cpu_data[i].tc_id);
1344                                        prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1345                                        if (!prevhalt) {
1346                                                write_tc_c0_tchalt(TCHALT_H);
1347                                                mips_ihb();
1348                                        }
1349                                        tcstat = read_tc_c0_tcstatus();
1350                                        smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1351                                        if (!prevhalt)
1352                                                write_tc_c0_tchalt(0);
1353                                }
1354                        }
1355                        if (!asid)              /* fix version if needed */
1356                                asid = ASID_FIRST_VERSION;
1357                        local_flush_tlb_all();  /* start new asid cycle */
1358                }
1359        } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1360
1361        /*
1362         * SMTC shares the TLB within VPEs and possibly across all VPEs.
1363         */
1364        for_each_online_cpu(i) {
1365                if ((smtc_status & SMTC_TLB_SHARED) ||
1366                    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1367                        cpu_context(i, mm) = asid_cache(i) = asid;
1368        }
1369
1370        if (smtc_status & SMTC_TLB_SHARED)
1371                evpe(mtflags);
1372        else
1373                emt(mtflags);
1374        local_irq_restore(flags);
1375}
1376
1377/*
1378 * Invoked from macros defined in mmu_context.h
1379 * which must already have disabled interrupts
1380 * and done a DVPE or DMT as appropriate.
1381 */
1382
1383void smtc_flush_tlb_asid(unsigned long asid)
1384{
1385        int entry;
1386        unsigned long ehi;
1387
1388        entry = read_c0_wired();
1389
1390        /* Traverse all non-wired entries */
1391        while (entry < current_cpu_data.tlbsize) {
1392                write_c0_index(entry);
1393                ehb();
1394                tlb_read();
1395                ehb();
1396                ehi = read_c0_entryhi();
1397                if ((ehi & ASID_MASK) == asid) {
1398                    /*
1399                     * Invalidate only entries with specified ASID,
 1400                     * making sure all entries differ.
1401                     */
1402                    write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1403                    write_c0_entrylo0(0);
1404                    write_c0_entrylo1(0);
1405                    mtc0_tlbw_hazard();
1406                    tlb_write_indexed();
1407                }
1408                entry++;
1409        }
1410        write_c0_index(PARKED_INDEX);
1411        tlbw_use_hazard();
1412}
1413
1414/*
1415 * Support for single-threading cache flush operations.
1416 */
1417
1418static int halt_state_save[NR_CPUS];
1419
1420/*
1421 * To really, really be sure that nothing is being done
1422 * by other TCs, halt them all.  This code assumes that
1423 * a DVPE has already been done, so while their Halted
1424 * state is theoretically architecturally unstable, in
1425 * practice, it's not going to change while we're looking
1426 * at it.
1427 */
1428
1429void smtc_cflush_lockdown(void)
1430{
1431        int cpu;
1432
1433        for_each_online_cpu(cpu) {
1434                if (cpu != smp_processor_id()) {
1435                        settc(cpu_data[cpu].tc_id);
1436                        halt_state_save[cpu] = read_tc_c0_tchalt();
1437                        write_tc_c0_tchalt(TCHALT_H);
1438                }
1439        }
1440        mips_ihb();
1441}
1442
1443/* It would be cheating to change the cpu_online states during a flush! */
1444
1445void smtc_cflush_release(void)
1446{
1447        int cpu;
1448
1449        /*
1450         * Start with a hazard barrier to ensure
1451         * that all CACHE ops have played through.
1452         */
1453        mips_ihb();
1454
1455        for_each_online_cpu(cpu) {
1456                if (cpu != smp_processor_id()) {
1457                        settc(cpu_data[cpu].tc_id);
1458                        write_tc_c0_tchalt(halt_state_save[cpu]);
1459                }
1460        }
1461        mips_ihb();
1462}
1463