linux/arch/mips/kernel/smtc.c
   1/* Copyright (C) 2004 Mips Technologies, Inc */
   2
   3#include <linux/clockchips.h>
   4#include <linux/kernel.h>
   5#include <linux/sched.h>
   6#include <linux/cpumask.h>
   7#include <linux/interrupt.h>
   8#include <linux/kernel_stat.h>
   9#include <linux/module.h>
  10
  11#include <asm/cpu.h>
  12#include <asm/processor.h>
  13#include <asm/atomic.h>
  14#include <asm/system.h>
  15#include <asm/hardirq.h>
  16#include <asm/hazards.h>
  17#include <asm/irq.h>
  18#include <asm/mmu_context.h>
  19#include <asm/smp.h>
  20#include <asm/mipsregs.h>
  21#include <asm/cacheflush.h>
  22#include <asm/time.h>
  23#include <asm/addrspace.h>
  24#include <asm/smtc.h>
  25#include <asm/smtc_ipi.h>
  26#include <asm/smtc_proc.h>
  27
  28/*
  29 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
  30 * in do_IRQ. These are passed in setup_irq_smtc() and stored
  31 * in this table.
  32 */
  33unsigned long irq_hwmask[NR_IRQS];
  34
  35#define LOCK_MT_PRA() \
  36        local_irq_save(flags); \
  37        mtflags = dmt()
  38
  39#define UNLOCK_MT_PRA() \
  40        emt(mtflags); \
  41        local_irq_restore(flags)
  42
  43#define LOCK_CORE_PRA() \
  44        local_irq_save(flags); \
  45        mtflags = dvpe()
  46
  47#define UNLOCK_CORE_PRA() \
  48        evpe(mtflags); \
  49        local_irq_restore(flags)
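    /*
     * Illustrative usage (a sketch only, mirroring callers such as
     * smtc_boot_secondary() below): the macros expand in place and
     * rely on the caller declaring the 'flags' and 'mtflags' locals:
     *
     *     long flags;
     *     int mtflags;
     *
     *     LOCK_MT_PRA();
     *     ... manipulate per-TC/per-VPE registers via settc() ...
     *     UNLOCK_MT_PRA();
     */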
  50
  51/*
  52 * Data structures purely associated with SMTC parallelism
  53 */
  54
  55
  56/*
  57 * Table for tracking ASIDs whose lifetime is prolonged.
  58 */
  59
  60asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
  61
  62/*
  63 * Clock interrupt "latch" buffers, per "CPU"
  64 */
  65
  66static atomic_t ipi_timer_latch[NR_CPUS];
  67
  68/*
  69 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
  70 */
  71
  72#define IPIBUF_PER_CPU 4
  73
  74static struct smtc_ipi_q IPIQ[NR_CPUS];
  75static struct smtc_ipi_q freeIPIq;
  76
  77
  78/* Forward declarations */
  79
  80void ipi_decode(struct smtc_ipi *);
  81static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
  82static void setup_cross_vpe_interrupts(unsigned int nvpe);
  83void init_smtc_stats(void);
  84
  85/* Global SMTC Status */
  86
  87unsigned int smtc_status = 0;
  88
  89/* Boot command line configuration overrides */
  90
  91static int vpe0limit;
  92static int ipibuffers = 0;
  93static int nostlb = 0;
  94static int asidmask = 0;
  95unsigned long smtc_asid_mask = 0xff;
  96
  97static int __init vpe0tcs(char *str)
  98{
  99        get_option(&str, &vpe0limit);
 100
 101        return 1;
 102}
 103
 104static int __init ipibufs(char *str)
 105{
 106        get_option(&str, &ipibuffers);
 107        return 1;
 108}
 109
 110static int __init stlb_disable(char *s)
 111{
 112        nostlb = 1;
 113        return 1;
 114}
 115
 116static int __init asidmask_set(char *str)
 117{
 118        get_option(&str, &asidmask);
 119        switch (asidmask) {
 120        case 0x1:
 121        case 0x3:
 122        case 0x7:
 123        case 0xf:
 124        case 0x1f:
 125        case 0x3f:
 126        case 0x7f:
 127        case 0xff:
 128                smtc_asid_mask = (unsigned long)asidmask;
 129                break;
 130        default:
 131                printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
 132        }
 133        return 1;
 134}
 135
 136__setup("vpe0tcs=", vpe0tcs);
 137__setup("ipibufs=", ipibufs);
 138__setup("nostlb", stlb_disable);
 139__setup("asidmask=", asidmask_set);
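    /*
     * Illustrative kernel command-line fragment for the handlers above
     * (values are examples only):
     *
     *     vpe0tcs=1 ipibufs=8 nostlb asidmask=0x3f
     *
     * vpe0tcs= bounds the TCs given to VPE 0, ipibufs= overrides the
     * IPI buffer pool size, nostlb inhibits TLB sharing, and asidmask=
     * must be one of the masks accepted by asidmask_set() above.
     */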
 140
 141#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 142
 143static int hang_trig = 0;
 144
 145static int __init hangtrig_enable(char *s)
 146{
 147        hang_trig = 1;
 148        return 1;
 149}
 150
 151
 152__setup("hangtrig", hangtrig_enable);
 153
 154#define DEFAULT_BLOCKED_IPI_LIMIT 32
 155
 156static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
 157
 158static int __init tintq(char *str)
 159{
 160        get_option(&str, &timerq_limit);
 161        return 1;
 162}
 163
 164__setup("tintq=", tintq);
 165
 166static int imstuckcount[2][8];
 167/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
 168static int vpemask[2][8] = {
 169        {0, 0, 1, 0, 0, 0, 0, 1},
 170        {0, 0, 0, 0, 0, 0, 0, 1}
 171};
 172int tcnoprog[NR_CPUS];
 173static atomic_t idle_hook_initialized = {0};
 174static int clock_hang_reported[NR_CPUS];
 175
 176#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 177
 178/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
 179
 180void __init sanitize_tlb_entries(void)
 181{
 182        printk("Deprecated sanitize_tlb_entries() invoked\n");
 183}
 184
 185
 186/*
 187 * Configure shared TLB - VPC configuration bit must be set by caller
 188 */
 189
 190static void smtc_configure_tlb(void)
 191{
 192        int i, tlbsiz, vpes;
 193        unsigned long mvpconf0;
 194        unsigned long config1val;
 195
 196        /* Set up ASID preservation table */
 197        for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
 198            for(i = 0; i < MAX_SMTC_ASIDS; i++) {
 199                smtc_live_asid[vpes][i] = 0;
 200            }
 201        }
 202        mvpconf0 = read_c0_mvpconf0();
 203
 204        if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
 205                        >> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
 206            /* If we have multiple VPEs, try to share the TLB */
 207            if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
 208                /*
 209                 * If TLB sizing is programmable, shared TLB
 210                 * size is the total available complement.
 211                 * Otherwise, we have to take the sum of all
 212                 * static VPE TLB entries.
 213                 */
 214                if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
 215                                >> MVPCONF0_PTLBE_SHIFT)) == 0) {
 216                    /*
 217                     * If there's more than one VPE, there had better
 218                     * be more than one TC, because we need one to bind
 219                     * to each VPE in turn to be able to read
 220                     * its configuration state!
 221                     */
 222                    settc(1);
 223                    /* Stop the TC from doing anything foolish */
 224                    write_tc_c0_tchalt(TCHALT_H);
 225                    mips_ihb();
 226                    /* No need to un-Halt - that happens later anyway */
 227                    for (i=0; i < vpes; i++) {
 228                        write_tc_c0_tcbind(i);
 229                        /*
 230                         * To be 100% sure we're really getting the right
 231                         * information, we exit the configuration state
 232                         * and do an IHB after each rebinding.
 233                         */
 234                        write_c0_mvpcontrol(
 235                                read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
 236                        mips_ihb();
 237                        /*
 238                         * Only count if the MMU Type indicated is TLB
 239                         */
 240                        if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
 241                                config1val = read_vpe_c0_config1();
 242                                tlbsiz += ((config1val >> 25) & 0x3f) + 1;
 243                        }
 244
 245                        /* Put core back in configuration state */
 246                        write_c0_mvpcontrol(
 247                                read_c0_mvpcontrol() | MVPCONTROL_VPC );
 248                        mips_ihb();
 249                    }
 250                }
 251                write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
 252                ehb();
 253
 254                /*
 255                 * Set up kernel data structures to use the software total,
 256                 * rather than read the per-VPE Config1 value. The value
 257                 * for "CPU 0" is copied to all the other CPUs as part
 258                 * of their initialization in smtc_cpu_setup().
 259                 */
 260
 261                /* MIPS32 limits TLB indices to 64 */
 262                if (tlbsiz > 64)
 263                        tlbsiz = 64;
 264                cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
 265                smtc_status |= SMTC_TLB_SHARED;
 266                local_flush_tlb_all();
 267
 268                printk("TLB of %d entry pairs shared by %d VPEs\n",
 269                        tlbsiz, vpes);
 270            } else {
 271                printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
 272            }
 273        }
 274}
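    /*
     * Worked example for smtc_configure_tlb() above, with assumed
     * numbers: two VPEs, MVPConf0.PTLBE == 0, and each VPE reporting
     * Config1.MMUSize-1 == 31 gives tlbsiz = 2 * ((31 & 0x3f) + 1) = 64
     * entry pairs, which happens to equal the MIPS32 cap applied above.
     */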
 275
 276
 277/*
 278 * Incrementally build the CPU map out of constituent MIPS MT cores,
 279 * using the specified available VPEs and TCs.  Platform code needs
 280 * to ensure that each MIPS MT core invokes this routine on reset,
 281 * one at a time(!).
 282 *
 283 * This version of the build_cpu_map and prepare_cpus routines assumes
 284 * that *all* TCs of a MIPS MT core will be used for Linux, and that
 285 * they will be spread across *all* available VPEs (to minimise the
 286 * loss of efficiency due to exception service serialization).
 287 * An improved version would pick up configuration information and
 288 * possibly leave some TCs/VPEs as "slave" processors.
 289 *
 290 * Use c0_MVPConf0 to find out how many TCs are available, setting up
 291 * phys_cpu_present_map and the logical/physical mappings.
 292 */
 293
 294int __init mipsmt_build_cpu_map(int start_cpu_slot)
 295{
 296        int i, ntcs;
 297
 298        /*
 299         * The CPU map isn't actually used for anything at this point,
 300         * so it's not clear what else we should do apart from set
 301         * everything up so that "logical" = "physical".
 302         */
 303        ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 304        for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
 305                cpu_set(i, phys_cpu_present_map);
 306                __cpu_number_map[i] = i;
 307                __cpu_logical_map[i] = i;
 308        }
 309#ifdef CONFIG_MIPS_MT_FPAFF
 310        /* Initialize map of CPUs with FPUs */
 311        cpus_clear(mt_fpu_cpumask);
 312#endif
 313
 314        /* One of those TCs is the one booting, and not a secondary... */
 315        printk("%i available secondary CPU TC(s)\n", i - 1);
 316
 317        return i;
 318}
 319
 320/*
 321 * Common setup before any secondaries are started
 322 * Make sure all CPUs are in a sensible state before we boot any of the
 323 * secondaries.
 324 *
 325 * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
 326 * as possible across the available VPEs.
 327 */
 328
 329static void smtc_tc_setup(int vpe, int tc, int cpu)
 330{
 331        settc(tc);
 332        write_tc_c0_tchalt(TCHALT_H);
 333        mips_ihb();
 334        write_tc_c0_tcstatus((read_tc_c0_tcstatus()
 335                        & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
 336                        | TCSTATUS_A);
 337        write_tc_c0_tccontext(0);
 338        /* Bind tc to vpe */
 339        write_tc_c0_tcbind(vpe);
 340        /* In general, all TCs should have the same cpu_data indications */
 341        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
 342        /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
 343        if (cpu_data[0].cputype == CPU_34K)
 344                cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 345        cpu_data[cpu].vpe_id = vpe;
 346        cpu_data[cpu].tc_id = tc;
 347}
 348
 349
 350void mipsmt_prepare_cpus(void)
 351{
 352        int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
 353        unsigned long flags;
 354        unsigned long val;
 355        int nipi;
 356        struct smtc_ipi *pipi;
 357
 358        /* disable interrupts so we can disable MT */
 359        local_irq_save(flags);
 360        /* disable MT so we can configure */
 361        dvpe();
 362        dmt();
 363
 364        spin_lock_init(&freeIPIq.lock);
 365
 366        /*
 367         * We probably don't have as many VPEs as we do SMP "CPUs",
 368         * but it's possible - and in any case we'll never use more!
 369         */
 370        for (i=0; i<NR_CPUS; i++) {
 371                IPIQ[i].head = IPIQ[i].tail = NULL;
 372                spin_lock_init(&IPIQ[i].lock);
 373                IPIQ[i].depth = 0;
 374                atomic_set(&ipi_timer_latch[i], 0);
 375        }
 376
 377        /* cpu_data index starts at zero */
 378        cpu = 0;
 379        cpu_data[cpu].vpe_id = 0;
 380        cpu_data[cpu].tc_id = 0;
 381        cpu++;
 382
 383        /* Report on boot-time options */
 384        mips_mt_set_cpuoptions();
 385        if (vpelimit > 0)
 386                printk("Limit of %d VPEs set\n", vpelimit);
 387        if (tclimit > 0)
 388                printk("Limit of %d TCs set\n", tclimit);
 389        if (nostlb) {
 390                printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
 391        }
 392        if (asidmask)
 393                printk("ASID mask value override to 0x%x\n", asidmask);
 394
 395        /* Temporary */
 396#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 397        if (hang_trig)
 398                printk("Logic Analyser Trigger on suspected TC hang\n");
 399#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 400
 401        /* Put MVPE's into 'configuration state' */
 402        write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
 403
 404        val = read_c0_mvpconf0();
 405        nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
 406        if (vpelimit > 0 && nvpe > vpelimit)
 407                nvpe = vpelimit;
 408        ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
 409        if (ntc > NR_CPUS)
 410                ntc = NR_CPUS;
 411        if (tclimit > 0 && ntc > tclimit)
 412                ntc = tclimit;
 413        slop = ntc % nvpe;
 414        for (i = 0; i < nvpe; i++) {
 415                tcpervpe[i] = ntc / nvpe;
 416                if (slop) {
 417                        if ((slop - i) > 0) tcpervpe[i]++;
 418                }
 419        }
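            /*
             * Worked example: ntc = 5 TCs across nvpe = 2 VPEs gives
             * slop = 1, so the loop above yields tcpervpe[] = { 3, 2 };
             * the remainder TCs go to the lowest-numbered VPEs first.
             */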
 420        /* Handle command line override for VPE0 */
 421        if (vpe0limit > ntc) vpe0limit = ntc;
 422        if (vpe0limit > 0) {
 423                int slopslop;
 424                if (vpe0limit < tcpervpe[0]) {
 425                    /* Reducing TC count - distribute to others */
 426                    slop = tcpervpe[0] - vpe0limit;
 427                    slopslop = slop % (nvpe - 1);
 428                    tcpervpe[0] = vpe0limit;
 429                    for (i = 1; i < nvpe; i++) {
 430                        tcpervpe[i] += slop / (nvpe - 1);
 431                        if (slopslop && ((slopslop - (i - 1)) > 0))
 432                                tcpervpe[i]++;
 433                    }
 434                } else if (vpe0limit > tcpervpe[0]) {
 435                    /* Increasing TC count - steal from others */
 436                    slop = vpe0limit - tcpervpe[0];
 437                    slopslop = slop % (nvpe - 1);
 438                    tcpervpe[0] = vpe0limit;
 439                    for (i = 1; i < nvpe; i++) {
 440                        tcpervpe[i] -= slop / (nvpe - 1);
 441                        if (slopslop && ((slopslop - (i - 1)) > 0))
 442                                tcpervpe[i]--;
 443                    }
 444                }
 445        }
 446
 447        /* Set up shared TLB */
 448        smtc_configure_tlb();
 449
 450        for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
 451                /*
 452                 * Set the MVP bits.
 453                 */
 454                settc(tc);
 455                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
 456                if (vpe != 0)
 457                        printk(", ");
 458                printk("VPE %d: TC", vpe);
 459                for (i = 0; i < tcpervpe[vpe]; i++) {
 460                        /*
 461                         * TC 0 is bound to VPE 0 at reset,
 462                         * and is presumably executing this
 463                         * code.  Leave it alone!
 464                         */
 465                        if (tc != 0) {
 466                                smtc_tc_setup(vpe, tc, cpu);
 467                                cpu++;
 468                        }
 469                        printk(" %d", tc);
 470                        tc++;
 471                }
 472                if (vpe != 0) {
 473                        /*
 474                         * Clear any stale software interrupts from VPE's Cause
 475                         */
 476                        write_vpe_c0_cause(0);
 477
 478                        /*
 479                         * Clear ERL/EXL of VPEs other than 0
 480                         * and set restricted interrupt enable/mask.
 481                         */
 482                        write_vpe_c0_status((read_vpe_c0_status()
 483                                & ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
 484                                | (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
 485                                | ST0_IE));
 486                        /*
 487                         * set config to be the same as vpe0,
 488                         *  particularly kseg0 coherency alg
 489                         */
 490                        write_vpe_c0_config(read_c0_config());
 491                        /* Clear any pending timer interrupt */
 492                        write_vpe_c0_compare(0);
 493                        /* Propagate Config7 */
 494                        write_vpe_c0_config7(read_c0_config7());
 495                        write_vpe_c0_count(read_c0_count());
 496                }
 497                /* enable multi-threading within VPE */
 498                write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
 499                /* enable the VPE */
 500                write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
 501        }
 502
 503        /*
 504         * Pull any physically present but unused TCs out of circulation.
 505         */
 506        while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
 507                cpu_clear(tc, phys_cpu_present_map);
 508                cpu_clear(tc, cpu_present_map);
 509                tc++;
 510        }
 511
 512        /* release config state */
 513        write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
 514
 515        printk("\n");
 516
 517        /* Set up coprocessor affinity CPU mask(s) */
 518
 519#ifdef CONFIG_MIPS_MT_FPAFF
 520        for (tc = 0; tc < ntc; tc++) {
 521                if (cpu_data[tc].options & MIPS_CPU_FPU)
 522                        cpu_set(tc, mt_fpu_cpumask);
 523        }
 524#endif
 525
 526        /* set up ipi interrupts... */
 527
 528        /* If we have multiple VPEs running, set up the cross-VPE interrupt */
 529
 530        setup_cross_vpe_interrupts(nvpe);
 531
 532        /* Set up queue of free IPI "messages". */
 533        nipi = NR_CPUS * IPIBUF_PER_CPU;
 534        if (ipibuffers > 0)
 535                nipi = ipibuffers;
 536
 537        pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
 538        if (pipi == NULL)
 539                panic("kmalloc of IPI message buffers failed\n");
 540        else
 541                printk("IPI buffer pool of %d buffers\n", nipi);
 542        for (i = 0; i < nipi; i++) {
 543                smtc_ipi_nq(&freeIPIq, pipi);
 544                pipi++;
 545        }
 546
 547        /* Arm multithreading and enable other VPEs - but all TCs are Halted */
 548        emt(EMT_ENABLE);
 549        evpe(EVPE_ENABLE);
 550        local_irq_restore(flags);
 551        /* Initialize SMTC /proc statistics/diagnostics */
 552        init_smtc_stats();
 553}
 554
 555
 556/*
 557 * Setup the PC, SP, and GP of a secondary processor and start it
 558 * running!
 559 * smp_bootstrap is the place to resume from
 560 * __KSTK_TOS(idle) is apparently the stack pointer
 561 * (unsigned long)idle->thread_info is the gp
 562 *
 563 */
 564void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 565{
 566        extern u32 kernelsp[NR_CPUS];
 567        long flags;
 568        int mtflags;
 569
 570        LOCK_MT_PRA();
 571        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 572                dvpe();
 573        }
 574        settc(cpu_data[cpu].tc_id);
 575
 576        /* pc */
 577        write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
 578
 579        /* stack pointer */
 580        kernelsp[cpu] = __KSTK_TOS(idle);
 581        write_tc_gpr_sp(__KSTK_TOS(idle));
 582
 583        /* global pointer */
 584        write_tc_gpr_gp((unsigned long)task_thread_info(idle));
 585
 586        smtc_status |= SMTC_MTC_ACTIVE;
 587        write_tc_c0_tchalt(0);
 588        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 589                evpe(EVPE_ENABLE);
 590        }
 591        UNLOCK_MT_PRA();
 592}
 593
 594void smtc_init_secondary(void)
 595{
 596        /*
 597         * Start timer on secondary VPEs if necessary.
 598         * plat_timer_setup has already been invoked by init/main
 599         * on the "boot" TC.  Like the per_cpu_trap_init() hack, this
 600         * assumes that the SMTC init code assigns TCs consecutively
 601         * and in ascending order across the available VPEs.
 602         */
 603        if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
 604            ((read_c0_tcbind() & TCBIND_CURVPE)
 605            != cpu_data[smp_processor_id() - 1].vpe_id)){
 606                write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
 607        }
 608
 609        local_irq_enable();
 610}
 611
 612void smtc_smp_finish(void)
 613{
 614        printk("TC %d going on-line as CPU %d\n",
 615                cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 616}
 617
 618void smtc_cpus_done(void)
 619{
 620}
 621
 622/*
 623 * Support for SMTC-optimized driver IRQ registration
 624 */
 625
 626/*
 627 * SMTC Kernel needs to manipulate low-level CPU interrupt mask
 628 * in do_IRQ. These are passed in setup_irq_smtc() and stored
 629 * in this table.
 630 */
 631
 632int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 633                        unsigned long hwmask)
 634{
 635#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 636        unsigned int vpe = current_cpu_data.vpe_id;
 637
 638        vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
 639#endif
 640        irq_hwmask[irq] = hwmask;
 641
 642        return setup_irq(irq, new);
 643}
 644
 645#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 646/*
 647 * Support for IRQ affinity to TCs
 648 */
 649
 650void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 651{
 652        /*
 653         * If a "fast path" cache of quickly decodable affinity state
 654         * is maintained, this is where it gets done, on a call up
 655         * from the platform affinity code.
 656         */
 657}
 658
 659void smtc_forward_irq(unsigned int irq)
 660{
 661        int target;
 662
 663        /*
 664         * OK wise guy, now figure out how to get the IRQ
 665         * to be serviced on an authorized "CPU".
 666         *
 667         * Ideally, to handle the situation where an IRQ has multiple
 668         * eligible CPUS, we would maintain state per IRQ that would
 669         * allow a fair distribution of service requests.  Since the
 670         * expected use model is any-or-only-one, for simplicity
 671         * and efficiency, we just pick the easiest one to find.
 672         */
 673
 674        target = first_cpu(irq_desc[irq].affinity);
 675
 676        /*
 677         * We depend on the platform code to have correctly processed
 678         * IRQ affinity change requests to ensure that the IRQ affinity
 679         * mask has been purged of bits corresponding to nonexistent and
 680         * offline "CPUs", and to TCs bound to VPEs other than the VPE
 681         * connected to the physical interrupt input for the interrupt
 682         * in question.  Otherwise we have a nasty problem with interrupt
 683         * mask management.  This is best handled in non-performance-critical
 684         * platform IRQ affinity setting code,  to minimize interrupt-time
 685         * checks.
 686         */
 687
 688        /* If no one is eligible, service locally */
 689        if (target >= NR_CPUS) {
 690                do_IRQ_no_affinity(irq);
 691                return;
 692        }
 693
 694        smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
 695}
 696
 697#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 698
 699/*
 700 * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
 701 * Within a VPE one TC can interrupt another by different approaches.
 702 * The easiest to get right would probably be to make all TCs except
 703 * the target IXMT and set a software interrupt, but an IXMT-based
 704 * scheme requires that a handler must run before a new IPI could
 705 * be sent, which would break the "broadcast" loops in MIPS MT.
 706 * A more gonzo approach within a VPE is to halt the TC, extract
 707 * its Restart, Status, and a couple of GPRs, and program the Restart
 708 * address to emulate an interrupt.
 709 *
 710 * Within a VPE, one can be confident that the target TC isn't in
 711 * a critical EXL state when halted, since the write to the Halt
 712 * register could not have issued on the writing thread if the
 713 * halting thread had EXL set. So k0 and k1 of the target TC
 714 * can be used by the injection code.  Across VPEs, one can't
 715 * be certain that the target TC isn't in a critical exception
 716 * state. So we try a two-step process of sending a software
 717 * interrupt to the target VPE, which either handles the event
 718 * itself (if it was the target) or injects the event within
 719 * the VPE.
 720 */
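    /*
     * In outline, smtc_send_ipi() below takes one of three paths:
     * a target on another VPE gets the message queued plus a cross-VPE
     * SW1 interrupt; a target TC on this VPE with IXMT clear gets a
     * direct injection via post_direct_ipi(); a target TC on this VPE
     * with IXMT set gets the message queued for deferred replay.
     */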
 721
 722static void smtc_ipi_qdump(void)
 723{
 724        int i;
 725
 726        for (i = 0; i < NR_CPUS ;i++) {
 727                printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
 728                        i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
 729                        IPIQ[i].depth);
 730        }
 731}
 732
 733/*
 734 * The standard atomic.h primitives don't quite do what we want
 735 * here: We need an atomic add-and-return-previous-value (which
 736 * could be done with atomic_add_return and a decrement) and an
 737 * atomic set/zero-and-return-previous-value (which can't really
 738 * be done with the atomic.h primitives). And since this is
 739 * MIPS MT, we can assume that we have LL/SC.
 740 */
 741static inline int atomic_postincrement(atomic_t *v)
 742{
 743        unsigned long result;
 744
 745        unsigned long temp;
 746
 747        __asm__ __volatile__(
 748        "1:     ll      %0, %2                                  \n"
 749        "       addu    %1, %0, 1                               \n"
 750        "       sc      %1, %2                                  \n"
 751        "       beqz    %1, 1b                                  \n"
 752        __WEAK_LLSC_MB
 753        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
 754        : "m" (v->counter)
 755        : "memory");
 756
 757        return result;
 758}
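    /*
     * Semantics: returns the value of *v before the increment, i.e. a
     * fetch-and-add of 1.  For example, if v->counter is 0 on entry the
     * caller sees 0 and v->counter is left at 1, which is how
     * smtc_send_ipi() detects an already-latched timer IPI.
     */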
 759
 760void smtc_send_ipi(int cpu, int type, unsigned int action)
 761{
 762        int tcstatus;
 763        struct smtc_ipi *pipi;
 764        long flags;
 765        int mtflags;
 766
 767        if (cpu == smp_processor_id()) {
 768                printk("Cannot Send IPI to self!\n");
 769                return;
 770        }
 771        /* Set up a descriptor, to be delivered either promptly or queued */
 772        pipi = smtc_ipi_dq(&freeIPIq);
 773        if (pipi == NULL) {
 774                bust_spinlocks(1);
 775                mips_mt_regdump(dvpe());
 776                panic("IPI Msg. Buffers Depleted\n");
 777        }
 778        pipi->type = type;
 779        pipi->arg = (void *)action;
 780        pipi->dest = cpu;
 781        if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 782                if (type == SMTC_CLOCK_TICK)
 783                        atomic_inc(&ipi_timer_latch[cpu]);
 784                /* If not on same VPE, enqueue and send cross-VPE interrupt */
 785                smtc_ipi_nq(&IPIQ[cpu], pipi);
 786                LOCK_CORE_PRA();
 787                settc(cpu_data[cpu].tc_id);
 788                write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
 789                UNLOCK_CORE_PRA();
 790        } else {
 791                /*
 792                 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
 793                 * since ASID shootdown on the other VPE may
 794                 * collide with this operation.
 795                 */
 796                LOCK_CORE_PRA();
 797                settc(cpu_data[cpu].tc_id);
 798                /* Halt the targeted TC */
 799                write_tc_c0_tchalt(TCHALT_H);
 800                mips_ihb();
 801
 802                /*
 803                 * Inspect TCStatus - if IXMT is set, we have to queue
 804                 * a message. Otherwise, we set up the "interrupt"
 805                 * of the other TC
 806                 */
 807                tcstatus = read_tc_c0_tcstatus();
 808
 809                if ((tcstatus & TCSTATUS_IXMT) != 0) {
 810                        /*
 811                         * Spin-waiting here can deadlock,
 812                         * so we queue the message for the target TC.
 813                         */
 814                        write_tc_c0_tchalt(0);
 815                        UNLOCK_CORE_PRA();
 816                        /* Try to reduce redundant timer interrupt messages */
 817                        if (type == SMTC_CLOCK_TICK) {
 818                            if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
 819                                smtc_ipi_nq(&freeIPIq, pipi);
 820                                return;
 821                            }
 822                        }
 823                        smtc_ipi_nq(&IPIQ[cpu], pipi);
 824                } else {
 825                        if (type == SMTC_CLOCK_TICK)
 826                                atomic_inc(&ipi_timer_latch[cpu]);
 827                        post_direct_ipi(cpu, pipi);
 828                        write_tc_c0_tchalt(0);
 829                        UNLOCK_CORE_PRA();
 830                }
 831        }
 832}
 833
 834/*
 835 * Send IPI message to Halted TC, TargTC/TargVPE already having been set
 836 */
 837static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
 838{
 839        struct pt_regs *kstack;
 840        unsigned long tcstatus;
 841        unsigned long tcrestart;
 842        extern u32 kernelsp[NR_CPUS];
 843        extern void __smtc_ipi_vector(void);
 844//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
 845
 846        /* Extract Status, EPC from halted TC */
 847        tcstatus = read_tc_c0_tcstatus();
 848        tcrestart = read_tc_c0_tcrestart();
 849        /* If TCRestart indicates a WAIT instruction, advance the PC */
 850        if ((tcrestart & 0x80000000)
 851            && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
 852                tcrestart += 4;
 853        }
 854        /*
 855         * Save on TC's future kernel stack
 856         *
 857         * CU bit of Status is indicator that TC was
 858         * already running on a kernel stack...
 859         */
 860        if (tcstatus & ST0_CU0)  {
 861                /* Note that this "- 1" is pointer arithmetic */
 862                kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
 863        } else {
 864                kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
 865        }
 866
 867        kstack->cp0_epc = (long)tcrestart;
 868        /* Save TCStatus */
 869        kstack->cp0_tcstatus = tcstatus;
 870        /* Pass token of operation to be performed in kernel stack pad area */
 871        kstack->pad0[4] = (unsigned long)pipi;
 872        /* Pass address of function to be called likewise */
 873        kstack->pad0[5] = (unsigned long)&ipi_decode;
 874        /* Set interrupt exempt and kernel mode */
 875        tcstatus |= TCSTATUS_IXMT;
 876        tcstatus &= ~TCSTATUS_TKSU;
 877        write_tc_c0_tcstatus(tcstatus);
 878        ehb();
 879        /* Set TC Restart address to be SMTC IPI vector */
 880        write_tc_c0_tcrestart(__smtc_ipi_vector);
 881}
 882
 883static void ipi_resched_interrupt(void)
 884{
 885        /* Return from interrupt should be enough to cause scheduler check */
 886}
 887
 888
 889static void ipi_call_interrupt(void)
 890{
 891        /* Invoke generic function invocation code in smp.c */
 892        smp_call_function_interrupt();
 893}
 894
 895DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
 896
 897void ipi_decode(struct smtc_ipi *pipi)
 898{
 899        unsigned int cpu = smp_processor_id();
 900        struct clock_event_device *cd;
 901        void *arg_copy = pipi->arg;
 902        int type_copy = pipi->type;
 903        int ticks;
 904
 905        smtc_ipi_nq(&freeIPIq, pipi);
 906        switch (type_copy) {
 907        case SMTC_CLOCK_TICK:
 908                irq_enter();
 909                kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
 910                cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
 911                ticks = atomic_read(&ipi_timer_latch[cpu]);
 912                atomic_sub(ticks, &ipi_timer_latch[cpu]);
 913                while (ticks) {
 914                        cd->event_handler(cd);
 915                        ticks--;
 916                }
 917                irq_exit();
 918                break;
 919
 920        case LINUX_SMP_IPI:
 921                switch ((int)arg_copy) {
 922                case SMP_RESCHEDULE_YOURSELF:
 923                        ipi_resched_interrupt();
 924                        break;
 925                case SMP_CALL_FUNCTION:
 926                        ipi_call_interrupt();
 927                        break;
 928                default:
 929                        printk("Impossible SMTC IPI Argument 0x%x\n",
 930                                (int)arg_copy);
 931                        break;
 932                }
 933                break;
 934#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 935        case IRQ_AFFINITY_IPI:
 936                /*
 937                 * Accept a "forwarded" interrupt that was initially
 938                 * taken by a TC who doesn't have affinity for the IRQ.
 939                 */
 940                do_IRQ_no_affinity((int)arg_copy);
 941                break;
 942#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 943        default:
 944                printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
 945                break;
 946        }
 947}
 948
 949void deferred_smtc_ipi(void)
 950{
 951        struct smtc_ipi *pipi;
 952        unsigned long flags;
 953/* DEBUG */
 954        int q = smp_processor_id();
 955
 956        /*
 957         * Test is not atomic, but much faster than a dequeue,
 958         * and the vast majority of invocations will have a null queue.
 959         */
 960        if (IPIQ[q].head != NULL) {
 961                while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
 962                        /* ipi_decode() should be called with interrupts off */
 963                        local_irq_save(flags);
 964                        ipi_decode(pipi);
 965                        local_irq_restore(flags);
 966                }
 967        }
 968}
 969
 970/*
 971 * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
 972 * set via cross-VPE MTTR manipulation of the Cause register. It would be
 973 * in some regards preferable to have external logic for "doorbell" hardware
 974 * interrupts.
 975 */
 976
 977static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
 978
 979static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
 980{
 981        int my_vpe = cpu_data[smp_processor_id()].vpe_id;
 982        int my_tc = cpu_data[smp_processor_id()].tc_id;
 983        int cpu;
 984        struct smtc_ipi *pipi;
 985        unsigned long tcstatus;
 986        int sent;
 987        long flags;
 988        unsigned int mtflags;
 989        unsigned int vpflags;
 990
 991        /*
 992         * So long as cross-VPE interrupts are done via
 993         * MFTR/MTTR read-modify-writes of Cause, we need
 994         * to stop other VPEs whenever the local VPE does
 995         * anything similar.
 996         */
 997        local_irq_save(flags);
 998        vpflags = dvpe();
 999        clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
1000        set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
1001        irq_enable_hazard();
1002        evpe(vpflags);
1003        local_irq_restore(flags);
1004
1005        /*
1006         * Cross-VPE Interrupt handler: Try to directly deliver IPIs
1007         * queued for TCs on this VPE other than the current one.
1008         * Return-from-interrupt should cause us to drain the queue
1009         * for the current TC, so we ought not to have to do it explicitly here.
1010         */
1011
1012        for_each_online_cpu(cpu) {
1013                if (cpu_data[cpu].vpe_id != my_vpe)
1014                        continue;
1015
1016                pipi = smtc_ipi_dq(&IPIQ[cpu]);
1017                if (pipi != NULL) {
1018                        if (cpu_data[cpu].tc_id != my_tc) {
1019                                sent = 0;
1020                                LOCK_MT_PRA();
1021                                settc(cpu_data[cpu].tc_id);
1022                                write_tc_c0_tchalt(TCHALT_H);
1023                                mips_ihb();
1024                                tcstatus = read_tc_c0_tcstatus();
1025                                if ((tcstatus & TCSTATUS_IXMT) == 0) {
1026                                        post_direct_ipi(cpu, pipi);
1027                                        sent = 1;
1028                                }
1029                                write_tc_c0_tchalt(0);
1030                                UNLOCK_MT_PRA();
1031                                if (!sent) {
1032                                        smtc_ipi_req(&IPIQ[cpu], pipi);
1033                                }
1034                        } else {
1035                                /*
1036                                 * ipi_decode() should be called
1037                                 * with interrupts off
1038                                 */
1039                                local_irq_save(flags);
1040                                ipi_decode(pipi);
1041                                local_irq_restore(flags);
1042                        }
1043                }
1044        }
1045
1046        return IRQ_HANDLED;
1047}
1048
1049static void ipi_irq_dispatch(void)
1050{
1051        do_IRQ(cpu_ipi_irq);
1052}
1053
1054static struct irqaction irq_ipi = {
1055        .handler        = ipi_interrupt,
1056        /* IRQF_DISABLED and IRQF_PERCPU must be combined in one initializer */
1057        .flags          = IRQF_DISABLED | IRQF_PERCPU,
1058        .name           = "SMTC_IPI"
1059};
1060
1061static void setup_cross_vpe_interrupts(unsigned int nvpe)
1062{
1063        if (nvpe < 1)
1064                return;
1065
1066        if (!cpu_has_vint)
1067                panic("SMTC Kernel requires Vectored Interrupt support");
1068
1069        set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
1070
1071        setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1072
1073        set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
1074}
1075
1076/*
1077 * SMTC-specific hacks invoked from elsewhere in the kernel.
1078 *
1079 * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
1080 * called with interrupts disabled.  We do rely on interrupts being disabled
1081 * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
1082 * result in a recursive call to raw_local_irq_restore().
1083 */
1084
1085static void __smtc_ipi_replay(void)
1086{
1087        unsigned int cpu = smp_processor_id();
1088
1089        /*
1090         * To the extent that we've ever turned interrupts off,
1091         * we may have accumulated deferred IPIs.  This is subtle.
1092         * If we use the smtc_ipi_qdepth() macro, we'll get an
1093         * exact number - but we'll also disable interrupts
1094         * and create a window of failure where a new IPI gets
1095         * queued after we test the depth but before we re-enable
1096         * interrupts. So long as IXMT never gets set, however,
1097         * we should be OK:  If we pick up something and dispatch
1098         * it here, that's great. If we see nothing, but concurrent
1099         * with this operation, another TC sends us an IPI, IXMT
1100         * is clear, and we'll handle it as a real pseudo-interrupt
1101         * and not a pseudo-pseudo interrupt.
1102         */
1103        if (IPIQ[cpu].depth > 0) {
1104                while (1) {
1105                        struct smtc_ipi_q *q = &IPIQ[cpu];
1106                        struct smtc_ipi *pipi;
1107                        extern void self_ipi(struct smtc_ipi *);
1108
1109                        spin_lock(&q->lock);
1110                        pipi = __smtc_ipi_dq(q);
1111                        spin_unlock(&q->lock);
1112                        if (!pipi)
1113                                break;
1114
1115                        self_ipi(pipi);
1116                        smtc_cpu_stats[cpu].selfipis++;
1117                }
1118        }
1119}
1120
1121void smtc_ipi_replay(void)
1122{
1123        raw_local_irq_disable();
1124        __smtc_ipi_replay();
1125}
1126
1127EXPORT_SYMBOL(smtc_ipi_replay);
1128
1129void smtc_idle_loop_hook(void)
1130{
1131#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
1132        int im;
1133        int flags;
1134        int mtflags;
1135        int bit;
1136        int vpe;
1137        int tc;
1138        int hook_ntcs;
1139        /*
1140         * printk within DMT-protected regions can deadlock,
1141         * so buffer diagnostic messages for later output.
1142         */
1143        char *pdb_msg;
1144        char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
1145
1146        if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
1147                if (atomic_add_return(1, &idle_hook_initialized) == 1) {
1148                        int mvpconf0;
1149                        /* Tedious stuff to just do once */
1150                        mvpconf0 = read_c0_mvpconf0();
1151                        hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
1152                        if (hook_ntcs > NR_CPUS)
1153                                hook_ntcs = NR_CPUS;
1154                        for (tc = 0; tc < hook_ntcs; tc++) {
1155                                tcnoprog[tc] = 0;
1156                                clock_hang_reported[tc] = 0;
1157                        }
1158                        for (vpe = 0; vpe < 2; vpe++)
1159                                for (im = 0; im < 8; im++)
1160                                        imstuckcount[vpe][im] = 0;
1161                        printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
1162                        atomic_set(&idle_hook_initialized, 1000);
1163                } else {
1164                        /* Someone else is initializing in parallel - let 'em finish */
1165                        while (atomic_read(&idle_hook_initialized) < 1000)
1166                                ;
1167                }
1168        }
1169
1170        /* Have we stupidly left IXMT set somewhere? */
1171        if (read_c0_tcstatus() & 0x400) {
1172                write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
1173                ehb();
1174                printk("Dangling IXMT in cpu_idle()\n");
1175        }
1176
1177        /* Have we stupidly left an IM bit turned off? */
1178#define IM_LIMIT 2000
1179        local_irq_save(flags);
1180        mtflags = dmt();
1181        pdb_msg = &id_ho_db_msg[0];
1182        im = read_c0_status();
1183        vpe = current_cpu_data.vpe_id;
1184        for (bit = 0; bit < 8; bit++) {
1185                /*
1186                 * In current prototype, I/O interrupts
1187                 * are masked for VPE > 0
1188                 */
1189                if (vpemask[vpe][bit]) {
1190                        if (!(im & (0x100 << bit)))
1191                                imstuckcount[vpe][bit]++;
1192                        else
1193                                imstuckcount[vpe][bit] = 0;
1194                        if (imstuckcount[vpe][bit] > IM_LIMIT) {
1195                                set_c0_status(0x100 << bit);
1196                                ehb();
1197                                imstuckcount[vpe][bit] = 0;
1198                                pdb_msg += sprintf(pdb_msg,
1199                                        "Dangling IM %d fixed for VPE %d\n", bit,
1200                                        vpe);
1201                        }
1202                }
1203        }
1204
1205        /*
1206         * Now that we limit outstanding timer IPIs, check for hung TC
1207         */
1208        for (tc = 0; tc < NR_CPUS; tc++) {
1209                /* Don't check ourselves - we'll dequeue IPIs just below */
1210                if ((tc != smp_processor_id()) &&
1211                    atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
1212                    if (clock_hang_reported[tc] == 0) {
1213                        pdb_msg += sprintf(pdb_msg,
1214                                "TC %d looks hung with timer latch at %d\n",
1215                                tc, atomic_read(&ipi_timer_latch[tc]));
1216                        clock_hang_reported[tc]++;
1217                        }
1218                }
1219        }
1220        emt(mtflags);
1221        local_irq_restore(flags);
1222        if (pdb_msg != &id_ho_db_msg[0])
1223                printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
1224#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
1225
1226        /*
1227         * Replay any accumulated deferred IPIs. If "Instant Replay"
1228         * is in use, there should never be any.
1229         */
1230#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
1231        {
1232                unsigned long flags;
1233
1234                local_irq_save(flags);
1235                __smtc_ipi_replay();
1236                local_irq_restore(flags);
1237        }
1238#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
1239}
1240
1241void smtc_soft_dump(void)
1242{
1243        int i;
1244
1245        printk("Counter Interrupts taken per CPU (TC)\n");
1246        for (i=0; i < NR_CPUS; i++) {
1247                printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
1248        }
1249        printk("Self-IPI invocations:\n");
1250        for (i=0; i < NR_CPUS; i++) {
1251                printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1252        }
1253        smtc_ipi_qdump();
1254        printk("Timer IPI Backlogs:\n");
1255        for (i=0; i < NR_CPUS; i++) {
1256                printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
1257        }
1258        printk("%d Recoveries of \"stolen\" FPU\n",
1259               atomic_read(&smtc_fpu_recoveries));
1260}
1261
1262
1263/*
1264 * TLB management routines special to SMTC
1265 */
1266
1267void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1268{
1269        unsigned long flags, mtflags, tcstat, prevhalt, asid;
1270        int tlb, i;
1271
1272        /*
1273         * It would be nice to be able to use a spinlock here,
1274         * but this is invoked from within TLB flush routines
1275         * that protect themselves with DVPE, so if a lock is
1276         * held by another TC, it'll never be freed.
1277         *
1278         * DVPE/DMT must not be done with interrupts enabled,
1279         * so even so most callers will already have disabled
1280         * them, let's be really careful...
1281         */
1282
1283        local_irq_save(flags);
1284        if (smtc_status & SMTC_TLB_SHARED) {
1285                mtflags = dvpe();
1286                tlb = 0;
1287        } else {
1288                mtflags = dmt();
1289                tlb = cpu_data[cpu].vpe_id;
1290        }
1291        asid = asid_cache(cpu);
1292
1293        do {
1294                if (!((asid += ASID_INC) & ASID_MASK) ) {
1295                        if (cpu_has_vtag_icache)
1296                                flush_icache_all();
1297                        /* Traverse all online CPUs (hack requires contiguous range) */
1298                        for_each_online_cpu(i) {
1299                                /*
1300                                 * We don't need to worry about our own CPU, nor those of
1301                                 * CPUs who don't share our TLB.
1302                                 */
1303                                if ((i != smp_processor_id()) &&
1304                                    ((smtc_status & SMTC_TLB_SHARED) ||
1305                                     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
1306                                        settc(cpu_data[i].tc_id);
1307                                        prevhalt = read_tc_c0_tchalt() & TCHALT_H;
1308                                        if (!prevhalt) {
1309                                                write_tc_c0_tchalt(TCHALT_H);
1310                                                mips_ihb();
1311                                        }
1312                                        tcstat = read_tc_c0_tcstatus();
1313                                        smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1314                                        if (!prevhalt)
1315                                                write_tc_c0_tchalt(0);
1316                                }
1317                        }
1318                        if (!asid)              /* fix version if needed */
1319                                asid = ASID_FIRST_VERSION;
1320                        local_flush_tlb_all();  /* start new asid cycle */
1321                }
1322        } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1323
1324        /*
1325         * SMTC shares the TLB within VPEs and possibly across all VPEs.
1326         */
1327        for_each_online_cpu(i) {
1328                if ((smtc_status & SMTC_TLB_SHARED) ||
1329                    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
1330                        cpu_context(i, mm) = asid_cache(i) = asid;
1331        }
1332
1333        if (smtc_status & SMTC_TLB_SHARED)
1334                evpe(mtflags);
1335        else
1336                emt(mtflags);
1337        local_irq_restore(flags);
1338}
1339
1340/*
1341 * Invoked from macros defined in mmu_context.h
1342 * which must already have disabled interrupts
1343 * and done a DVPE or DMT as appropriate.
1344 */
1345
1346void smtc_flush_tlb_asid(unsigned long asid)
1347{
1348        int entry;
1349        unsigned long ehi;
1350
1351        entry = read_c0_wired();
1352
1353        /* Traverse all non-wired entries */
1354        while (entry < current_cpu_data.tlbsize) {
1355                write_c0_index(entry);
1356                ehb();
1357                tlb_read();
1358                ehb();
1359                ehi = read_c0_entryhi();
1360                if ((ehi & ASID_MASK) == asid) {
1361                    /*
1362                     * Invalidate only entries with specified ASID,
1363                     * making sure all entries differ.
1364                     */
1365                    write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
1366                    write_c0_entrylo0(0);
1367                    write_c0_entrylo1(0);
1368                    mtc0_tlbw_hazard();
1369                    tlb_write_indexed();
1370                }
1371                entry++;
1372        }
1373        write_c0_index(PARKED_INDEX);
1374        tlbw_use_hazard();
1375}
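    /*
     * Note on the idiom above (standard MIPS practice, stated here for
     * clarity): parking EntryHi at CKSEG0 + (entry << (PAGE_SHIFT + 1))
     * gives each invalidated slot a distinct VPN2 in unmapped kernel
     * space, so no duplicate or accidentally-matching TLB entries result.
     */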
1376
1377/*
1378 * Support for single-threading cache flush operations.
1379 */
1380
1381static int halt_state_save[NR_CPUS];
1382
1383/*
1384 * To really, really be sure that nothing is being done
1385 * by other TCs, halt them all.  This code assumes that
1386 * a DVPE has already been done, so while their Halted
1387 * state is theoretically architecturally unstable, in
1388 * practice, it's not going to change while we're looking
1389 * at it.
1390 */
1391
1392void smtc_cflush_lockdown(void)
1393{
1394        int cpu;
1395
1396        for_each_online_cpu(cpu) {
1397                if (cpu != smp_processor_id()) {
1398                        settc(cpu_data[cpu].tc_id);
1399                        halt_state_save[cpu] = read_tc_c0_tchalt();
1400                        write_tc_c0_tchalt(TCHALT_H);
1401                }
1402        }
1403        mips_ihb();
1404}
1405
1406/* It would be cheating to change the cpu_online states during a flush! */
1407
1408void smtc_cflush_release(void)
1409{
1410        int cpu;
1411
1412        /*
1413         * Start with a hazard barrier to ensure
1414         * that all CACHE ops have played through.
1415         */
1416        mips_ihb();
1417
1418        for_each_online_cpu(cpu) {
1419                if (cpu != smp_processor_id()) {
1420                        settc(cpu_data[cpu].tc_id);
1421                        write_tc_c0_tchalt(halt_state_save[cpu]);
1422                }
1423        }
1424        mips_ihb();
1425}
1426