linux/arch/alpha/kernel/smp.c
/*
 *      linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)      printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

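/* Each IPI message type is a bit index into ipi_data[cpu].bits: senders
   set the bit with set_bit() in send_ipi_message() and the receiver
   claims the whole word with xchg() in handle_ipi(), so several message
   types can be delivered by a single interrupt.  */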
enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
};

/* Handshake between smp_boot_one_cpu() and smp_callin(): the boot CPU
   holds this at -1 while starting a secondary, then drops it to 0; the
   secondary sets it to 1 once it is alive.  */
static int smp_secondary_alive = 0;

int smp_num_probed;             /* Internal processor count */
int smp_num_cpus = 1;           /* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
        cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
        cpu_data[cpuid].need_new_asn = 0;
        cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
        unsigned long stop = jiffies + 10*HZ;

        while (time_before(jiffies, stop)) {
                if (!smp_secondary_alive)
                        return;
                barrier();
        }

        printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
        for (;;)
                barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpu_online(cpuid)) {
                printk("??, cpu 0x%x already present??\n", cpuid);
                BUG();
        }
        set_cpu_online(cpuid, true);

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Get our local ticker going.  */
        smp_setup_percpu_timer(cpuid);

        /* Call platform-specific callin, if specified.  */
        if (alpha_mv.smp_callin)
                alpha_mv.smp_callin();

        /* All kernel threads share the same mm context.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Inform the notifiers about the new cpu.  */
        notify_cpu_starting(cpuid);

        /* Must have completely accurate bogos.  */
        local_irq_enable();

        /* Wait for the boot CPU to stop, with irqs enabled, before
           running calibrate_delay.  */
        wait_boot_cpu_to_stop(cpuid);
        mb();
        calibrate_delay();

        smp_store_cpu_info(cpuid);
        /* Allow the master to continue only after we've written
           loops_per_jiffy.  */
        wmb();
        smp_secondary_alive = 1;

        DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
              cpuid, current, current->active_mm));

        preempt_disable();
        cpu_startup_entry(CPUHP_ONLINE);
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy(unsigned long cpumask)
{
        unsigned long timeout;

        if (!(hwrpb->txrdy & cpumask))
                return 0;

        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (!(hwrpb->txrdy & cpumask))
                        return 0;
                udelay(10);
                barrier();
        }

        return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1UL << cpuid);
        if (wait_for_txrdy(cpumask))
                goto timeout;

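        /* The message goes out through the target CPU's ipc_buffer: the
           byte count is written into the first quadword and the text
           follows it.  Setting the target's bit in hwrpb->rxrdy then tells
           the console firmware that a message is waiting; the layout is
           the console's convention (cf. the matching reads in
           recv_secondary_console_msg below).  */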
        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);

        /* atomic test and set */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (wait_for_txrdy(cpumask))
                goto timeout;
        return;

 timeout:
        printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

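        /* txrdy holds one bit per CPU with a console message pending.
           Note the byte count is taken from the upper half of the first
           ipc_buffer quadword here, which appears to be the console's
           convention for messages it originates (the kernel-to-console
           direction above uses the lower half).  */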
        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1UL << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                  ((char*)hwrpb
                   + hwrpb->processor_offset
                   + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[1];
                        cp2 = buf;
                        memcpy(cp2, cp1, cnt);
                        cp2[cnt] = '\0';

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                      "message is '%s'\n", mycpu, buf));
        }

        hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb, *ipcb;
        unsigned long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
        ipcb = &task_thread_info(idle)->pcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's pcb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = ipcb->ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(ipcb);
        hwpcb->flags = ipcb->flags;
        hwpcb->res1 = hwpcb->res2 = 0;

#if 0
        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, ipcb->flags));

        /* Set up the HWRPB fields that SRM uses to activate the
           secondary CPU.  */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum.  */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 10 seconds for an ACK from the console.  */
        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

 started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}

/*
 * Bring one cpu online.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
        unsigned long timeout;

        /* Signal the secondary to wait a moment.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* Notify the secondary CPU it can run calibrate_delay.  */
        mb();
        smp_secondary_alive = 0;

        /* We've been acked by the console; wait one second for
           the task to start up for real.  */
        timeout = jiffies + 1*HZ;
        while (time_before(jiffies, timeout)) {
                if (smp_secondary_alive == 1)
                        goto alive;
                udelay(10);
                barrier();
        }

        /* We failed to boot the CPU.  */

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

 alive:
        /* Another "Red Snapper". */
        return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        unsigned long i;

        if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
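                        /* 0x1cc selects the SRM per-CPU state flags that
                           mark a usable processor; judging by the RC/CV/BIP
                           bits manipulated in secondary_cpu_start(), these
                           should be Processor Available, Processor Present,
                           and the PALcode Valid/Memory-Valid/Loaded bits.  */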
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                set_cpu_possible(i, true);
                                set_cpu_present(i, true);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
        }

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
               smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* Take care of some initial bookkeeping.  */
        memset(ipi_data, 0, sizeof(ipi_data));

        current_thread_info()->cpu = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_setup_percpu_timer(boot_cpuid);

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                init_cpu_possible(cpumask_of(boot_cpuid));
                init_cpu_present(cpumask_of(boot_cpuid));
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        smp_num_cpus = smp_num_probed;
}

void
smp_prepare_boot_cpu(void)
{
}

int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        smp_boot_one_cpu(cpu, tidle);

        return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu))
                        bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               (bogosum + 2500) / (500000/HZ),
               ((bogosum + 2500) / (5000/HZ)) % 100);
}


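/* Per-CPU timer tick handler: drives profiling on every tick, and every
   prof_counter ticks does the normal timekeeping/scheduler work inside an
   irq_enter/irq_exit section.  Both counters are initialized to 1 in
   smp_setup_percpu_timer, so currently that is every tick.  */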
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();
        unsigned long user = user_mode(regs);
        struct cpuinfo_alpha *data = &cpu_data[cpu];

        old_regs = set_irq_regs(regs);

        /* Record kernel PC.  */
        profile_tick(CPU_PROFILING);

        if (!--data->prof_counter) {
                /* We need to make like a normal interrupt -- otherwise
                   timer interrupts ignore the global interrupt lock,
                   which would be a Bad Thing.  */
                irq_enter();

                update_process_times(user);

                data->prof_counter = data->prof_multiplier;

                irq_exit();
        }
        set_irq_regs(old_regs);
}

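/* Changing the profiling tick multiplier is not supported here, so any
   requested multiplier is rejected.  */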
int
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}


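/* Post `operation' as a bit in each target CPU's IPI word, then kick each
   target with a wripir PALcall.  The first mb() makes the caller's data
   visible before the message bit; the second orders the bit stores before
   the interrupt requests.  */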
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int i;

        mb();
        for_each_cpu(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        mb();
        for_each_cpu(i, to_whom)
                wripir(i);
}

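/* IPI entry point, called from the interrupt handler.  xchg() atomically
   claims every pending message bit at once; `ops & -ops' then isolates the
   lowest set bit and __ffs() turns it back into an ipi_message_type.  The
   outer loop re-reads the word until it stays empty, since new messages
   may arrive while earlier ones are processed.  */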
void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

#if 0
        DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));
#endif

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
                mb();   /* Order bit clearing and data access. */
                do {
                        unsigned long which;

                        which = ops & -ops;
                        ops &= ~which;
                        which = __ffs(which);

                        switch (which) {
                        case IPI_RESCHEDULE:
                                scheduler_ipi();
                                break;

                        case IPI_CALL_FUNC:
                                generic_smp_call_function_interrupt();
                                break;

                        case IPI_CALL_FUNC_SINGLE:
                                generic_smp_call_function_single_interrupt();
                                break;

                        case IPI_CPU_STOP:
                                halt();         /* Does not return. */

                        default:
                                printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                                       this_cpu, which);
                                break;
                        }
                } while (ops);

                mb();   /* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
#endif
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
        cpumask_t to_whom;

        cpumask_copy(&to_whom, cpu_possible_mask);
        cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
        send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
        imb();
}

void
smp_imb(void)
{
        /* Must wait for the other processors to flush their icaches
           before continuing.  */
        if (on_each_cpu(ipi_imb, NULL, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

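/* Flush the named mm on every CPU.  If no other thread can be using the
   mm, it is enough to zero the remote CPUs' saved context[] slots: those
   CPUs will allocate a fresh ASN the next time they activate the mm,
   which invalidates the stale translations without any IPI traffic.  */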
void
flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        struct mm_struct *mm = data->mm;

        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current_page(mm, data->vma, data->addr);
        else
                flush_tlb_other(mm);
}

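/* Flush a single page for every CPU using the mm.  The single-user
   short-cut mirrors flush_tlb_mm; otherwise vma/mm/addr are bundled into
   a descriptor and broadcast via smp_call_function.  */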
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                __load_new_mm_context(mm);
        else
                flush_tlb_other(mm);
}

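/* Make newly written instructions visible, for executable mappings only.
   Rather than flushing the icache directly, the mm is pushed onto a new
   context (fresh ASN) wherever it is active, which presumably subsumes
   the icache flush; CPUs merely holding a saved context get it zeroed so
   they pick up a fresh ASN at their next activation.  */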
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                        unsigned long addr, int len)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((vma->vm_flags & VM_EXEC) == 0)
                return;

        preempt_disable();

        if (mm == current->active_mm) {
                __load_new_mm_context(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }

        preempt_enable();
}