// SPDX-License-Identifier: GPL-2.0
/*
 *      linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed the modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/cpu.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)      printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  Each CPU's word gets its
   own cache line so that senders on other CPUs do not false-share
   with the receiving CPU.  */
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};

/* Secondary bring-up handshake: the boot CPU sets this to -1 (wait)
   and then 0 (go); the secondary sets it to 1 once it is alive.  */
static int smp_secondary_alive = 0;

int smp_num_probed;             /* Internal processor count */
int smp_num_cpus = 1;           /* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
        cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
        cpu_data[cpuid].need_new_asn = 0;
        cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}

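/*
 * Spin until the boot CPU drops smp_secondary_alive back to zero,
 * signalling that it has finished its side of the handshake and we
 * may run calibrate_delay.  Give up (and hang) after ten seconds.
 */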
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
        unsigned long stop = jiffies + 10*HZ;

        while (time_before(jiffies, stop)) {
                if (!smp_secondary_alive)
                        return;
                barrier();
        }

        printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
        for (;;)
                barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpu_online(cpuid)) {
                printk("??, cpu 0x%x already present??\n", cpuid);
                BUG();
        }
        set_cpu_online(cpuid, true);

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Get our local ticker going. */
        smp_setup_percpu_timer(cpuid);
        init_clockevent();

        /* Call platform-specific callin, if specified */
        if (alpha_mv.smp_callin)
                alpha_mv.smp_callin();

        /* All kernel threads share the same mm context.  */
        mmgrab(&init_mm);
        current->active_mm = &init_mm;

        /* inform the notifiers about the new cpu */
        notify_cpu_starting(cpuid);

        /* Must have completely accurate bogos.  */
        local_irq_enable();

        /* Wait for the boot CPU to stop, with irqs enabled, before
           running calibrate_delay.  */
        wait_boot_cpu_to_stop(cpuid);
        mb();
        calibrate_delay();

        smp_store_cpu_info(cpuid);
        /* Allow the master to continue only after we've written
           loops_per_jiffy.  */
        wmb();
        smp_secondary_alive = 1;

        DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
              cpuid, current, current->active_mm));

        preempt_disable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int
wait_for_txrdy (unsigned long cpumask)
{
        unsigned long timeout;

        if (!(hwrpb->txrdy & cpumask))
                return 0;

        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (!(hwrpb->txrdy & cpumask))
                        return 0;
                udelay(10);
                barrier();
        }

        return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
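/*
 * The console mailbox protocol, as used below: the message length is
 * written to the low half of the target CPU's ipc_buffer[0], the text
 * from ipc_buffer[1] on, and that CPU's bit in hwrpb->rxrdy is set to
 * tell the secondary console a message is waiting.
 */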
static void
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1UL << cpuid);
        if (wait_for_txrdy(cpumask))
                goto timeout;

        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);

        /* atomic test and set */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (wait_for_txrdy(cpumask))
                goto timeout;
        return;

 timeout:
        printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1UL << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                  ((char*)hwrpb
                   + hwrpb->processor_offset
                   + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[1];
                        cp2 = buf;
                        memcpy(cp2, cp1, cnt);
                        cp2[cnt] = '\0';

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                      "message is '%s'\n", mycpu, buf));
        }

        hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb, *ipcb;
        unsigned long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
        ipcb = &task_thread_info(idle)->pcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's pcb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = ipcb->ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(ipcb);
        hwpcb->flags = ipcb->flags;
        hwpcb->res1 = hwpcb->res2 = 0;

#if 0
        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, ipcb->flags));

        /* Setup HWRPB fields that SRM uses to activate secondary CPU */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 10 seconds for an ACK from the console.  */
        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

 started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}

/*
 * Bring one cpu online.
 */
static int
smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
        unsigned long timeout;

        /* Signal the secondary to wait a moment.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* Notify the secondary CPU it can run calibrate_delay.  */
        mb();
        smp_secondary_alive = 0;

        /* We've been acked by the console; wait one second for
           the task to start up for real.  */
        timeout = jiffies + 1*HZ;
        while (time_before(jiffies, timeout)) {
                if (smp_secondary_alive == 1)
                        goto alive;
                udelay(10);
                barrier();
        }

        /* We failed to boot the CPU.  */

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

 alive:
        /* Another "Red Snapper". */
        return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        unsigned long i;

        if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
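                        /* 0x1cc selects the HWRPB per-CPU status flags
                           that (per the SRM spec) mark the processor as
                           present, available, and running valid PALcode;
                           only such CPUs may be brought online.  */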
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                set_cpu_possible(i, true);
                                set_cpu_present(i, true);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
        }

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
               smp_num_probed, cpumask_bits(cpu_present_mask)[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* Take care of some initial bookkeeping.  */
        memset(ipi_data, 0, sizeof(ipi_data));

        current_thread_info()->cpu = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_setup_percpu_timer(boot_cpuid);

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                init_cpu_possible(cpumask_of(boot_cpuid));
                init_cpu_present(cpumask_of(boot_cpuid));
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        smp_num_cpus = smp_num_probed;
}

void
smp_prepare_boot_cpu(void)
{
}

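/*
 * Boot one secondary; report whether it actually came online.
 */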
int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        smp_boot_one_cpu(cpu, tidle);

        return cpu_online(cpu) ? 0 : -ENOSYS;
}

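/*
 * Sum loops_per_jiffy over the online CPUs and report the total as
 * BogoMIPS (loops_per_jiffy*HZ/500000, printed to two decimals).
 */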
void __init
smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu))
                        bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               (bogosum + 2500) / (500000/HZ),
               ((bogosum + 2500) / (5000/HZ)) % 100);
}

int
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

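/*
 * Post an IPI: first publish the operation bits for each target CPU,
 * then order the stores with mb() before writing the interprocessor
 * interrupt request register (wripir) to actually interrupt them.
 */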
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
        int i;

        mb();
        for_each_cpu(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        mb();
        for_each_cpu(i, to_whom)
                wripir(i);
}

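/*
 * IPI entry point.  Atomically grab and clear our pending-operations
 * word with xchg, then dispatch each set bit: "ops & -ops" isolates
 * the lowest set bit, which __ffs converts to a message number.
 */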
void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

#if 0
        DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));
#endif

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {
          mb(); /* Order bit clearing and data access. */
          do {
                unsigned long which;

                which = ops & -ops;
                ops &= ~which;
                which = __ffs(which);

                switch (which) {
                case IPI_RESCHEDULE:
                        scheduler_ipi();
                        break;

                case IPI_CALL_FUNC:
                        generic_smp_call_function_interrupt();
                        break;

                case IPI_CPU_STOP:
                        halt();

                default:
                        printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                               this_cpu, which);
                        break;
                }
          } while (ops);

          mb(); /* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
#endif
        send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

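/*
 * Halt every other CPU (each spins in halt() on IPI_CPU_STOP); used
 * on shutdown and panic-style paths.
 */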
void
smp_send_stop(void)
{
        cpumask_t to_whom;
        cpumask_copy(&to_whom, cpu_possible_mask);
        cpumask_clear_cpu(smp_processor_id(), &to_whom);
#ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
        send_ipi_message(&to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void
ipi_imb(void *ignored)
{
        imb();
}

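/*
 * The Alpha imb (instruction memory barrier) only flushes the issuing
 * CPU's icache, so a global flush must run imb on every CPU.
 */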
void
smp_imb(void)
{
        /* Must wait for the other processors to flush their icaches
           before continuing.  */
        if (on_each_cpu(ipi_imb, NULL, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

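/*
 * Fast path: if we are the mm's only user, no other CPU can have it
 * active; zeroing mm->context[cpu] forces those CPUs to allocate a
 * fresh ASN if they ever pick the mm up again, so no cross-CPU flush
 * IPI is needed.
 */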
void
flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        struct mm_struct *mm = data->mm;

        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current_page(mm, data->vma, data->addr);
        else
                flush_tlb_other(mm);
}

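/*
 * Single-page flush; takes the same single-user shortcut as
 * flush_tlb_mm above, otherwise IPIs the page details to everyone.
 */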
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                __load_new_mm_context(mm);
        else
                flush_tlb_other(mm);
}

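/*
 * Make newly written user code visible to instruction fetch on all
 * CPUs.  Only needed for executable mappings; uses the same
 * single-user shortcut as the TLB flushes above.
 */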
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                        unsigned long addr, int len)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((vma->vm_flags & VM_EXEC) == 0)
                return;

        preempt_disable();

        if (mm == current->active_mm) {
                __load_new_mm_context(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }

        preempt_enable();
}