/*
 *      linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed the modified smp_call_function to
 *            smp_call_function_on_cpu().
 *            Created a function that conforms to the old calling
 *            convention of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)      printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
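/* Each CPU's word is placed in its own cache line so that a sender
   writing one CPU's bits does not bounce the line another CPU is
   reading -- plain false-sharing avoidance.  */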
static struct {
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};
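/* Each message type above is used as a bit index into
   ipi_data[cpu].bits by send_ipi_message() and handle_ipi().  */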

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;             /* Internal processor count */
int smp_num_cpus = 1;           /* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

extern void calibrate_delay(void);


/*
 * Called by both the boot cpu and the secondaries to move global
 * data into per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
        cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
        cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
        cpu_data[cpuid].need_new_asn = 0;
        cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
        cpu_data[cpuid].prof_counter = 1;
        cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
        unsigned long stop = jiffies + 10*HZ;

        while (time_before(jiffies, stop)) {
                if (!smp_secondary_alive)
                        return;
                barrier();
        }

        printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
        for (;;)
                barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
        int cpuid = hard_smp_processor_id();

        if (cpu_test_and_set(cpuid, cpu_online_map)) {
                printk("??, cpu 0x%x already present??\n", cpuid);
                BUG();
        }

        /* Turn on machine checks.  */
        wrmces(7);

        /* Set trap vectors.  */
        trap_init();

        /* Set interrupt vector.  */
        wrent(entInt, 0);

        /* Get our local ticker going. */
        smp_setup_percpu_timer(cpuid);

        /* Call platform-specific callin, if specified */
        if (alpha_mv.smp_callin) alpha_mv.smp_callin();

        /* All kernel threads share the same mm context.  */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;

        /* Must have completely accurate bogos.  */
        local_irq_enable();

        /* Wait for the boot CPU to stop, with irqs enabled, before
           running calibrate_delay.  */
        wait_boot_cpu_to_stop(cpuid);
        mb();
        calibrate_delay();

        smp_store_cpu_info(cpuid);
        /* Allow the master to continue only after we've written
           loops_per_jiffy.  */
        wmb();
        smp_secondary_alive = 1;

        DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
              cpuid, current, current->active_mm));

        /* Do nothing.  */
        cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
        unsigned long timeout;

        if (!(hwrpb->txrdy & cpumask))
                return 0;

        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (!(hwrpb->txrdy & cpumask))
                        return 0;
                udelay(10);
                barrier();
        }

        return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
        struct percpu_struct *cpu;
        register char *cp1, *cp2;
        unsigned long cpumask;
        size_t len;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);

        cpumask = (1UL << cpuid);
        if (wait_for_txrdy(cpumask))
                goto timeout;

        cp2 = str;
        len = strlen(cp2);
        *(unsigned int *)&cpu->ipc_buffer[0] = len;
        cp1 = (char *) &cpu->ipc_buffer[1];
        memcpy(cp1, cp2, len);

        /* Make the message visible, then atomically set this cpu's
           bit in rxrdy to tell the console it has mail.  */
        wmb();
        set_bit(cpuid, &hwrpb->rxrdy);

        if (wait_for_txrdy(cpumask))
                goto timeout;
        return;

 timeout:
        printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
        int mycpu, i, cnt;
        unsigned long txrdy = hwrpb->txrdy;
        char *cp1, *cp2, buf[80];
        struct percpu_struct *cpu;

        DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

        mycpu = hard_smp_processor_id();

        for (i = 0; i < NR_CPUS; i++) {
                if (!(txrdy & (1UL << i)))
                        continue;

                DBGS(("recv_secondary_console_msg: "
                      "TXRDY contains CPU %d.\n", i));

                cpu = (struct percpu_struct *)
                  ((char*)hwrpb
                   + hwrpb->processor_offset
                   + i * hwrpb->processor_size);

                DBGS(("recv_secondary_console_msg: on %d from %d"
                      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
                      mycpu, i, cpu->halt_reason, cpu->flags));

                cnt = cpu->ipc_buffer[0] >> 32;
                if (cnt <= 0 || cnt >= 80)
                        strcpy(buf, "<<< BOGUS MSG >>>");
                else {
                        cp1 = (char *) &cpu->ipc_buffer[11];
                        cp2 = buf;
                        strcpy(cp2, cp1);

                        while ((cp2 = strchr(cp2, '\r')) != 0) {
                                *cp2 = ' ';
                                if (cp2[1] == '\n')
                                        cp2[1] = ' ';
                        }
                }

                DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
                      "message is '%s'\n", mycpu, buf));
        }

        hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
        struct percpu_struct *cpu;
        struct pcb_struct *hwpcb, *ipcb;
        unsigned long timeout;

        cpu = (struct percpu_struct *)
                ((char*)hwrpb
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
        ipcb = &task_thread_info(idle)->pcb;

        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
           to the target idle task's pcb.  Reuse the stack in the mean
           time.  Precalculate the target PCBB.  */
        hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
        hwpcb->usp = 0;
        hwpcb->ptbr = ipcb->ptbr;
        hwpcb->pcc = 0;
        hwpcb->asn = 0;
        hwpcb->unique = virt_to_phys(ipcb);
        hwpcb->flags = ipcb->flags;
        hwpcb->res1 = hwpcb->res2 = 0;

#if 0
        DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
              hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
        DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
              cpuid, idle->state, ipcb->flags));

        /* Setup HWRPB fields that SRM uses to activate secondary CPU */
        hwrpb->CPU_restart = __smp_callin;
        hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

        /* Recalculate and update the HWRPB checksum */
        hwrpb_update_checksum(hwrpb);

        /*
         * Send a "start" command to the specified processor.
         */

        /* SRM III 3.4.1.3 */
        cpu->flags |= 0x22;     /* turn on Context Valid and Restart Capable */
        cpu->flags &= ~1;       /* turn off Bootstrap In Progress */
        wmb();

        send_secondary_console_msg("START\r\n", cpuid);

        /* Wait 10 seconds for an ACK from the console.  */
        timeout = jiffies + 10*HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu->flags & 1)
                        goto started;
                udelay(10);
                barrier();
        }
        printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
        return -1;

 started:
        DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
        return 0;
}

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
        struct task_struct *idle;
        unsigned long timeout;

        /* Cook up an idler for this guy.  Note that the address we
           give to kernel_thread is irrelevant -- it's going to start
           where HWRPB.CPU_restart says to start.  But this gets all
           the other task-y sort of data structures set up like we
           wish.  We can't use kernel_thread since we must avoid
           rescheduling the child.  */
        idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpuid);

        DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
              cpuid, idle->state, idle->flags));

        /* Signal the secondary to wait a moment.  */
        smp_secondary_alive = -1;

        /* Whirrr, whirrr, whirrrrrrrrr... */
        if (secondary_cpu_start(cpuid, idle))
                return -1;

        /* Notify the secondary CPU it can run calibrate_delay.  */
        mb();
        smp_secondary_alive = 0;

        /* We've been acked by the console; wait one second for
           the task to start up for real.  */
        timeout = jiffies + 1*HZ;
        while (time_before(jiffies, timeout)) {
                if (smp_secondary_alive == 1)
                        goto alive;
                udelay(10);
                barrier();
        }

        /* We failed to boot the CPU.  */

        printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
        return -1;

 alive:
        /* Another "Red Snapper". */
        return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
        struct percpu_struct *cpubase, *cpu;
        unsigned long i;

        if (boot_cpuid != 0) {
                printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
                       boot_cpuid);
        }

        if (hwrpb->nr_processors > 1) {
                int boot_cpu_palrev;

                DBGS(("setup_smp: nr_processors %ld\n",
                      hwrpb->nr_processors));

                cpubase = (struct percpu_struct *)
                        ((char*)hwrpb + hwrpb->processor_offset);
                boot_cpu_palrev = cpubase->pal_revision;

                for (i = 0; i < hwrpb->nr_processors; i++) {
                        cpu = (struct percpu_struct *)
                                ((char *)cpubase + i*hwrpb->processor_size);
                        /* 0x1cc selects the SRM per-cpu flags for a
                           usable cpu: present, available, and with
                           valid, loaded PALcode.  */
                        if ((cpu->flags & 0x1cc) == 0x1cc) {
                                smp_num_probed++;
                                cpu_set(i, cpu_present_map);
                                cpu->pal_revision = boot_cpu_palrev;
                        }

                        DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
                              i, cpu->flags, cpu->type));
                        DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
                              i, cpu->pal_revision));
                }
        } else {
                smp_num_probed = 1;
        }

        printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
               smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* Take care of some initial bookkeeping.  */
        memset(ipi_data, 0, sizeof(ipi_data));

        current_thread_info()->cpu = boot_cpuid;

        smp_store_cpu_info(boot_cpuid);
        smp_setup_percpu_timer(boot_cpuid);

        /* Nothing to do on a UP box, or when told not to.  */
        if (smp_num_probed == 1 || max_cpus == 0) {
                cpu_present_map = cpumask_of_cpu(boot_cpuid);
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        printk(KERN_INFO "SMP starting up secondaries.\n");

        smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
        smp_boot_one_cpu(cpu);

        return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cpu_online(cpu))
                        bogosum += cpu_data[cpu].loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               (bogosum + 2500) / (500000/HZ),
               ((bogosum + 2500) / (5000/HZ)) % 100);
}


void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();
        unsigned long user = user_mode(regs);
        struct cpuinfo_alpha *data = &cpu_data[cpu];

        old_regs = set_irq_regs(regs);

        /* Record kernel PC.  */
        profile_tick(CPU_PROFILING);

        if (!--data->prof_counter) {
                /* We need to make like a normal interrupt -- otherwise
                   timer interrupts ignore the global interrupt lock,
                   which would be a Bad Thing.  */
                irq_enter();

                update_process_times(user);

                data->prof_counter = data->prof_multiplier;

                irq_exit();
        }
        set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}


static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
        int i;

        mb();   /* order previous stores before the IPI bits */
        for_each_cpu_mask(i, to_whom)
                set_bit(operation, &ipi_data[i].bits);

        mb();   /* make the bits visible before interrupting anyone */
        for_each_cpu_mask(i, to_whom)
                wripir(i);
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
        void (*func) (void *info);
        void *info;
        long wait;
        atomic_t unstarted_count;
        atomic_t unfinished_count;
};
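/* The sender initializes both counts to the number of CPUs called.
   Each target decrements unstarted_count once it has copied out
   func/info/wait (after which the sender may release the structure
   unless wait is set), and decrements unfinished_count after func
   returns when wait is set.  See handle_ipi() below.  */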

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The drop succeeds if
   the pointer is initially NULL; otherwise, if retry is set, spin
   until it is free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
        void *old, *tmp;

        mb();
 again:
        /* Compare and swap with zero, built from Alpha's
           load-locked/store-conditional pair.  */
        asm volatile (
        "1:     ldq_l   %0,%1\n"        /* load lock word, begin reservation */
        "       mov     %3,%2\n"        /* value we want to store */
        "       bne     %0,2f\n"        /* nonzero: someone else owns it */
        "       stq_c   %2,%1\n"        /* try the store; %2 = success flag */
        "       beq     %2,1b\n"        /* reservation lost -- start over */
        "2:"
        : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
        : "r"(data)
        : "memory");

        if (old == 0)
                return 0;
        if (! retry)
                return -EBUSY;

        while (*(void **)lock)
                barrier();
        goto again;
}
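/* In effect this is a cmpxchg of NULL -> data on the shared pointer,
   optionally retried until it succeeds.  */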

void
handle_ipi(struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
        unsigned long ops;

#if 0
        DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
              this_cpu, *pending_ipis, regs->pc));
#endif

        mb();   /* Order interrupt and bit testing. */
        while ((ops = xchg(pending_ipis, 0)) != 0) {    /* grab-and-clear */
          mb(); /* Order bit clearing and data access. */
          do {
                unsigned long which;

                which = ops & -ops;     /* isolate lowest set bit */
                ops &= ~which;
                which = __ffs(which);   /* convert mask to bit number */

                switch (which) {
                case IPI_RESCHEDULE:
                        /* Reschedule callback.  Everything to be done
                           is done by the interrupt return path.  */
                        break;

                case IPI_CALL_FUNC:
                    {
                        struct smp_call_struct *data;
                        void (*func)(void *info);
                        void *info;
                        int wait;

                        data = smp_call_function_data;
                        func = data->func;
                        info = data->info;
                        wait = data->wait;

                        /* Notify the sending CPU that the data has been
                           received, and execution is about to begin.  */
                        mb();
                        atomic_dec (&data->unstarted_count);

                        /* At this point the structure may be gone unless
                           wait is true.  */
                        (*func)(info);

                        /* Notify the sending CPU that the task is done.  */
                        mb();
                        if (wait) atomic_dec (&data->unfinished_count);
                        break;
                    }

                case IPI_CPU_STOP:
                        halt();

                default:
                        printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                               this_cpu, which);
                        break;
                }
          } while (ops);

          mb(); /* Order data access and bit testing. */
        }

        cpu_data[this_cpu].ipi_count++;

        if (hwrpb->txrdy)
                recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
        if (cpu == hard_smp_processor_id())
                printk(KERN_WARNING
                       "smp_send_reschedule: Sending IPI to self.\n");
#endif
        send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
        cpumask_t to_whom = cpu_possible_map;
        cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
        if (hard_smp_processor_id() != boot_cpu_id)
                printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
        send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>      The function to run. This must be fast and non-blocking.
 *  <info>      An arbitrary pointer to pass to the function.
 *  <retry>     If true, keep retrying until ready.
 *  <wait>      If true, wait until function has completed on other CPUs.
 *  [RETURNS]   0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
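/* Typical use (a sketch only; smp_imb() below is a real in-tree user):
 *
 *      static void do_flush(void *ignored) { imb(); }
 *      ...
 *      if (smp_call_function(do_flush, NULL, 1, 1))
 *              printk(KERN_CRIT "do_flush: timed out\n");
 */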

int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
                          int wait, cpumask_t to_whom)
{
        struct smp_call_struct data;
        unsigned long timeout;
        int num_cpus_to_call;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.wait = wait;

        cpu_clear(smp_processor_id(), to_whom);
        num_cpus_to_call = cpus_weight(to_whom);

        atomic_set(&data.unstarted_count, num_cpus_to_call);
        atomic_set(&data.unfinished_count, num_cpus_to_call);

        /* Acquire the smp_call_function_data mutex.  */
        if (pointer_lock(&smp_call_function_data, &data, retry))
                return -EBUSY;

        /* Send a message to the requested CPUs.  */
        send_ipi_message(to_whom, IPI_CALL_FUNC);

        /* Wait for a minimal response.  */
        timeout = jiffies + HZ;
        while (atomic_read (&data.unstarted_count) > 0
               && time_before (jiffies, timeout))
                barrier();

        /* If there's no response yet, log a message but allow a longer
         * timeout period -- if we get a response this time, log
         * a message saying when we got it.
         */
        if (atomic_read(&data.unstarted_count) > 0) {
                long start_time = jiffies;
                printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
                       __FUNCTION__);
                timeout = jiffies + 30 * HZ;
                while (atomic_read(&data.unstarted_count) > 0
                       && time_before(jiffies, timeout))
                        barrier();
                if (atomic_read(&data.unstarted_count) <= 0) {
                        long delta = jiffies - start_time;
                        printk(KERN_ERR
                               "%s: response %ld.%ld seconds into long wait\n",
                               __FUNCTION__, delta / HZ,
                               (100 * (delta - ((delta / HZ) * HZ))) / HZ);
                }
        }

        /* We either got one or timed out -- clear the lock. */
        mb();
        smp_call_function_data = NULL;

        /*
         * If after both the initial and long timeout periods we still don't
         * have a response, something is very wrong...
         */
        BUG_ON(atomic_read (&data.unstarted_count) > 0);

        /* Wait for a complete response, if needed.  */
        if (wait) {
                while (atomic_read (&data.unfinished_count) > 0)
                        barrier();
        }

        return 0;
}
EXPORT_SYMBOL(smp_call_function_on_cpu);

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
        return smp_call_function_on_cpu (func, info, retry, wait,
                                         cpu_online_map);
}
EXPORT_SYMBOL(smp_call_function);

static void
ipi_imb(void *ignored)
{
        imb();
}

void
smp_imb(void)
{
        /* Must wait for the other processors to flush their icaches
           before continuing.  */
        if (on_each_cpu(ipi_imb, NULL, 1, 1))
                printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
        tbia();
}

void
flush_tlb_all(void)
{
        /* Although we don't have any data to pass, we do want to
           synchronize with the other processors.  */
        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_all: timed out\n");
        }
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current(mm);
        else
                flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_mm: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long addr;
};
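/* smp_call_function() passes a single void *, so flush_tlb_page()
   bundles its three arguments into this structure on the caller's
   stack.  */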

static void
ipi_flush_tlb_page(void *x)
{
        struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
        struct mm_struct * mm = data->mm;

        if (mm == current->active_mm && !asn_locked())
                flush_tlb_current_page(mm, data->vma, data->addr);
        else
                flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        struct flush_tlb_page_struct data;
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (mm == current->active_mm) {
                flush_tlb_current_page(mm, vma, addr);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        data.vma = vma;
        data.mm = mm;
        data.addr = addr;

        if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
                printk(KERN_CRIT "flush_tlb_page: timed out\n");
        }

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        /* On the Alpha we always flush the whole user tlb.  */
        flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
        struct mm_struct *mm = (struct mm_struct *) x;
        if (mm == current->active_mm && !asn_locked())
                __load_new_mm_context(mm);
        else
                flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                        unsigned long addr, int len)
{
        struct mm_struct *mm = vma->vm_mm;

        if ((vma->vm_flags & VM_EXEC) == 0)
                return;

        preempt_disable();

        if (mm == current->active_mm) {
                __load_new_mm_context(mm);
                if (atomic_read(&mm->mm_users) <= 1) {
                        int cpu, this_cpu = smp_processor_id();
                        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                                if (!cpu_online(cpu) || cpu == this_cpu)
                                        continue;
                                if (mm->context[cpu])
                                        mm->context[cpu] = 0;
                        }
                        preempt_enable();
                        return;
                }
        }

        if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
                printk(KERN_CRIT "flush_icache_page: timed out\n");
        }

        preempt_enable();
}