/*
 *	linux/arch/alpha/kernel/smp.c
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
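
/*
 * Illustrative sketch (not compiled): each ipi_message_type above is a
 * bit index into ipi_data[cpu].bits, so several pending messages
 * coalesce into a single word.  A receiver peels them off lowest bit
 * first, exactly as handle_ipi() does below; process() here is just a
 * stand-in name.
 */
#if 0
	unsigned long ops = xchg(&ipi_data[cpu].bits, 0);
	while (ops) {
		unsigned long which = ops & -ops;	/* isolate lowest set bit */
		ops &= ~which;
		process(__ffs(which));		/* bit number == message type */
	}
#endif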

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

/* Which cpus ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

extern void calibrate_delay(void);


/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);

	/* Allow master to continue only after we wrote loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* Atomic test and set.  */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU.  */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum.  */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */
	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	return 0;
}

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for(cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	/* BogoMIPS = loops_per_jiffy * HZ / 500000; the +2500 rounds,
	   and the second expression yields two decimal places.  */
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}


void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
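
/*
 * Usage sketch (illustrative): to ask CPU 2 to reschedule, a caller
 * would write
 *
 *	send_ipi_message(cpumask_of_cpu(2), IPI_RESCHEDULE);
 *
 * The first mb() orders the caller's prior stores against the message
 * bits; the second orders the bits against the wripir() PALcode call
 * that actually raises the interprocessor interrupt.
 */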

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
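
/*
 * Usage sketch (illustrative): pointer_lock() treats a NULL pointer as
 * "unlocked"; storing a non-NULL value both takes the lock and
 * publishes the payload.  smp_call_function_on_cpu() below uses it as
 *
 *	if (pointer_lock(&smp_call_function_data, &data, retry))
 *		return -EBUSY;			(another call in flight)
 *	...
 *	smp_call_function_data = NULL;		(release)
 *
 * The ldq_l/stq_c pair is Alpha's load-locked/store-conditional: the
 * stq_c leaves 0 in its register if another CPU touched the location
 * since the ldq_l, and the beq then retries the whole sequence.
 */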

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be
				   done is done by the interrupt return
				   path.  */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has
				   been received, and execution is about to
				   begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone
				   unless wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is
				   done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled.  */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	   timeout period -- if we get a response this time, log a
	   message saying when we got it.  */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/* If after both the initial and long timeout periods we still
	   don't have a response, something is very wrong...  */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
EXPORT_SYMBOL(smp_call_function_on_cpu);

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}
EXPORT_SYMBOL(smp_call_function);
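
/*
 * Usage sketch (illustrative, not compiled): run a fast, non-blocking
 * function on every other online CPU and wait until all of them have
 * finished.  The callback and counter names are made up for the example.
 */
#if 0
static atomic_t example_acks = ATOMIC_INIT(0);

static void
example_poke(void *info)
{
	atomic_inc((atomic_t *)info);
}

	/* ...from process context with interrupts enabled... */
	if (smp_call_function(example_poke, &example_acks, 1, 1))
		printk(KERN_ERR "example_poke: timed out\n");
#endif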

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for the other processors to flush their icache
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);
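
/*
 * Usage sketch (illustrative, hypothetical names): after writing
 * instructions -- say a signal trampoline or freshly loaded module
 * text -- the writer must make every CPU discard stale icache
 * contents before the new code can run anywhere:
 *
 *	memcpy(code, insns, len);
 *	smp_imb();		-- imb() here and on all other CPUs
 */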

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			/* We are the only user of this mm; rather than
			   interrupting the other CPUs, just force them
			   to allocate new ASNs next time they run it.  */
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			/* As in flush_tlb_mm, force new ASNs instead of
			   interrupting the other CPUs.  */
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Nothing to do if the range isn't executable.  */
	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}