/*
 * M32R SMP boot support: secondary CPU bring-up and synchronization.
 */
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#define DEBUG_SMP
#ifdef DEBUG_SMP
#define Dprintk(x...) printk(x)
#else
#define Dprintk(x...)
#endif

extern cpumask_t cpu_initialized;

/*
 * Data structures and variables
 */
/* Physical ID of the boot-strap processor (the CPU doing the boot-up) */
static unsigned int bsp_phys_id = -1;

/* Bitmask of physically present CPUs */
physid_mask_t phys_cpu_present_map;

/* Bitmasks used to synchronize secondary CPU boot-up */
cpumask_t cpu_bootout_map;
cpumask_t cpu_bootin_map;
static cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);

/* Per-CPU information */
struct cpuinfo_m32r cpu_data[NR_CPUS] __cacheline_aligned;

static int cpucount;
static cpumask_t smp_commenced_mask;

extern struct {
	void * spi;
	unsigned short ss;
} stack_start;

/* Mapping from physical CPU ID to logical CPU ID */
static volatile int physid_2_cpu[NR_CPUS];
#define physid_to_cpu(physid) physid_2_cpu[physid]

/* Mapping from logical CPU ID to physical CPU ID */
volatile int cpu_2_physid[NR_CPUS];

DEFINE_PER_CPU(int, prof_multiplier) = 1;
DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
DEFINE_PER_CPU(int, prof_counter) = 1;

spinlock_t ipi_lock[NR_IPIS];

static unsigned int calibration_result;

/*
 * Function Prototypes
 */
static void init_ipi_lock(void);
static void do_boot_cpu(int);

int start_secondary(void *);
static void smp_callin(void);
static void smp_online(void);

static void show_mp_info(int);
static void smp_store_cpu_info(int);
static void show_cpu_info(int);
int setup_profiling_timer(unsigned int);
static void init_cpu_to_physid(void);
static void map_cpu_to_physid(int, int);
static void unmap_cpu_to_physid(int, int);

void smp_prepare_boot_cpu(void)
{
	bsp_phys_id = hard_smp_processor_id();
	physid_set(bsp_phys_id, phys_cpu_present_map);
	set_cpu_online(0, true);
	cpumask_set_cpu(0, &cpu_callout_map);
	cpumask_set_cpu(0, &cpu_callin_map);

	/*
	 * Register the boot CPU (logical CPU#0) in the
	 * logical <-> physical CPU ID maps.
	 */
	init_cpu_to_physid();
	map_cpu_to_physid(0, bsp_phys_id);
	current_thread_info()->cpu = 0;
}

/*
 * Architecture-specific entry point for preparing the CPUs for SMP:
 * read the number of on-chip CPUs from the FPGA register, build the
 * physical CPU present map, and boot each application processor.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phys_id;
	unsigned long nr_cpu;

	nr_cpu = inl(M32R_FPGA_NUM_OF_CPUS_PORTL);
	if (nr_cpu > NR_CPUS) {
		printk(KERN_INFO "NUM_OF_CPUS reg. value [%lu] > NR_CPUS [%d]\n",
			nr_cpu, NR_CPUS);
		goto smp_done;
	}
	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
		physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif

	show_mp_info(nr_cpu);

	init_ipi_lock();

	/*
	 * Save our processor parameters.
	 */
	smp_store_cpu_info(0);

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated by commandline.\n");
		goto smp_done;
	}

	/*
	 * Now boot each application processor in turn.
	 */
	Dprintk("CPU present map : %lx\n", physids_coerce(phys_cpu_present_map));

	for (phys_id = 0 ; phys_id < NR_CPUS ; phys_id++) {
		/*
		 * Skip the boot CPU, CPUs that are not physically
		 * present, and CPUs beyond the max_cpus limit.
		 */
		if (phys_id == bsp_phys_id)
			continue;

		if (!physid_isset(phys_id, phys_cpu_present_map))
			continue;

		if (max_cpus <= cpucount + 1)
			continue;

		do_boot_cpu(phys_id);

		/*
		 * If the CPU did not come up, drop it from the
		 * physical present map so it is not used later.
		 */
		if (physid_to_cpu(phys_id) == -1) {
			physid_clear(phys_id, phys_cpu_present_map);
			printk("phys CPU#%d not responding - "
				"cannot use it.\n", phys_id);
		}
	}

smp_done:
	Dprintk("Boot done.\n");
}

/*
 * Initialize the IPI spinlocks.
 */
static void __init init_ipi_lock(void)
{
	int ipi;

	for (ipi = 0 ; ipi < NR_IPIS ; ipi++)
		spin_lock_init(&ipi_lock[ipi]);
}

/*
 * Boot one application processor:
 * fork an idle task, point its return address at start_secondary(),
 * send a CPU_BOOT_IPI to the target physical CPU and wait for it to
 * call in.  On failure, the CPU's mappings and boot-state bits are
 * cleared again.
 */
static void __init do_boot_cpu(int phys_id)
{
	struct task_struct *idle;
	unsigned long send_status, boot_status;
	int timeout, cpu_id;

	cpu_id = ++cpucount;

	/*
	 * Create an idle task for the new CPU.
	 */
	idle = fork_idle(cpu_id);
	if (IS_ERR(idle))
		panic("failed fork for CPU#%d.", cpu_id);

	idle->thread.lr = (unsigned long)start_secondary;

	map_cpu_to_physid(cpu_id, phys_id);

	/* Report which CPU we are booting. */
	printk("Booting processor %d/%d\n", phys_id, cpu_id);
	stack_start.spi = (void *)idle->thread.sp;
	task_thread_info(idle)->cpu = cpu_id;

	/*
	 * send_status : set while waiting for the target CPU to
	 *               acknowledge the boot IPI (cpu_bootin_map,
	 *               indexed by physical ID).
	 * boot_status : set if the target CPU never calls in
	 *               (cpu_callin_map, indexed by logical ID).
	 */
	send_status = 0;
	boot_status = 0;

	cpumask_set_cpu(phys_id, &cpu_bootout_map);

	/* Send the boot IPI to the target CPU. */
	send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;

	/* Wait up to 100ms for the target CPU to acknowledge the IPI. */
	do {
		Dprintk("+");
		udelay(1000);
		send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
	} while (send_status && (timeout++ < 100));

	Dprintk("After Startup.\n");

	if (!send_status) {
		/*
		 * Allow the target CPU to proceed past smp_callin().
		 */
		Dprintk("Before Callout %d.\n", cpu_id);
		cpumask_set_cpu(cpu_id, &cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu_id);

		/*
		 * Wait 5s total for the CPU to call in.
		 */
		for (timeout = 0; timeout < 5000; timeout++) {
			if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
				break;
			udelay(1000);
		}

		if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
			/* The CPU called in; it is up. */
			Dprintk("OK.\n");
		} else {
			boot_status = 1;
			printk("Not responding.\n");
		}
	} else
		printk("IPI never delivered???\n");

	if (send_status || boot_status) {
		/* Boot failed: undo the mapping and state bits. */
		unmap_cpu_to_physid(cpu_id, phys_id);
		cpumask_clear_cpu(cpu_id, &cpu_callout_map);
		cpumask_clear_cpu(cpu_id, &cpu_callin_map);
		cpumask_clear_cpu(cpu_id, &cpu_initialized);
		cpucount--;
	}
}

int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
{
	int timeout;

	cpumask_set_cpu(cpu_id, &smp_commenced_mask);

	/*
	 * Wait up to 5s for the secondary CPU to come online.
	 */
	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpu_online(cpu_id))
			break;
		udelay(1000);
	}
	if (!cpu_online(cpu_id))
		BUG();

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu_id, timeout;
	unsigned long bogosum = 0;

	/* Wait up to 5s for every online CPU to have called in. */
	for (timeout = 0; timeout < 5000; timeout++) {
		if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
			break;
		udelay(1000);
	}
	if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
		BUG();

	for_each_online_cpu(cpu_id)
		show_cpu_info(cpu_id);

	/*
	 * Print the total BogoMIPS of all activated processors.
	 */
	Dprintk("Before bogomips.\n");
	if (cpucount) {
		for_each_cpu(cpu_id, cpu_online_mask)
			bogosum += cpu_data[cpu_id].loops_per_jiffy;

		printk(KERN_INFO "Total of %d processors activated "
			"(%lu.%02lu BogoMIPS).\n", cpucount + 1,
			bogosum / (500000 / HZ),
			(bogosum / (5000 / HZ)) % 100);
		Dprintk("Before bogocount - setting activated=1.\n");
	}
}

/*
 * Activate a secondary processor.
 * This is the entry point of a booted application processor (set as
 * the idle task's return address in do_boot_cpu()).  It initializes
 * the CPU, calls in to the boot CPU, waits until __cpu_up() gives the
 * go-ahead, comes online, and then enters the idle loop.
 */
int __init start_secondary(void *unused)
{
	cpu_init();
	preempt_disable();
	smp_callin();
	while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
		cpu_relax();

	smp_online();

	/*
	 * Make sure this CPU starts with a clean local TLB.
	 */
	local_flush_tlb_all();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
	return 0;
}

/*
 * Call-in to the boot CPU: wait for the callout from do_boot_cpu(),
 * then set our bit in cpu_callin_map so the boot CPU can continue.
 */
static void __init smp_callin(void)
{
	int phys_id = hard_smp_processor_id();
	int cpu_id = smp_processor_id();
	unsigned long timeout;

	if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpu_id);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpu_id, phys_id);

	/* Wait up to 2s for the callout from the boot CPU. */
	timeout = jiffies + (2 * HZ);
	while (time_before(jiffies, timeout)) {
		/* Has the boot CPU finished its callout yet? */
		if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU#%d started up but did not get a callout!\n",
			cpu_id);
		BUG();
	}

	/* Allow the boot CPU to continue. */
	cpumask_set_cpu(cpu_id, &cpu_callin_map);
}

static void __init smp_online(void)
{
	int cpu_id = smp_processor_id();

	notify_cpu_starting(cpu_id);

	local_irq_enable();

	/* Get our bogomips. */
	calibrate_delay();

	/* Save our processor parameters. */
	smp_store_cpu_info(cpu_id);

	set_cpu_online(cpu_id, true);
}

/*
 * Show MP (multi-processor) information read from the FPGA registers.
 */
static void __init show_mp_info(int nr_cpu)
{
	int i;
	char cpu_model0[17], cpu_model1[17], cpu_ver[9];

	strncpy(cpu_model0, (char *)M32R_FPGA_CPU_NAME_ADDR, 16);
	strncpy(cpu_model1, (char *)M32R_FPGA_MODEL_ID_ADDR, 16);
	strncpy(cpu_ver, (char *)M32R_FPGA_VERSION_ADDR, 8);

	/* NUL-terminate the strings and strip trailing spaces. */
	cpu_model0[16] = '\0';
	for (i = 15 ; i >= 0 ; i--) {
		if (cpu_model0[i] != ' ')
			break;
		cpu_model0[i] = '\0';
	}
	cpu_model1[16] = '\0';
	for (i = 15 ; i >= 0 ; i--) {
		if (cpu_model1[i] != ' ')
			break;
		cpu_model1[i] = '\0';
	}
	cpu_ver[8] = '\0';
	for (i = 7 ; i >= 0 ; i--) {
		if (cpu_ver[i] != ' ')
			break;
		cpu_ver[i] = '\0';
	}

	printk(KERN_INFO "M32R-mp information\n");
	printk(KERN_INFO " On-chip CPUs : %d\n", nr_cpu);
	printk(KERN_INFO " CPU model : %s/%s(%s)\n", cpu_model0,
		cpu_model1, cpu_ver);
}

/*
 * Save this CPU's parameters: copy boot_cpu_data and record the
 * calibrated loops_per_jiffy.
 */
static void __init smp_store_cpu_info(int cpu_id)
{
	struct cpuinfo_m32r *ci = cpu_data + cpu_id;

	*ci = boot_cpu_data;
	ci->loops_per_jiffy = loops_per_jiffy;
}

static void __init show_cpu_info(int cpu_id)
{
	struct cpuinfo_m32r *ci = &cpu_data[cpu_id];

	printk("CPU#%d : ", cpu_id);

#define PRINT_CLOCK(name, value) \
	printk(name " clock %d.%02dMHz", \
		((value) / 1000000), ((value) % 1000000) / 10000)

	PRINT_CLOCK("CPU", (int)ci->cpu_clock);
	PRINT_CLOCK(", Bus", (int)ci->bus_clock);
	printk(", loops_per_jiffy[%ld]\n", ci->loops_per_jiffy);
}

/*
 * setup_profiling_timer() - change how often the profiling interrupt
 * fires on each CPU.  Reject a zero multiplier or one that would leave
 * fewer than 500 timer counts per profiling tick.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	int i;

	if ((!multiplier) || (calibration_result / multiplier < 500))
		return -EINVAL;

	/*
	 * Record the new multiplier for every possible CPU; each CPU
	 * reads it from its per-CPU prof_multiplier variable.
	 */
	for_each_possible_cpu(i)
		per_cpu(prof_multiplier, i) = multiplier;

	return 0;
}

/* Initialize the logical <-> physical CPU ID maps to "unassigned" (-1). */
static void __init init_cpu_to_physid(void)
{
	int i;

	for (i = 0 ; i < NR_CPUS ; i++) {
		cpu_2_physid[i] = -1;
		physid_2_cpu[i] = -1;
	}
}

/* Establish the mapping between a logical CPU ID and its physical ID. */
static void __init map_cpu_to_physid(int cpu_id, int phys_id)
{
	physid_2_cpu[phys_id] = cpu_id;
	cpu_2_physid[cpu_id] = phys_id;
}

/* Tear the mapping down again, e.g. when a CPU fails to boot. */
static void __init unmap_cpu_to_physid(int cpu_id, int phys_id)
{
	physid_2_cpu[phys_id] = -1;
	cpu_2_physid[cpu_id] = -1;
}