1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/cpufreq.h>
18#include <linux/delay.h>
19#include <linux/init.h>
20#include <linux/jiffies.h>
21#include <linux/kthread.h>
22#include <linux/oprofile.h>
23#include <linux/percpu.h>
24#include <linux/smp.h>
25#include <linux/spinlock.h>
26#include <linux/timer.h>
27#include <asm/cell-pmu.h>
28#include <asm/cputable.h>
29#include <asm/firmware.h>
30#include <asm/io.h>
31#include <asm/oprofile_impl.h>
32#include <asm/processor.h>
33#include <asm/prom.h>
34#include <asm/ptrace.h>
35#include <asm/reg.h>
36#include <asm/rtas.h>
37#include <asm/system.h>
38#include <asm/cell-regs.h>
39
40#include "../platforms/cell/interrupt.h"
41#include "cell/pr_util.h"
42
/* Profiling modes stored in profiling_mode */
#define PPU_PROFILING 0
#define SPU_PROFILING_CYCLES 1
#define SPU_PROFILING_EVENTS 2

/* SPU event numbers occupy this range; the "event address" event and its
 * two debug-bus routing masks are used to capture the SPU program counter.
 */
#define SPU_EVENT_NUM_START 4100
#define SPU_EVENT_NUM_STOP 4399
#define SPU_PROFILE_EVENT_ADDR 4363
#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146
#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186

#define NUM_SPUS_PER_NODE 8
#define SPU_CYCLES_EVENT_NUM 2		/* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM 1		/* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM 1		/* special group number identifying
					 * PPU_CYCLES
					 */

#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */

#define NUM_THREADS 2			/* number of physical threads in
					 * a physical processor
					 */

#define NUM_DEBUG_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

#define MAX_SPU_COUNT 0xFFFFFF		/* maximum 24 bit LFSR value */

/*
 * pm_interval register reload value.
 *
 * Parenthesized so the macro expands safely inside larger expressions;
 * the previous "0xFFFFFFFF - 10" form would mis-associate when the macro
 * was used next to other operators (e.g. "x * NUM_INTERVAL_CYC").
 */
#define NUM_INTERVAL_CYC (0xFFFFFFFF - 10)
80
static unsigned int spu_cycle_reset;	/* SPU profiling: user sample count */
static unsigned int profiling_mode;	/* PPU_PROFILING or SPU_PROFILING_* */
static int spu_evnt_phys_spu_indx;	/* physical SPU currently profiled */

/* Per virtual-counter configuration: event number, unit mask, enable flag */
struct pmc_cntrl_data {
	unsigned long vcntr;
	unsigned long evnts;
	unsigned long masks;
	unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas argument block describing one performance
 * monitor signal to activate/deactivate on the debug bus.
 */
struct pm_signal {
	u16 cpu;		/* Processor to modify */
	u16 sub_unit;		/* hw subunit this applies to (if applicable) */
	short int signal_group; /* Signal Group to Enable/Disable */
	u8 bus_word;		/* Enable/Disable on this Trace/Trigger/Event
				 * Bus Word(s) (bitmask)
				 */
	u8 bit;			/* Trigger/Event bit (if applicable) */
};
104
105
106
107
/*
 * rtas call arguments for rtas_ibm_cbe_perftools(): the subfunction to
 * perform and the pass-thru mode to leave the debug bus in.
 */
enum {
	SUBFUNC_RESET = 1,
	SUBFUNC_ACTIVATE = 2,
	SUBFUNC_DEACTIVATE = 3,

	PASSTHRU_IGNORE = 0,
	PASSTHRU_ENABLE = 1,
	PASSTHRU_DISABLE = 2,
};
117
/* Software shadow of the fields that make up the hardware pm_control
 * register image; written to hardware by write_pm_cntrl().
 */
struct pm_cntrl {
	u16 enable;
	u16 stop_at_max;
	u16 trace_mode;
	u16 freeze;
	u16 count_mode;
	u16 spu_addr_trace;
	u8  trace_buf_ovflw;
};

/* Cached PMU register setup, written out per node by cell_cpu_setup() */
static struct {
	u32 group_control;
	u32 debug_bus_control;
	struct pm_cntrl pm_cntrl;
	u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

/* Field extractors for the oprofile unit-mask encoding */
#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)

/* Saved counter values for the hardware thread not currently scheduled
 * on the physical counters (see cell_virtual_cntr()).
 */
static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);

/* Per physical-SPU saved counter value for SPU event profiling */
static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
static u32 hdw_thread;			/* HW thread the virtual counters
					 * currently map to
					 */

static u32 virt_cntr_inter_mask;	/* interrupt mask for enabled ctrs */
static struct timer_list timer_virt_cntr;	/* rotates PPU virtual ctrs */
static struct timer_list timer_spu_event_swap;	/* rotates profiled SPU */

/*
 * pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the SPU profiling is started.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;	/* token for ibm,cbe-perftools rtas call */
static int spu_rtas_token;	/* token for ibm,cbe-spu-perftools rtas call */

static u32 reset_value[NR_PHYS_CTRS];	/* counter reload values */
static int num_counters;
static int oprofile_running;
static DEFINE_SPINLOCK(cntr_lock);	/* serializes interrupt handler and
					 * timer-based counter swapping
					 */

static u32 ctr_enabled;		/* bitmask of enabled counters */

/* maps a debug-bus word to the input-bus slot it was routed to */
static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
192
193
194
195
196static int
197rtas_ibm_cbe_perftools(int subfunc, int passthru,
198 void *address, unsigned long length)
199{
200 u64 paddr = __pa(address);
201
202 return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
203 passthru, paddr >> 32, paddr & 0xffffffff, length);
204}
205
206static void pm_rtas_reset_signals(u32 node)
207{
208 int ret;
209 struct pm_signal pm_signal_local;
210
211
212
213
214
215
216
217
218
219
220
221 pm_signal_local.cpu = node;
222 pm_signal_local.signal_group = 21;
223 pm_signal_local.bus_word = 1;
224 pm_signal_local.sub_unit = 0;
225 pm_signal_local.bit = 0;
226
227 ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
228 &pm_signal_local,
229 sizeof(struct pm_signal));
230
231 if (unlikely(ret))
232
233
234
235
236
237 printk(KERN_WARNING "%s: rtas returned: %d\n",
238 __func__, ret);
239}
240
241static int pm_rtas_activate_signals(u32 node, u32 count)
242{
243 int ret;
244 int i, j;
245 struct pm_signal pm_signal_local[NR_PHYS_CTRS];
246
247
248
249
250
251
252
253
254
255 i = 0;
256 for (j = 0; j < count; j++) {
257 if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
258
259
260 pm_signal_local[i].cpu = node;
261 pm_signal_local[i].signal_group
262 = pm_signal[j].signal_group;
263 pm_signal_local[i].bus_word = pm_signal[j].bus_word;
264 pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
265 pm_signal_local[i].bit = pm_signal[j].bit;
266 i++;
267 }
268 }
269
270 if (i != 0) {
271 ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
272 pm_signal_local,
273 i * sizeof(struct pm_signal));
274
275 if (unlikely(ret)) {
276 printk(KERN_WARNING "%s: rtas returned: %d\n",
277 __func__, ret);
278 return -EIO;
279 }
280 }
281
282 return 0;
283}
284
285
286
287
/*
 * Program virtual counter 'ctr' to count 'event': fill in the matching
 * pm_signal[] entry and accumulate the debug-bus / input-bus routing
 * into pm_regs.  The oprofile event number encodes group*100 + bit.
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
	struct pm_signal *p;
	u32 signal_bit;
	u32 bus_word, bus_type, count_cycles, polarity, input_control;
	int j, i;

	if (event == PPU_CYCLES_EVENT_NUM) {
		/* Special Event: Count all cpu cycles */
		pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
		p = &(pm_signal[ctr]);
		p->signal_group = PPU_CYCLES_GRP_NUM;
		p->bus_word = 1;
		p->sub_unit = 0;
		p->bit = 0;
		goto out;
	} else {
		pm_regs.pm07_cntrl[ctr] = 0;
	}

	/* Decode the routing fields packed into the oprofile unit mask */
	bus_word = GET_BUS_WORD(unit_mask);
	bus_type = GET_BUS_TYPE(unit_mask);
	count_cycles = GET_COUNT_CYCLES(unit_mask);
	polarity = GET_POLARITY(unit_mask);
	input_control = GET_INPUT_CONTROL(unit_mask);
	signal_bit = (event % 100);	/* event = group*100 + bit */

	p = &(pm_signal[ctr]);

	p->signal_group = event / 100;
	p->bus_word = bus_word;
	p->sub_unit = GET_SUB_UNIT(unit_mask);

	pm_regs.pm07_cntrl[ctr] = 0;
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
	pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

	/*
	 * Some islands express their signals in 64-bit words while the
	 * debug-bus words and counter inputs are 32 bits wide, so fold
	 * bits >= 32 back into the lower word and adjust the bus-word
	 * mask and bus type accordingly.
	 * NOTE(review): the signal_group thresholds (>= 60, >= 50) look
	 * hardware-specific — confirm against the Cell performance
	 * monitoring signals manual before changing.
	 */
	if (input_control == 0) {
		if (signal_bit > 31) {
			signal_bit -= 32;
			if (bus_word == 0x3)
				bus_word = 0x2;
			else if (bus_word == 0xc)
				bus_word = 0x8;
		}

		if ((bus_type == 0) && p->signal_group >= 60)
			bus_type = 2;
		if ((bus_type == 1) && p->signal_group >= 50)
			bus_type = 0;

		pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
	} else {
		/* input_control != 0: counter input mux is unused;
		 * record the bit in the signal entry instead.
		 */
		pm_regs.pm07_cntrl[ctr] = 0;
		p->bit = signal_bit;
	}

	/* For each selected debug-bus word: set its bus type and claim
	 * the first free input-bus slot for it.
	 */
	for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
		if (bus_word & (1 << i)) {
			pm_regs.debug_bus_control |=
				(bus_type << (30 - (2 * i)));

			for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
				if (input_bus[j] == 0xff) {
					input_bus[j] = i;
					pm_regs.group_control |=
						(i << (30 - (2 * j)));

					break;
				}
			}
		}
	}
out:
	;
}
374
375static void write_pm_cntrl(int cpu)
376{
377
378
379
380
381
382 u32 val = 0;
383 if (pm_regs.pm_cntrl.enable == 1)
384 val |= CBE_PM_ENABLE_PERF_MON;
385
386 if (pm_regs.pm_cntrl.stop_at_max == 1)
387 val |= CBE_PM_STOP_AT_MAX;
388
389 if (pm_regs.pm_cntrl.trace_mode != 0)
390 val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
391
392 if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
393 val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
394 if (pm_regs.pm_cntrl.freeze == 1)
395 val |= CBE_PM_FREEZE_ALL_CTRS;
396
397 val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
398
399
400
401
402
403 val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
404 cbe_write_pm(cpu, pm_control, val);
405}
406
407static inline void
408set_count_mode(u32 kernel, u32 user)
409{
410
411
412
413
414
415 if (kernel) {
416 if (user)
417 pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
418 else
419 pm_regs.pm_cntrl.count_mode =
420 CBE_COUNT_SUPERVISOR_MODE;
421 } else {
422 if (user)
423 pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
424 else
425 pm_regs.pm_cntrl.count_mode =
426 CBE_COUNT_HYPERVISOR_MODE;
427 }
428}
429
430static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
431{
432
433 pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
434 cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
435}
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
/*
 * Timer callback implementing "virtual" counters: the Cell PMU has one
 * set of physical counters per node but two hardware threads, so every
 * HZ/10 this routine saves the outgoing thread's counts, reprograms the
 * events and restores the incoming thread's counts.
 */
static void cell_virtual_cntr(unsigned long data)
{
	int i, prev_hdw_thread, next_hdw_thread;
	u32 cpu;
	unsigned long flags;

	/*
	 * Make sure that the interrupt handler and the virt counter
	 * routine are not both manipulating the counters on the same
	 * node at the same time.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	prev_hdw_thread = hdw_thread;

	/* switch the cpu handling the interrupts */
	hdw_thread = 1 ^ hdw_thread;
	next_hdw_thread = hdw_thread;

	/* rebuild the bus routing from scratch for the incoming thread */
	pm_regs.group_control = 0;
	pm_regs.debug_bus_control = 0;

	/* mark all input-bus slots free */
	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * The debug bus is programmed per node; set up the events for
	 * the thread that is about to run.
	 */
	for (i = 0; i < num_counters; i++)
		set_pm_event(i,
			     pmc_cntrl[next_hdw_thread][i].evnts,
			     pmc_cntrl[next_hdw_thread][i].masks);

	/*
	 * There is one performance monitor per node, so only do the
	 * save/restore on the even (first) hw thread of each node.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/* stop counters and interrupts while swapping */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);
		for (i = 0; i < num_counters; i++) {
			/* save outgoing thread's count */
			per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
				= cbe_read_ctr(cpu, i);

			if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
			    == 0xFFFFFFFF)
				/*
				 * If the cntr value is 0xffffffff, we must
				 * reset that to 0xfffffff0 when the current
				 * thread is restarted.  This will generate a
				 * new interrupt and make sure that we never
				 * restore the counters to the max value.  If
				 * the counters were restored to the max value,
				 * they do not increment and no interrupts are
				 * generated.  Hence no more samples would be
				 * collected on that cpu.
				 */
				cbe_write_ctr(cpu, i, 0xFFFFFFF0);
			else
				cbe_write_ctr(cpu, i,
					      per_cpu(pmc_values,
						      cpu +
						      next_hdw_thread)[i]);
		}

		/*
		 * Switch to the other thread: reprogram the per-counter
		 * control registers for the thread about to execute.
		 */
		for (i = 0; i < num_counters; i++) {
			if (pmc_cntrl[next_hdw_thread][i].enabled) {
				/*
				 * There are some per-thread events;
				 * must do the set event, enable_ctr
				 * for each cpu.
				 */
				enable_ctr(cpu, i,
					   pm_regs.pm07_cntrl);
			} else {
				/* counter disabled for this thread */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, next_hdw_thread,
					 virt_cntr_inter_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&cntr_lock, flags);

	/* re-arm for the next swap in 1/10th of a second */
	mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}
557
558static void start_virt_cntrs(void)
559{
560 init_timer(&timer_virt_cntr);
561 timer_virt_cntr.function = cell_virtual_cntr;
562 timer_virt_cntr.data = 0UL;
563 timer_virt_cntr.expires = jiffies + HZ / 10;
564 add_timer(&timer_virt_cntr);
565}
566
567static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
568 struct op_system_config *sys, int num_ctrs)
569{
570 spu_cycle_reset = ctr[0].count;
571
572
573
574
575
576 spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
577
578 if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
579 printk(KERN_ERR
580 "%s: rtas token ibm,cbe-spu-perftools unknown\n",
581 __func__);
582 return -EIO;
583 }
584 return 0;
585}
586
587
588
589
590
591
592
593
/*
 * Timer callback for SPU event profiling: only one SPU per node can be
 * monitored at a time, so periodically rotate the profiled SPU
 * (spu_evnt_phys_spu_indx), saving and restoring the per-SPU counter
 * values and re-routing the debug-bus signals to the next SPU.
 */
static void spu_evnt_swap(unsigned long data)
{
	int node;
	int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
	unsigned long flags;
	int cpu;
	int ret;
	u32 interrupt_mask;

	/* enable interrupts on cntr 0 (the only counter used) */
	interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);

	hdw_thread = 0;

	/* Make sure the SPU event interrupt handler and the SPU event
	 * swap don't access the counters simultaneously.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;

	/* advance to the next SPU on the node, wrapping around */
	if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
		spu_evnt_phys_spu_indx = 0;

	/* retarget the event and the two PC-routing signals */
	pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

	/* switch the SPU being profiled on each node */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		node = cbe_cpu_to_node(cpu);
		cur_phys_spu = (node * NUM_SPUS_PER_NODE)
			+ cur_spu_evnt_phys_spu_indx;
		nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
			+ spu_evnt_phys_spu_indx;

		/* stop counters and interrupts, then save the current
		 * SPU's count
		 */
		cbe_disable_pm(cpu);
		cbe_disable_pm_interrupts(cpu);

		spu_pm_cnt[cur_phys_spu]
			= cbe_read_ctr(cpu, 0);

		/* Restore the saved count for the next SPU.  If the
		 * count is at max (0xFFFFFFFF) write 0xFFFFFFF0 instead:
		 * a counter restored to max would not increment and no
		 * further interrupts would be generated.
		 */
		if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
			cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
		else
			cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);

		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* activate the event signal and the two signals that
		 * route the next SPU's PC onto the debug bus
		 */
		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
		if (ret)
			printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
			       "SPU event swap\n", __func__);

		/* clear the trace buffer so no PC values from the
		 * previous SPU are carried over
		 */
		cbe_write_pm(cpu, trace_address, 0);

		enable_ctr(cpu, 0, pm_regs.pm07_cntrl);

		/* Enable interrupts on the CPU thread that is starting */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 interrupt_mask);
		cbe_enable_pm(cpu);
	}

	spin_unlock_irqrestore(&cntr_lock, flags);

	/* swap again in another 1/25th of a second */
	mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
}
681
682static void start_spu_event_swap(void)
683{
684 init_timer(&timer_spu_event_swap);
685 timer_spu_event_swap.function = spu_evnt_swap;
686 timer_spu_event_swap.data = 0UL;
687 timer_spu_event_swap.expires = jiffies + HZ / 25;
688 add_timer(&timer_spu_event_swap);
689}
690
/*
 * Setup for SPU event profiling: route the chosen SPU event plus the
 * two SPU program-counter address signals onto the debug bus and
 * configure the trace buffer to capture the SPU PC on each sample.
 * Called once for all nodes.
 */
static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
				     struct op_system_config *sys, int num_ctrs)
{
	int i;

	/* start profiling with the first physical SPU of each node */
	spu_evnt_phys_spu_indx = 0;

	/*
	 * Each node will need the ibm,cbe-perftools rtas call to set up
	 * and reset the debug bus.  Look the token up once and cache it
	 * in the global pm_rtas_token.
	 */
	pm_rtas_token = rtas_token("ibm,cbe-perftools");

	if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
		printk(KERN_ERR
		       "%s: rtas token ibm,cbe-perftools unknown\n",
		       __func__);
		return -EIO;
	}

	/* pm_control settings below are cached here and written per node
	 * by cell_cpu_setup().
	 */
	pm_regs.pm_cntrl.trace_buf_ovflw = 1;

	/* Use the occurrence trace mode so the SPU PC is saved to the
	 * trace buffer.  The occurrence data itself is not used.
	 * NOTE(review): mode value 2 presumably sets the "store SPU
	 * addresses" bit — confirm against the Cell PMU documentation.
	 */
	pm_regs.pm_cntrl.trace_mode = 2;

	pm_regs.pm_cntrl.spu_addr_trace = 0x1;	/* using debug bus
						 * event 2 & 3
						 */

	/* Fill in the two debug-bus entries that route the SPU PC.
	 * pm_signal[0] is filled in by the set_pm_event() call below.
	 */
	pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
	pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
	pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
	pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;

	pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
	pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
	pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
	pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

	/* Only one SPU profiling event is supported at a time */
	num_counters = 1;
	set_pm_event(0, ctr[0].event, ctr[0].unit_mask);

	/* counters count up and interrupt on overflow */
	reset_value[0] = 0xFFFFFFFF - ctr[0].count;

	/* global, used by cell_cpu_setup */
	ctr_enabled |= 1;

	/* Initialize the saved count for every SPU to the reset value */
	for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
		spu_pm_cnt[i] = reset_value[0];

	return 0;
}
759
/*
 * Setup for PPU event profiling: record the per-thread event/mask
 * configuration for both hardware threads (virtual counters) and
 * compute the counter reload values.  Called once for all nodes.
 */
static int cell_reg_setup_ppu(struct op_counter_config *ctr,
			      struct op_system_config *sys, int num_ctrs)
{
	int i, j, cpu;

	num_counters = num_ctrs;

	if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
		printk(KERN_ERR
		       "%s: Oprofile, number of specified events " \
		       "exceeds number of physical counters\n",
		       __func__);
		return -EIO;
	}

	set_count_mode(sys->enable_kernel, sys->enable_user);

	/* Setup the thread 0 events */
	for (i = 0; i < num_ctrs; ++i) {

		pmc_cntrl[0][i].evnts = ctr[i].event;
		pmc_cntrl[0][i].masks = ctr[i].unit_mask;
		pmc_cntrl[0][i].enabled = ctr[i].enabled;
		pmc_cntrl[0][i].vcntr = i;

		for_each_possible_cpu(j)
			per_cpu(pmc_values, j)[i] = 0;
	}

	/*
	 * Setup the thread 1 events: map each thread 0 event number to
	 * the equivalent thread 1 event number.
	 * NOTE(review): the 2100..2111 (+19) and 2200..2215 (+16, except
	 * 2203) remappings encode hardware event numbering — confirm
	 * against the Cell PMU event list before changing.
	 */
	for (i = 0; i < num_ctrs; ++i) {
		if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
			pmc_cntrl[1][i].evnts = ctr[i].event + 19;
		else if (ctr[i].event == 2203)
			pmc_cntrl[1][i].evnts = ctr[i].event;
		else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
			pmc_cntrl[1][i].evnts = ctr[i].event + 16;
		else
			pmc_cntrl[1][i].evnts = ctr[i].event;

		pmc_cntrl[1][i].masks = ctr[i].unit_mask;
		pmc_cntrl[1][i].enabled = ctr[i].enabled;
		pmc_cntrl[1][i].vcntr = i;
	}

	/* mark all input-bus slots free */
	for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
		input_bus[i] = 0xff;

	/*
	 * Our counters count up and we interrupt on overflow, so the
	 * starting value is max minus the requested count.  Then set
	 * the events on the enabled counters.
	 */
	for (i = 0; i < num_counters; ++i) {
		/* start with virtual counter set 0 */
		if (pmc_cntrl[0][i].enabled) {
			/* Using 32bit counters, reset max - count */
			reset_value[i] = 0xFFFFFFFF - ctr[i].count;
			set_pm_event(i,
				     pmc_cntrl[0][i].evnts,
				     pmc_cntrl[0][i].masks);

			/* global, used by cell_cpu_setup */
			ctr_enabled |= (1 << i);
		}
	}

	/* initialize the previous counts for the virtual counters */
	for_each_online_cpu(cpu)
		for (i = 0; i < num_counters; ++i) {
			per_cpu(pmc_values, cpu)[i] = reset_value[i];
		}

	return 0;
}
841
842
843
844static int cell_reg_setup(struct op_counter_config *ctr,
845 struct op_system_config *sys, int num_ctrs)
846{
847 int ret=0;
848 spu_cycle_reset = 0;
849
850
851
852
853 pm_regs.group_control = 0;
854 pm_regs.debug_bus_control = 0;
855 pm_regs.pm_cntrl.stop_at_max = 1;
856 pm_regs.pm_cntrl.trace_mode = 0;
857 pm_regs.pm_cntrl.freeze = 1;
858 pm_regs.pm_cntrl.trace_buf_ovflw = 0;
859 pm_regs.pm_cntrl.spu_addr_trace = 0;
860
861
862
863
864
865
866
867 pm_rtas_token = rtas_token("ibm,cbe-perftools");
868
869 if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
870 printk(KERN_ERR
871 "%s: rtas token ibm,cbe-perftools unknown\n",
872 __func__);
873 return -EIO;
874 }
875
876 if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
877 profiling_mode = SPU_PROFILING_CYCLES;
878 ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
879 } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
880 (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
881 profiling_mode = SPU_PROFILING_EVENTS;
882 spu_cycle_reset = ctr[0].count;
883
884
885
886
887
888
889
890 cell_reg_setup_spu_events(ctr, sys, num_ctrs);
891 } else {
892 profiling_mode = PPU_PROFILING;
893 ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
894 }
895
896 return ret;
897}
898
899
900
901
/*
 * Per-cpu setup: write the cached pm_regs image to the node's PMU and
 * activate the configured debug-bus signals via firmware.  Called once
 * for each cpu.
 */
static int cell_cpu_setup(struct op_counter_config *cntr)
{
	u32 cpu = smp_processor_id();
	u32 num_enabled = 0;
	int i;
	int ret;

	/* Cycle-based SPU profiling is driven from the SPU side; there
	 * is nothing to set up on the PPU.
	 */
	if (profiling_mode == SPU_PROFILING_CYCLES)
		return 0;

	/* There is one performance monitor per processor chip (i.e.
	 * node), so this only needs to run on the node's first hw thread.
	 */
	if (cbe_get_hw_thread_id(cpu))
		return 0;

	/* Stop all counters and interrupts while reprogramming */
	cbe_disable_pm(cpu);
	cbe_disable_pm_interrupts(cpu);

	cbe_write_pm(cpu, pm_start_stop, 0);
	cbe_write_pm(cpu, group_control, pm_regs.group_control);
	cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
	write_pm_cntrl(cpu);

	/* point every enabled signal at this node */
	for (i = 0; i < num_counters; ++i) {
		if (ctr_enabled & (1 << i)) {
			pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
			num_enabled++;
		}
	}

	/*
	 * pm_rtas_activate_signals() returns -EIO if the firmware call
	 * failed.
	 */
	if (profiling_mode == SPU_PROFILING_EVENTS) {
		/* also activate the two SPU PC-routing signals set up
		 * in cell_reg_setup_spu_events(), hence num_enabled+2
		 */
		ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
					       num_enabled+2);
		/* store the PC from the debug bus to the trace buffer
		 * as often as possible
		 */
		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
		return ret;
	} else
		return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
						num_enabled);
}
957
#define ENTRIES 303
#define MAXLFSR 0xFFFFFF

/* precomputed table of 24-bit LFSR values, spaced as described below */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The index formulas below partition the 24-bit range of n into four
 * sub-ranges with progressively coarser spacing (2^12, 2^15, 2^18) so
 * the table stays small while keeping the quantization error bounded.
 */
#define V2_16 (0x1 << 16)
#define V2_19 (0x1 << 19)
#define V2_22 (0x1 << 22)

/*
 * Map the requested number of events between samples (n) to the
 * nearest precomputed 24-bit LFSR start value.  Requests below 2^16
 * are rounded up to 2^16 (index 0) to avoid trace-buffer overruns.
 */
static int calculate_lfsr(int n)
{
	int index;

	if ((n >> 16) == 0)
		index = 0;
	else if (((n - V2_16) >> 19) == 0)
		index = ((n - V2_16) >> 12) + 1;
	else if (((n - V2_16 - V2_19) >> 22) == 0)
		index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
	else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
		index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
	else
		index = ENTRIES - 1;

	/*
	 * BUG FIX: the guard used to be "index > ENTRIES", which let
	 * index == ENTRIES through and read one element past the end of
	 * initial_lfsr[] (valid indices are 0..ENTRIES-1).  The third
	 * range above can compute index values up to 320, so clamp with
	 * ">=" instead.
	 */
	if ((index >= ENTRIES) || (index < 0))
		index = ENTRIES - 1;

	return initial_lfsr[index];
}
1085
1086static int pm_rtas_activate_spu_profiling(u32 node)
1087{
1088 int ret, i;
1089 struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
1090
1091
1092
1093
1094
1095 for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
1096 pm_signal_local[i].cpu = node;
1097 pm_signal_local[i].signal_group = 41;
1098
1099 pm_signal_local[i].bus_word = 1 << i / 2;
1100
1101 pm_signal_local[i].sub_unit = i;
1102 pm_signal_local[i].bit = 63;
1103 }
1104
1105 ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
1106 PASSTHRU_ENABLE, pm_signal_local,
1107 (ARRAY_SIZE(pm_signal_local)
1108 * sizeof(struct pm_signal)));
1109
1110 if (unlikely(ret)) {
1111 printk(KERN_WARNING "%s: rtas returned: %d\n",
1112 __func__, ret);
1113 return -EIO;
1114 }
1115
1116 return 0;
1117}
1118
#ifdef CONFIG_CPU_FREQ
/*
 * cpufreq notifier: SPU cycle profiling is time-based on the cpu
 * frequency, so re-program the sampling frequency when the cpu
 * frequency changes (or is about to change upward).
 */
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	int ret = 0;
	struct cpufreq_freqs *frq = data;
	if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
	    (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
		set_spu_profiling_frequency(frq->new, spu_cycle_reset);
	return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
	.notifier_call = oprof_cpufreq_notify
};
#endif
1136
1137
1138
1139
1140
1141
1142
1143
1144
/*
 * Stop SPU cycle profiling: deactivate the SPU performance tools via
 * rtas on every node, reset the debug-bus signals and stop the
 * profiling machinery.
 */
static void cell_global_stop_spu_cycles(void)
{
	int subfunc, rtn_value;
	unsigned int lfsr_value;
	int cpu;

	oprofile_running = 0;
	smp_wmb();	/* make the stop visible before touching hardware */

#ifdef CONFIG_CPU_FREQ
	cpufreq_unregister_notifier(&cpu_freq_notifier_block,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		subfunc = 3;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* turn off profiling; the lfsr value is not used by the
		 * deactivate subfunction but must be supplied
		 */
		lfsr_value = 0x8f100000;

		rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
				      subfunc, cbe_cpu_to_node(cpu),
				      lfsr_value);

		if (unlikely(rtn_value != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools " \
			       "failed, return = %d\n",
			       __func__, rtn_value);
		}

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
	}

	stop_spu_profiling_cycles();
}
1186
/*
 * Stop SPU event profiling: stop sampling, quiesce the PMU on every
 * node and finally kill the SPU-rotation timer.
 */
static void cell_global_stop_spu_events(void)
{
	int cpu;
	oprofile_running = 0;

	stop_spu_profiling_events();
	smp_wmb();	/* make the stop visible before touching hardware */

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);
		cbe_write_pm07_control(cpu, 0, 0);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
	del_timer_sync(&timer_spu_event_swap);
}
1212
/*
 * Stop PPU profiling: kill the virtual-counter timer first so it
 * cannot re-enable the counters, then quiesce the PMU on every node.
 */
static void cell_global_stop_ppu(void)
{
	int cpu;

	/*
	 * This routine is called once for the system.  There is one
	 * performance monitor per node, so the loop below only acts on
	 * the first hw thread of each node.
	 */
	del_timer_sync(&timer_virt_cntr);
	oprofile_running = 0;
	smp_wmb();	/* make the stop visible before touching hardware */

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		cbe_sync_irq(cbe_cpu_to_node(cpu));
		/* Stop the counters */
		cbe_disable_pm(cpu);

		/* Deactivate the signals */
		pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

		/* Deactivate interrupts */
		cbe_disable_pm_interrupts(cpu);
	}
}
1241
1242static void cell_global_stop(void)
1243{
1244 if (profiling_mode == PPU_PROFILING)
1245 cell_global_stop_ppu();
1246 else if (profiling_mode == SPU_PROFILING_EVENTS)
1247 cell_global_stop_spu_events();
1248 else
1249 cell_global_stop_spu_cycles();
1250}
1251
/*
 * Start SPU cycle profiling: compute an LFSR start value from the
 * user-requested sample period, activate the SPU PC routing on each
 * node, kick off the firmware-side tracing and finally start the
 * profiling machinery.
 */
static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
	int subfunc;
	unsigned int lfsr_value;
	int cpu;
	int ret;
	int rtas_error;
	unsigned int cpu_khzfreq = 0;

	/* The SPU profiling is time-based on the cpu frequency, so if
	 * built with CPU_FREQ support, detect frequency changes and
	 * react accordingly.
	 */
#ifdef CONFIG_CPU_FREQ
	ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (ret < 0)
		/* this is not a fatal error */
		printk(KERN_ERR "CPU freq change registration failed: %d\n",
		       ret);

	else
		cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

	set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU cycle-based profiling.  Clear pm_control
		 * before enabling the SPU collection hardware.
		 */
		cbe_write_pm(cpu, pm_control, 0);

		if (spu_cycle_reset > MAX_SPU_COUNT)
			/* use largest possible value */
			lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
		else
			lfsr_value = calculate_lfsr(spu_cycle_reset);

		/* must use a non-zero value; zero disables collection */
		if (lfsr_value == 0)
			lfsr_value = calculate_lfsr(1);

		lfsr_value = lfsr_value << 8;	/* shift lfsr to the
						 * correct register
						 * location
						 */

		/* debug bus setup */
		ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

		if (unlikely(ret)) {
			rtas_error = ret;
			goto out;
		}


		subfunc = 2;	/* 2 - activate SPU tracing, 3 - deactivate */

		/* start profiling */
		ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
				cbe_cpu_to_node(cpu), lfsr_value);

		if (unlikely(ret != 0)) {
			printk(KERN_ERR
			       "%s: rtas call ibm,cbe-spu-perftools failed, " \
			       "return = %d\n", __func__, ret);
			rtas_error = -EIO;
			goto out;
		}
	}

	rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
	if (rtas_error)
		goto out_stop;

	oprofile_running = 1;
	return 0;

out_stop:
	/* unwind everything started above */
	cell_global_stop_spu_cycles();
out:
	return rtas_error;
}
1341
/*
 * Start SPU event profiling: arm counter 0 on every node, clear the
 * trace buffer and kick off the SPU-rotation timer and the sampling
 * machinery.
 */
static int cell_global_start_spu_events(struct op_counter_config *ctr)
{
	int cpu;
	u32 interrupt_mask = 0;
	int rtn = 0;

	hdw_thread = 0;

	/* SPU event profiling uses the performance counters to generate
	 * an interrupt; the hardware stores the SPU program counter
	 * into the trace array.  The occurrence data in the trace
	 * buffer is not used, only the captured SPU addresses.  The
	 * debug bus must already be routed (see cell_cpu_setup()).
	 */

	/* This routine is called once for the system.  There is one
	 * performance monitor per node, so only the first hw thread of
	 * each node is programmed.
	 */

	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		/*
		 * Setup SPU event-based profiling.
		 * Only one SPU event on one SPU per node is supported,
		 * hence only counter 0 is used.
		 */
		if (ctr_enabled & 1) {
			cbe_write_ctr(cpu, 0, reset_value[0]);
			enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
			interrupt_mask |=
				CBE_PM_CTR_OVERFLOW_INTR(0);
		} else {
			/* Disable counter */
			cbe_write_pm07_control(cpu, 0, 0);
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);

		/* clear the trace buffer */
		cbe_write_pm(cpu, trace_address, 0);
	}

	/* Start the timer that time-slices collecting the event profile
	 * on each of the SPUs; only one SPU per node can be profiled at
	 * a time.
	 */
	start_spu_event_swap();
	start_spu_profiling_events();
	oprofile_running = 1;
	smp_wmb();	/* publish the running flag */

	return rtn;
}
1404
/*
 * Start PPU profiling: load and enable the configured counters on
 * every node, then start the virtual-counter rotation timer.
 */
static int cell_global_start_ppu(struct op_counter_config *ctr)
{
	u32 cpu, i;
	u32 interrupt_mask = 0;

	/* This routine is called once for the system.  There is one
	 * performance monitor per node, so only the first hw thread of
	 * each node is programmed.
	 */
	for_each_online_cpu(cpu) {
		if (cbe_get_hw_thread_id(cpu))
			continue;

		interrupt_mask = 0;

		for (i = 0; i < num_counters; ++i) {
			if (ctr_enabled & (1 << i)) {
				cbe_write_ctr(cpu, i, reset_value[i]);
				enable_ctr(cpu, i, pm_regs.pm07_cntrl);
				interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
			} else {
				/* Disable counter */
				cbe_write_pm07_control(cpu, i, 0);
			}
		}

		cbe_get_and_clear_pm_interrupts(cpu);
		cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
		cbe_enable_pm(cpu);
	}

	virt_cntr_inter_mask = interrupt_mask;
	oprofile_running = 1;
	smp_wmb();	/* publish running flag and interrupt mask */

	/*
	 * NOTE: start_virt_cntrs will result in cell_virtual_cntr()
	 * being executed, which manipulates the PMU.  The "virtual
	 * counter" timer is started here so that access to the PMU in
	 * the for-loop above does not need to be synchronized.
	 */
	start_virt_cntrs();

	return 0;
}
1450
1451static int cell_global_start(struct op_counter_config *ctr)
1452{
1453 if (profiling_mode == SPU_PROFILING_CYCLES)
1454 return cell_global_start_spu_cycles(ctr);
1455 else if (profiling_mode == SPU_PROFILING_EVENTS)
1456 return cell_global_start_spu_events(ctr);
1457 else
1458 return cell_global_start_ppu(ctr);
1459}
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
/*
 * PMU interrupt handler for SPU event profiling: counter 0 overflowed
 * on the profiled SPU.  Drain the trace buffer (only the most recent
 * entry is used), extract the SPU program counter from it and hand one
 * sample to the oprofile SPU buffer-sync code.
 */
static void cell_handle_interrupt_spu(struct pt_regs *regs,
				      struct op_counter_config *ctr)
{
	u32 cpu, cpu_tmp;
	u64 trace_entry;
	u32 interrupt_mask;
	u64 trace_buffer[2];
	u64 last_trace_buffer;
	u32 sample;
	u32 trace_addr;
	unsigned long sample_array_lock_flags;
	int spu_num;
	unsigned long flags;

	/* Make sure the SPU event interrupt handler and the SPU event
	 * swap don't access the counters simultaneously.
	 */
	cpu = smp_processor_id();
	spin_lock_irqsave(&cntr_lock, flags);

	cpu_tmp = cpu;
	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/* poison values, overwritten below when a sample is taken */
	sample = 0xABCDEF;
	trace_entry = 0xfedcba;
	last_trace_buffer = 0xdeadbeaf;

	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		/* disable writes to the trace buffer while draining it */
		cbe_write_pm(cpu, pm_interval, 0);

		/* only one perf counter is in use: cntr 0 */
		if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
		    && ctr[0].enabled)
			/* The SPU PC values will be read from the trace
			 * buffer; just reset the counter.
			 */
			cbe_write_ctr(cpu, 0, reset_value[0]);

		trace_addr = cbe_read_pm(cpu, trace_address);

		while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
			/* Drain the entire trace buffer; only the most
			 * recent entry (left in trace_buffer) is used.
			 */
			cbe_read_trace_buffer(cpu, trace_buffer);
			trace_addr = cbe_read_pm(cpu, trace_address);
		}

		/* mask out the SPU PC field of the trace entry */
		trace_entry = trace_buffer[0]
			& 0x00000000FFFF0000;

		/* NOTE(review): shifting right by 14 presumably expands
		 * the stored upper 16 bits of the SPU PC back to the
		 * full address — confirm against the Cell PMU trace
		 * buffer format documentation.
		 */
		sample = trace_entry >> 14;
		last_trace_buffer = trace_buffer[0];

		spu_num = spu_evnt_phys_spu_indx
			+ (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);

		/* make sure only one process at a time calls
		 * spu_sync_buffer()
		 */
		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
				  sample_array_lock_flags);
		spu_sync_buffer(spu_num, &sample, 1);
		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
				       sample_array_lock_flags);

		smp_wmb();	/* ensure the sample is globally visible
				 * before restarting collection
				 */

		/* The counters were frozen by the interrupt;
		 * re-enable the interrupt and restart the counters.
		 */
		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/* clear the trace buffer, re-enable writes to it */
		cbe_write_pm(cpu, trace_address, 0);
		cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);

		/* The writes to the various performance counters only
		 * go to a latch; the new values (interrupt setting
		 * bits, reset counter value etc.) are not copied to the
		 * actual registers until the performance monitor is
		 * enabled.  Therefore the monitor must be disabled
		 * while writing to the latches.  This is a HW design
		 * issue.
		 */
		write_pm_cntrl(cpu);
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&cntr_lock, flags);
}
1606
/*
 * PMU interrupt handler for PPU profiling: record one sample for each
 * overflowed, enabled counter and re-arm it.
 */
static void cell_handle_interrupt_ppu(struct pt_regs *regs,
				      struct op_counter_config *ctr)
{
	u32 cpu;
	u64 pc;
	int is_kernel;
	unsigned long flags = 0;
	u32 interrupt_mask;
	int i;

	cpu = smp_processor_id();

	/*
	 * Make sure the interrupt handler and the virt counter routine
	 * are not running at the same time.  See cell_virtual_cntr()
	 * for details.
	 */
	spin_lock_irqsave(&cntr_lock, flags);

	/*
	 * The performance monitor must be disabled and re-enabled to
	 * get the desired behavior from the hardware (see the latch
	 * comment below).
	 */
	cbe_disable_pm(cpu);

	interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);

	/*
	 * If the interrupt mask is already clear, the virtual-counter
	 * swap has taken the interrupt; when the generating thread is
	 * restored its counter is set to 0xFFFFFFF0 so the interrupt
	 * will be regenerated.
	 */
	if ((oprofile_running == 1) && (interrupt_mask != 0)) {
		pc = regs->nip;
		is_kernel = is_kernel_addr(pc);

		for (i = 0; i < num_counters; ++i) {
			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
			    && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				cbe_write_ctr(cpu, i, reset_value[i]);
			}
		}

		/*
		 * The counters were frozen by the interrupt; re-enable
		 * the interrupt with the virtual-counter mask so the
		 * counters start again even if the virtual-counter
		 * routine cleared the interrupts in a race.
		 */
		cbe_enable_pm_interrupts(cpu, hdw_thread,
					 virt_cntr_inter_mask);

		/*
		 * The writes to the performance counters only go to a
		 * latch; the new values are not copied to the actual
		 * registers until the performance monitor is enabled,
		 * so it must stay disabled while writing.  This is a
		 * HW design issue.
		 */
		cbe_enable_pm(cpu);
	}
	spin_unlock_irqrestore(&cntr_lock, flags);
}
1679
1680static void cell_handle_interrupt(struct pt_regs *regs,
1681 struct op_counter_config *ctr)
1682{
1683 if (profiling_mode == PPU_PROFILING)
1684 cell_handle_interrupt_ppu(regs, ctr);
1685 else
1686 cell_handle_interrupt_spu(regs, ctr);
1687}
1688
1689
1690
1691
1692
1693
1694static int cell_sync_start(void)
1695{
1696 if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1697 (profiling_mode == SPU_PROFILING_EVENTS))
1698 return spu_sync_start();
1699 else
1700 return DO_GENERIC_SYNC;
1701}
1702
1703static int cell_sync_stop(void)
1704{
1705 if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1706 (profiling_mode == SPU_PROFILING_EVENTS))
1707 return spu_sync_stop();
1708 else
1709 return 1;
1710}
1711
/* oprofile model operations for the Cell Broadband Engine PMU */
struct op_powerpc_model op_model_cell = {
	.reg_setup = cell_reg_setup,
	.cpu_setup = cell_cpu_setup,
	.global_start = cell_global_start,
	.global_stop = cell_global_stop,
	.sync_start = cell_sync_start,
	.sync_stop = cell_sync_stop,
	.handle_interrupt = cell_handle_interrupt,
};
1721