/*
 * PA-RISC hardware performance counter support.
 *
 * Exposes the processor's "Intrigue" performance counters through a
 * misc character device.  Counter configuration images are shifted
 * into the CPU's remote diagnose registers (RDRs), and the resulting
 * counter values are read back and returned to user space via ioctl.
 */
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>

#include <asm/uaccess.h>
#include <asm/perf.h>
#include <asm/parisc-device.h>
#include <asm/processor.h>
#include <asm/runway.h>
#include <asm/io.h>

#include "perf_images.h"

#define MAX_RDR_WORDS	24
#define PERF_VERSION	2	/* as reported by the PA_PERF_VERSION ioctl */

/*
 * Per-RDR attributes: bit width, number of 64-bit words of storage,
 * and write_control (nonzero if the RDR is write-protected;
 * write_control >> 3 indexes the bitmask array below).
 */
struct rdr_tbl_ent {
	uint16_t	width;
	uint8_t		num_words;
	uint8_t		write_control;
};

static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
static spinlock_t perf_lock;
struct parisc_device *cpu_device __read_mostly;

/* RDRs written when an image is downloaded, terminated by -1 */
static const int perf_rdrs_W[] =
	{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };

static const int perf_rdrs_U[] =
	{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };

/* RDR attributes for the PCX-W (Cuda) interface, indexed by RDR number */
static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
	{ 19,   1,  8 },	/* RDR 0 */
	{ 16,   1, 16 },	/* RDR 1 */
	{ 72,   2,  0 },	/* RDR 2 */
	{ 81,   2,  0 },	/* RDR 3 */
	{ 328,  6,  0 },	/* RDR 4 */
	{ 160,  3,  0 },	/* RDR 5 */
	{ 336,  6,  0 },	/* RDR 6 */
	{ 164,  3,  0 },	/* RDR 7 */
	{ 0,    0,  0 },	/* RDR 8 */
	{ 35,   1,  0 },	/* RDR 9 */
	{ 6,    1,  0 },	/* RDR 10 */
	{ 18,   1,  0 },	/* RDR 11 */
	{ 13,   1,  0 },	/* RDR 12 */
	{ 8,    1,  0 },	/* RDR 13 */
	{ 8,    1,  0 },	/* RDR 14 */
	{ 8,    1,  0 },	/* RDR 15 */
	{ 1530, 24, 0 },	/* RDR 16 */
	{ 16,   1,  0 },	/* RDR 17 */
	{ 4,    1,  0 },	/* RDR 18 */
	{ 0,    0,  0 },	/* RDR 19 */
	{ 152,  3, 24 },	/* RDR 20 */
	{ 152,  3, 24 },	/* RDR 21 */
	{ 233,  4, 48 },	/* RDR 22 */
	{ 233,  4, 48 },	/* RDR 23 */
	{ 71,   2,  0 },	/* RDR 24 */
	{ 71,   2,  0 },	/* RDR 25 */
	{ 11,   1,  0 },	/* RDR 26 */
	{ 18,   1,  0 },	/* RDR 27 */
	{ 128,  2,  0 },	/* RDR 28 */
	{ 0,    0,  0 },	/* RDR 29 */
	{ 16,   1,  0 },	/* RDR 30 */
	{ 16,   1,  0 },	/* RDR 31 */
};

/* RDR attributes for the PCX-U (Onyx) interface, indexed by RDR number */
static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
	{ 19,   1,  8 },	/* RDR 0 */
	{ 32,   1, 16 },	/* RDR 1 */
	{ 20,   1,  0 },	/* RDR 2 */
	{ 0,    0,  0 },	/* RDR 3 */
	{ 344,  6,  0 },	/* RDR 4 */
	{ 176,  3,  0 },	/* RDR 5 */
	{ 336,  6,  0 },	/* RDR 6 */
	{ 0,    0,  0 },	/* RDR 7 */
	{ 0,    0,  0 },	/* RDR 8 */
	{ 0,    0,  0 },	/* RDR 9 */
	{ 28,   1,  0 },	/* RDR 10 */
	{ 33,   1,  0 },	/* RDR 11 */
	{ 0,    0,  0 },	/* RDR 12 */
	{ 230,  4,  0 },	/* RDR 13 */
	{ 32,   1,  0 },	/* RDR 14 */
	{ 128,  2,  0 },	/* RDR 15 */
	{ 1494, 24, 0 },	/* RDR 16 */
	{ 18,   1,  0 },	/* RDR 17 */
	{ 4,    1,  0 },	/* RDR 18 */
	{ 0,    0,  0 },	/* RDR 19 */
	{ 158,  3, 24 },	/* RDR 20 */
	{ 158,  3, 24 },	/* RDR 21 */
	{ 194,  4, 48 },	/* RDR 22 */
	{ 194,  4, 48 },	/* RDR 23 */
	{ 71,   2,  0 },	/* RDR 24 */
	{ 71,   2,  0 },	/* RDR 25 */
	{ 28,   1,  0 },	/* RDR 26 */
	{ 33,   1,  0 },	/* RDR 27 */
	{ 88,   2,  0 },	/* RDR 28 */
	{ 32,   1,  0 },	/* RDR 29 */
	{ 24,   1,  0 },	/* RDR 30 */
	{ 16,   1,  0 },	/* RDR 31 */
};

/*
 * Write-protect bitmasks for the RDRs that have a nonzero write_control
 * entry.  A set bit takes the value from the downloaded image; a clear
 * bit preserves the value currently in the hardware (see
 * perf_write_image()).
 */
static const uint64_t perf_bitmasks[] = {
	0x0000000000000000ul,
	0xfdffe00000000000ul,
	0x003f000000000000ul,
	0x00fffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffffffffffcul,
	0xff00000000000000ul
};

/*
 * Variant of the above for the Piranha-class CPUs (PCX-W2 / Mako),
 * selected in perf_init().
 */
static const uint64_t perf_bitmasks_piranha[] = {
	0x0000000000000000ul,
	0xfdffe00000000000ul,
	0x003f000000000000ul,
	0x00fffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffc000000000000ul
};

/* Points at the bitmask table appropriate for the running CPU */
static const uint64_t *bitmask_array;

/*
 * Function prototypes
 */
static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
	loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static const struct rdr_tbl_ent *perf_rdr_get_entry(uint32_t rdr_num);
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
static int perf_rdr_clear(uint32_t rdr_num);
static int perf_write_image(uint64_t *memaddr);
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);

/* Routines implemented in assembly (perf_asm.S) */
extern uint64_t perf_rdr_shift_in_W(uint32_t rdr_num, uint16_t width);
extern uint64_t perf_rdr_shift_in_U(uint32_t rdr_num, uint16_t width);
extern void perf_rdr_shift_out_W(uint32_t rdr_num, uint64_t buffer);
extern void perf_rdr_shift_out_U(uint32_t rdr_num, uint64_t buffer);
extern void perf_intrigue_enable_perf_counters(void);
extern void perf_intrigue_disable_perf_counters(void);
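
/*
 * perf_config - stop the counters, download the requested image into
 * the performance-monitoring hardware, and restart the counters.
 * Returns the number of bytes consumed (sizeof(uint32_t)) on success.
 */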
static int perf_config(uint32_t *image_ptr)
{
	long error;
	uint32_t raddr[4];

	/* Stop the counters and read them out before loading a new image */
	error = perf_stop_counters(raddr);
	if (error != 0) {
		printk(KERN_ERR "perf_config: perf_stop_counters = %ld\n", error);
		return -EINVAL;
	}

	printk(KERN_DEBUG "Preparing to write image\n");

	/* Download the image into the hardware */
	error = perf_write_image((uint64_t *)image_ptr);
	if (error != 0) {
		printk(KERN_ERR "perf_config: DOWNLOAD = %ld\n", error);
		return -EINVAL;
	}

	printk(KERN_DEBUG "Preparing to start counters\n");

	/* Start the counters running again */
	perf_start_counters();

	/* Report the 4 bytes consumed by the triggering write() */
	return sizeof(uint32_t);
}

/* Open the device: only one user at a time */
static int perf_open(struct inode *inode, struct file *file)
{
	spin_lock(&perf_lock);
	if (perf_enabled) {
		spin_unlock(&perf_lock);
		return -EBUSY;
	}
	perf_enabled = 1;
	spin_unlock(&perf_lock);

	return 0;
}

/* Close the device and drop the "in use" flag */
static int perf_release(struct inode *inode, struct file *file)
{
	spin_lock(&perf_lock);
	perf_enabled = 0;
	spin_unlock(&perf_lock);

	return 0;
}

/* Read does nothing here; counter values are returned via ioctl */
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
	return 0;
}
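
/*
 * perf_write - select and download a counter image.  The caller writes
 * a single uint32_t whose upper 16 bits name the processor interface
 * (CUDA or ONYX) and whose lower 16 bits index the image table for
 * that interface.
 */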
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
	loff_t *ppos)
{
	size_t image_size;
	uint32_t image_type;
	uint32_t interface_type;
	uint32_t test;

	if (perf_processor_interface == ONYX_INTF)
		image_size = PCXU_IMAGE_SIZE;
	else if (perf_processor_interface == CUDA_INTF)
		image_size = PCXW_IMAGE_SIZE;
	else
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (count != sizeof(uint32_t))
		return -EIO;

	/* copy_from_user() returns the number of bytes left, not an errno */
	if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
		return -EFAULT;

	/* Top 16 bits select the interface type, bottom 16 the image index */
	interface_type = (image_type >> 16) & 0xffff;
	test = (image_type & 0xffff);

	/* The requested interface must match the CPU we are running on */
	if (((perf_processor_interface == CUDA_INTF) &&
			(interface_type != CUDA_INTF)) ||
	    ((perf_processor_interface == ONYX_INTF) &&
			(interface_type != ONYX_INTF)))
		return -EINVAL;

	/* Bounds-check the image index against the image tables */
	if (((interface_type == CUDA_INTF) &&
			(test >= MAX_CUDA_IMAGES)) ||
	    ((interface_type == ONYX_INTF) &&
			(test >= MAX_ONYX_IMAGES)))
		return -EINVAL;

	/* Download the selected image and restart the counters */
	if (interface_type == CUDA_INTF)
		return perf_config(cuda_images[test]);
	else
		return perf_config(onyx_images[test]);
}

/*
 * perf_patch_images - patch the pre-built counter images with the
 * kernel's actual TLB-miss handler and interrupt vector (IVA)
 * addresses.  Currently compiled out (#if 0); the stock images are
 * used unmodified.
 */
static void perf_patch_images(void)
{
#if 0
	/* Kernel entry points the images are patched to point at */
	extern void $i_itlb_miss_2_0();
	extern void $i_dtlb_miss_2_0();
	extern void PA2_0_iva();

	uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
	uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
	uint32_t IVAaddress = (uint32_t)&PA2_0_iva;

	if (perf_processor_interface == ONYX_INTF) {
		/* Patch the TLB-miss addresses into the Onyx images */
		onyx_images[TLBMISS][15] &= 0xffffff00;	/* clear the low byte */
		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBMISS][17] = itlb_addr;

		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBHANDMISS][17] = itlb_addr;

		onyx_images[BIG_CPI][15] &= 0xffffff00;
		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[BIG_CPI][17] = itlb_addr;

		onyx_images[PANIC][15] &= 0xffffff00;
		onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24));
		onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;

	} else if (perf_processor_interface == CUDA_INTF) {
		/* The Cuda images pack the addresses differently */
		cuda_images[TLBMISS][16] =
			(cuda_images[TLBMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[TLBHANDMISS][16] =
			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBHANDMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[BIG_CPI][16] =
			(cuda_images[BIG_CPI][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[BIG_CPI][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
	} else {
		/* Unknown interface: nothing to patch */
	}
#endif
}
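
/*
 * perf_ioctl - start/stop collection and query the driver version:
 *	PA_PERF_ON       start the counters
 *	PA_PERF_OFF      stop the counters and copy the four 32-bit
 *	                 counter values to the user buffer in arg
 *	PA_PERF_VERSION  return PERF_VERSION to the user
 */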
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long error_start;
	uint32_t raddr[4];
	int error = 0;

	switch (cmd) {

	case PA_PERF_ON:
		/* Start the counters */
		perf_start_counters();
		break;

	case PA_PERF_OFF:
		error_start = perf_stop_counters(raddr);
		if (error_start != 0) {
			printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
			error = -EFAULT;
			break;
		}

		/* Copy the four counter values back to the user */
		if (copy_to_user((void __user *)arg, raddr,
				sizeof(raddr)) != 0) {
			error = -EFAULT;
			break;
		}
		break;

	case PA_PERF_VERSION:
		/* Return the version number of this driver */
		error = put_user(PERF_VERSION, (int __user *)arg);
		break;

	default:
		error = -ENOTTY;
	}

	return error;
}

static const struct file_operations perf_fops = {
	.llseek = no_llseek,
	.read = perf_read,
	.write = perf_write,
	.unlocked_ioctl = perf_ioctl,
	.compat_ioctl = perf_ioctl,
	.open = perf_open,
	.release = perf_release
};

static struct miscdevice perf_dev = {
	MISC_DYNAMIC_MINOR,
	PA_PERF_DEV,
	&perf_fops
};
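
/*
 * Illustrative user-space usage (a sketch only, not part of this
 * driver; the device node name and image index are assumptions):
 *
 *	int fd = open("/dev/perf", O_RDWR);
 *	uint32_t sel = (CUDA_INTF << 16) | 0;	// interface type | image index
 *	write(fd, &sel, sizeof(sel));		// download image, start counting
 *	...
 *	uint32_t counts[4];
 *	ioctl(fd, PA_PERF_OFF, counts);		// stop and read the four counters
 */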
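
/*
 * perf_init - identify the processor interface (Onyx for PCX-U class,
 * Cuda for PCX-W class CPUs), pick the matching write-protect bitmask
 * table, and register the misc device.
 */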
static int __init perf_init(void)
{
	int ret;

	/* Pick the processor interface and bitmask table for this CPU */
	bitmask_array = perf_bitmasks;

	if (boot_cpu_data.cpu_type == pcxu ||
	    boot_cpu_data.cpu_type == pcxu_) {
		perf_processor_interface = ONYX_INTF;
	} else if (boot_cpu_data.cpu_type == pcxw ||
		   boot_cpu_data.cpu_type == pcxw_ ||
		   boot_cpu_data.cpu_type == pcxw2 ||
		   boot_cpu_data.cpu_type == mako ||
		   boot_cpu_data.cpu_type == mako2) {
		perf_processor_interface = CUDA_INTF;
		if (boot_cpu_data.cpu_type == pcxw2 ||
		    boot_cpu_data.cpu_type == mako ||
		    boot_cpu_data.cpu_type == mako2)
			bitmask_array = perf_bitmasks_piranha;
	} else {
		perf_processor_interface = UNKNOWN_INTF;
		printk(KERN_INFO "Performance monitoring counters not supported on this processor\n");
		return -ENODEV;
	}

	ret = misc_register(&perf_dev);
	if (ret) {
		printk(KERN_ERR "Performance monitoring counters: "
			"cannot register misc device.\n");
		return ret;
	}

	/* Patch the images to point to the correct kernel addresses */
	perf_patch_images();

	spin_lock_init(&perf_lock);

	/* Note: only the first CPU's counters are reachable this way */
	cpu_device = per_cpu(cpu_data, 0).dev;
	printk(KERN_INFO "Performance monitoring counters enabled for %s\n",
		per_cpu(cpu_data, 0).dev->name);

	return 0;
}

/* Start the performance-monitoring hardware counting */
static void perf_start_counters(void)
{
	perf_intrigue_enable_perf_counters();
}
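
/*
 * perf_stop_counters - stop the hardware counters and extract the four
 * 32-bit counter values into raddr[0..3].  On Onyx the counters live
 * in RDR 16 and are unpacked by hand; on Cuda they are read from
 * RDR 15, which is then cleared.
 */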
static int perf_stop_counters(uint32_t *raddr)
{
	uint64_t userbuf[MAX_RDR_WORDS];

	/* Disable performance counters */
	perf_intrigue_disable_perf_counters();

	if (perf_processor_interface == ONYX_INTF) {
		uint64_t tmp64;

		/* Read the counter RDR (#16) into the local buffer */
		if (!perf_rdr_read_ubuf(16, userbuf))
			return -EACCES;

		/* Counter 0 spans userbuf[21] and userbuf[22] */
		tmp64 = (userbuf[21] << 22) & 0x00000000ffc00000;
		tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
		tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
		raddr[0] = (uint32_t)tmp64;

		/* Counter 1 is in userbuf[22] */
		tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
		tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
		raddr[1] = (uint32_t)tmp64;

		/* Counter 2 spans userbuf[22] and userbuf[23] */
		tmp64 = (userbuf[22] << 24) & 0x00000000ff000000;
		tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
		raddr[2] = (uint32_t)tmp64;

		/* Counter 3 is in userbuf[23] */
		tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
		tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
		raddr[3] = (uint32_t)tmp64;

		/*
		 * Zero the counter fields in the local copy and write it
		 * back so the next run starts from zero.
		 */
		userbuf[21] &= 0xfffffffffffffc00ul;	/* clear the low 10 bits */
		userbuf[22] = 0;
		userbuf[23] = 0;

		perf_rdr_write(16, userbuf);
	} else {
		/* Read the counter RDR (#15), then clear it */
		if (!perf_rdr_read_ubuf(15, userbuf)) {
			return -EACCES;
		}

		perf_rdr_clear(15);

		/* The four 32-bit counters are packed into two 64-bit words */
		raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
		raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
	}

	return 0;
}

/* Return the RDR attribute entry for this RDR on the current interface */
static const struct rdr_tbl_ent *perf_rdr_get_entry(uint32_t rdr_num)
{
	if (perf_processor_interface == ONYX_INTF) {
		return &perf_rdr_tbl_U[rdr_num];
	} else {
		return &perf_rdr_tbl_W[rdr_num];
	}
}
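
/*
 * perf_rdr_read_ubuf - shift the contents of an RDR into buffer[]
 * (tentry->num_words 64-bit words).  Returns 0 if the RDR is unused
 * (width 0), 1 on success.
 */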
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
{
	uint64_t data, data_mask = 0;
	uint32_t width, xbits, i;
	const struct rdr_tbl_ent *tentry;

	tentry = perf_rdr_get_entry(rdr_num);
	if ((width = tentry->width) == 0)
		return 0;

	/* Clear out the destination buffer */
	i = tentry->num_words;
	while (i--) {
		buffer[i] = 0;
	}

	/* Mask for the bits that spill over into the previous word */
	if ((xbits = width & 0x03f) != 0) {
		data_mask = 1;
		data_mask <<= (64 - xbits);
		data_mask--;
	}

	/* Shift the RDR data in, one 64-bit chunk at a time */
	i = tentry->num_words;
	while (i--) {
		if (perf_processor_interface == ONYX_INTF) {
			data = perf_rdr_shift_in_U(rdr_num, width);
		} else {
			data = perf_rdr_shift_in_W(rdr_num, width);
		}
		if (xbits) {
			buffer[i] |= (data << (64 - xbits));
			if (i) {
				buffer[i-1] |= ((data >> xbits) & data_mask);
			}
		} else {
			buffer[i] = data;
		}
	}

	return 1;
}

/* Zero out an entire RDR by shifting in zeros */
static int perf_rdr_clear(uint32_t rdr_num)
{
	const struct rdr_tbl_ent *tentry;
	int32_t i;

	tentry = perf_rdr_get_entry(rdr_num);

	if (tentry->width == 0) {
		return -1;
	}

	i = tentry->num_words;
	while (i--) {
		if (perf_processor_interface == ONYX_INTF) {
			perf_rdr_shift_out_U(rdr_num, 0UL);
		} else {
			perf_rdr_shift_out_W(rdr_num, 0UL);
		}
	}

	return 0;
}
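
/*
 * perf_write_image - download a counter image: merge it into the RDRs
 * listed for this interface (honouring each RDR's write-protect
 * bitmask), then program the Runway status and debug registers from
 * the tail of the image.
 */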
static int perf_write_image(uint64_t *memaddr)
{
	uint64_t buffer[MAX_RDR_WORDS];
	uint64_t *bptr;
	uint32_t dwords;
	const uint32_t *intrigue_rdr;
	const uint64_t *intrigue_bitmask;
	uint64_t tmp64;
	void __iomem *runway;
	const struct rdr_tbl_ent *tentry;
	int i;

	/* Clear out counters */
	if (perf_processor_interface == ONYX_INTF) {

		perf_rdr_clear(16);

		/* Toggle performance monitor */
		perf_intrigue_enable_perf_counters();
		perf_intrigue_disable_perf_counters();

		intrigue_rdr = perf_rdrs_U;
	} else {
		perf_rdr_clear(15);
		intrigue_rdr = perf_rdrs_W;
	}

	/* Write all RDRs, merging the image through the write-protect masks */
	while (*intrigue_rdr != -1) {
		tentry = perf_rdr_get_entry(*intrigue_rdr);
		perf_rdr_read_ubuf(*intrigue_rdr, buffer);
		bptr   = &buffer[0];
		dwords = tentry->num_words;
		if (tentry->write_control) {
			intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
			while (dwords--) {
				tmp64 = *intrigue_bitmask & *memaddr++;
				tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
				*bptr++ = tmp64;
			}
		} else {
			while (dwords--) {
				*bptr++ = *memaddr++;
			}
		}

		perf_rdr_write(*intrigue_rdr, buffer);
		intrigue_rdr++;
	}

	/*
	 * Now copy out the Runway stuff which is not in RDRs
	 */
	if (cpu_device == NULL) {
		printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
		return -1;
	}

	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
	if (!runway) {
		printk(KERN_ERR "perf_write_image: ioremap failed!\n");
		return -ENOMEM;
	}

	/* Merge the image bits into the Runway STATUS register */
	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
		     runway + RUNWAY_STATUS);

	/* Write the eight Runway DEBUG registers */
	for (i = 0; i < 8; i++) {
		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
	}

	return 0;
}

/* Write buffer[] back into an RDR, shifting out one word at a time */
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
{
	const struct rdr_tbl_ent *tentry;
	int32_t i;

	printk(KERN_DEBUG "perf_rdr_write\n");
	tentry = perf_rdr_get_entry(rdr_num);
	if (tentry->width == 0)
		return;

	i = tentry->num_words;
	while (i--) {
		if (perf_processor_interface == ONYX_INTF) {
			perf_rdr_shift_out_U(rdr_num, buffer[i]);
		} else {
			perf_rdr_shift_out_W(rdr_num, buffer[i]);
		}
	}
	printk(KERN_DEBUG "perf_rdr_write done\n");
}
module_init(perf_init);