1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45#include <linux/capability.h>
46#include <linux/init.h>
47#include <linux/proc_fs.h>
48#include <linux/miscdevice.h>
49#include <linux/spinlock.h>
50
51#include <asm/uaccess.h>
52#include <asm/perf.h>
53#include <asm/parisc-device.h>
54#include <asm/processor.h>
55#include <asm/runway.h>
56#include <asm/io.h>
57
58#include "perf_images.h"
59
60#define MAX_RDR_WORDS 24
61#define PERF_VERSION 2
62
63
/*
 * Per-RDR (Remote Diagnose Register) layout descriptor; one entry per
 * RDR number in perf_rdr_tbl_U / perf_rdr_tbl_W below.
 */
struct rdr_tbl_ent {
	uint16_t	width;		/* register width in bits (0 = RDR unused on this CPU) */
	uint8_t		num_words;	/* number of 64-bit words needed to hold it */
	uint8_t		write_control;	/* bit offset into bitmask_array for partially
					 * writable RDRs; 0 = fully writable */
};
69
/* Detected counter interface (ONYX_INTF/CUDA_INTF); UNKNOWN_INTF until probed. */
static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
/* Non-zero while a client holds /dev/perf open; enforces exclusive access. */
static int perf_enabled __read_mostly;
/* Protects the perf_enabled open/release handshake. */
static spinlock_t perf_lock;
/* CPU 0's device; used by perf_write_image() to reach the runway registers. */
struct parisc_device *cpu_device __read_mostly;
74
75
/* RDR numbers written by perf_write_image() on PCX-W CPUs; -1 terminates. */
static const int perf_rdrs_W[] =
	{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };

/* RDR numbers written by perf_write_image() on PCX-U CPUs; -1 terminates. */
static const int perf_rdrs_U[] =
	{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };
82
83
/*
 * RDR layout table for PCX-W (CUDA interface) CPUs, indexed by RDR
 * number: { width-in-bits, words, write_control bit offset }.
 * Width 0 marks RDRs that do not exist on this CPU.
 */
static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
	{ 19,	1,	8 },
	{ 16,	1,	16 },
	{ 72,	2,	0 },
	{ 81,	2,	0 },
	{ 328,	6,	0 },
	{ 160,	3,	0 },
	{ 336,	6,	0 },
	{ 164,	3,	0 },
	{ 0,	0,	0 },
	{ 35,	1,	0 },
	{ 6,	1,	0 },
	{ 18,	1,	0 },
	{ 13,	1,	0 },
	{ 8,	1,	0 },
	{ 8,	1,	0 },
	{ 8,	1,	0 },
	{ 1530,	24,	0 },	/* RDR 16: the counter RDR */
	{ 16,	1,	0 },
	{ 4,	1,	0 },
	{ 0,	0,	0 },
	{ 152,	3,	24 },
	{ 152,	3,	24 },
	{ 233,	4,	48 },
	{ 233,	4,	48 },
	{ 71,	2,	0 },
	{ 71,	2,	0 },
	{ 11,	1,	0 },
	{ 18,	1,	0 },
	{ 128,	2,	0 },
	{ 0,	0,	0 },
	{ 16,	1,	0 },
	{ 16,	1,	0 },
};
118
119
/*
 * RDR layout table for PCX-U (ONYX interface) CPUs, indexed by RDR
 * number: { width-in-bits, words, write_control bit offset }.
 * Width 0 marks RDRs that do not exist on this CPU.
 */
static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
	{ 19,	1,	8 },
	{ 32,	1,	16 },
	{ 20,	1,	0 },
	{ 0,	0,	0 },
	{ 344,	6,	0 },
	{ 176,	3,	0 },
	{ 336,	6,	0 },
	{ 0,	0,	0 },
	{ 0,	0,	0 },
	{ 0,	0,	0 },
	{ 28,	1,	0 },
	{ 33,	1,	0 },
	{ 0,	0,	0 },
	{ 230,	4,	0 },
	{ 32,	1,	0 },
	{ 128,	2,	0 },
	{ 1494,	24,	0 },	/* RDR 16: the counter RDR */
	{ 18,	1,	0 },
	{ 4,	1,	0 },
	{ 0,	0,	0 },
	{ 158,	3,	24 },
	{ 158,	3,	24 },
	{ 194,	4,	48 },
	{ 194,	4,	48 },
	{ 71,	2,	0 },
	{ 71,	2,	0 },
	{ 28,	1,	0 },
	{ 33,	1,	0 },
	{ 88,	2,	0 },
	{ 32,	1,	0 },
	{ 24,	1,	0 },
	{ 16,	1,	0 },
};
154
155
156
157
158
/*
 * Write-control bitmasks: a set bit means the corresponding image bit
 * may be written to the RDR; clear bits keep the hardware's current
 * value (see the merge loop in perf_write_image()).  Indexed via
 * rdr_tbl_ent.write_control >> 3.
 */
static const uint64_t perf_bitmasks[] = {
	0x0000000000000000ul,	/* first dbl word must be zero */
	0xfdffe00000000000ul,	/* RDR0 */
	0x003f000000000000ul,	/* RDR1 */
	0x00fffffffffffffful,	/* RDR20-RDR21 */
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,	/* RDR22-RDR23 */
	0xfffffffffffffffful,
	0xfffffffffffffffcul,
	0xff00000000000000ul
};
171
172
173
174
175
/*
 * Write-control bitmasks for PCX-W2/mako/mako2 ("piranha") CPUs; same
 * layout as perf_bitmasks but with different writable bits in the last
 * two words.
 */
static const uint64_t perf_bitmasks_piranha[] = {
	0x0000000000000000ul,	/* first dbl word must be zero */
	0xfdffe00000000000ul,	/* RDR0 */
	0x003f000000000000ul,	/* RDR1 */
	0x00fffffffffffffful,	/* RDR20-RDR21 */
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,	/* RDR22-RDR23 */
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffc000000000000ul
};

/* Selected at init: perf_bitmasks or perf_bitmasks_piranha for this CPU. */
static const uint64_t *bitmask_array;
190
191
192
193
/*
 * Local function prototypes.
 */
static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
	loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
static int perf_rdr_clear(uint32_t rdr_num);
static int perf_write_image(uint64_t *memaddr);
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);

/*
 * Low-level RDR shift and counter-enable primitives, implemented in
 * assembly elsewhere (presumably perf_asm.S — confirm in the tree).
 */
extern uint64_t perf_rdr_shift_in_W (uint32_t rdr_num, uint16_t width);
extern uint64_t perf_rdr_shift_in_U (uint32_t rdr_num, uint16_t width);
extern void perf_rdr_shift_out_W (uint32_t rdr_num, uint64_t buffer);
extern void perf_rdr_shift_out_U (uint32_t rdr_num, uint64_t buffer);
extern void perf_intrigue_enable_perf_counters (void);
extern void perf_intrigue_disable_perf_counters (void);
216
217
218
219
220
221
222
223
224
225
226
227
228static int perf_config(uint32_t *image_ptr)
229{
230 long error;
231 uint32_t raddr[4];
232
233
234 error = perf_stop_counters(raddr);
235 if (error != 0) {
236 printk("perf_config: perf_stop_counters = %ld\n", error);
237 return -EINVAL;
238 }
239
240printk("Preparing to write image\n");
241
242 error = perf_write_image((uint64_t *)image_ptr);
243 if (error != 0) {
244 printk("perf_config: DOWNLOAD = %ld\n", error);
245 return -EINVAL;
246 }
247
248printk("Preparing to start counters\n");
249
250
251 perf_start_counters();
252
253 return sizeof(uint32_t);
254}
255
256
257
258
259
260
261static int perf_open(struct inode *inode, struct file *file)
262{
263 spin_lock(&perf_lock);
264 if (perf_enabled) {
265 spin_unlock(&perf_lock);
266 return -EBUSY;
267 }
268 perf_enabled = 1;
269 spin_unlock(&perf_lock);
270
271 return 0;
272}
273
274
275
276
277static int perf_release(struct inode *inode, struct file *file)
278{
279 spin_lock(&perf_lock);
280 perf_enabled = 0;
281 spin_unlock(&perf_lock);
282
283 return 0;
284}
285
286
287
288
/*
 * perf_read
 *
 * Read is a no-op for this device: counter values are returned through
 * the PA_PERF_OFF ioctl instead.  Always reports zero bytes read.
 */
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
	return 0;
}
293
294
295
296
297
298
299
300
301static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
302 loff_t *ppos)
303{
304 int err;
305 size_t image_size;
306 uint32_t image_type;
307 uint32_t interface_type;
308 uint32_t test;
309
310 if (perf_processor_interface == ONYX_INTF)
311 image_size = PCXU_IMAGE_SIZE;
312 else if (perf_processor_interface == CUDA_INTF)
313 image_size = PCXW_IMAGE_SIZE;
314 else
315 return -EFAULT;
316
317 if (!capable(CAP_SYS_ADMIN))
318 return -EACCES;
319
320 if (count != sizeof(uint32_t))
321 return -EIO;
322
323 if ((err = copy_from_user(&image_type, buf, sizeof(uint32_t))) != 0)
324 return err;
325
326
327 interface_type = (image_type >> 16) & 0xffff;
328 test = (image_type & 0xffff);
329
330
331
332
333
334 if (((perf_processor_interface == CUDA_INTF) &&
335 (interface_type != CUDA_INTF)) ||
336 ((perf_processor_interface == ONYX_INTF) &&
337 (interface_type != ONYX_INTF)))
338 return -EINVAL;
339
340
341
342 if (((interface_type == CUDA_INTF) &&
343 (test >= MAX_CUDA_IMAGES)) ||
344 ((interface_type == ONYX_INTF) &&
345 (test >= MAX_ONYX_IMAGES)))
346 return -EINVAL;
347
348
349 if (interface_type == CUDA_INTF)
350 return perf_config(cuda_images[test]);
351 else
352 return perf_config(onyx_images[test]);
353
354 return count;
355}
356
357
358
359
/*
 * perf_patch_images
 *
 * Patch the counter images to match this system.  The entire body is
 * compiled out (#if 0): it would patch the kernel's TLB-miss handler
 * and IVA addresses into specific words of the TLBMISS/TLBHANDMISS/
 * BIG_CPI (and ONYX PANIC) images.  Kept for reference.
 */
static void perf_patch_images(void)
{
#if 0
	/* NOTE(review): dead code — the symbols below ($i_itlb_miss_2_0
	 * etc.) are not currently exported; confirm before re-enabling. */
	extern void $i_itlb_miss_2_0();
	extern void $i_dtlb_miss_2_0();
	extern void PA2_0_iva();

	/* 32-bit handler addresses to splice into the image words. */
	uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
	uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
	uint32_t IVAaddress = (uint32_t)&PA2_0_iva;

	if (perf_processor_interface == ONYX_INTF) {
		/* ONYX layout: dtlb_addr spans the low byte of word 15
		 * and the high bytes of word 16; itlb_addr is word 17. */
		onyx_images[TLBMISS][15] &= 0xffffff00;
		/* Patch up the dtlb_addr for the TLBMISS image */
		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBMISS][17] = itlb_addr;

		/* Same patch for the TLBHANDMISS image */
		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBHANDMISS][17] = itlb_addr;

		/* Same patch for the BIG_CPI image */
		onyx_images[BIG_CPI][15] &= 0xffffff00;
		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[BIG_CPI][17] = itlb_addr;

		/* PANIC gets the IVA address instead. */
		onyx_images[PANIC][15] &= 0xffffff00;
		onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24));
		onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;


	} else if (perf_processor_interface == CUDA_INTF) {
		/* CUDA layout: dtlb_addr spans words 16-17, itlb_addr
		 * spans words 17-18. */
		cuda_images[TLBMISS][16] =
			(cuda_images[TLBMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[TLBHANDMISS][16] =
			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBHANDMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[BIG_CPI][16] =
			(cuda_images[BIG_CPI][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[BIG_CPI][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
	} else {
		/* Unknown interface: nothing to patch. */
	}
#endif
}
433
434
435
436
437
438
439
440
441static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
442{
443 long error_start;
444 uint32_t raddr[4];
445 int error = 0;
446
447 switch (cmd) {
448
449 case PA_PERF_ON:
450
451 perf_start_counters();
452 break;
453
454 case PA_PERF_OFF:
455 error_start = perf_stop_counters(raddr);
456 if (error_start != 0) {
457 printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
458 error = -EFAULT;
459 break;
460 }
461
462
463 if (copy_to_user((void __user *)arg, raddr,
464 sizeof (raddr)) != 0) {
465 error = -EFAULT;
466 break;
467 }
468 break;
469
470 case PA_PERF_VERSION:
471
472 error = put_user(PERF_VERSION, (int *)arg);
473 break;
474
475 default:
476 error = -ENOTTY;
477 }
478
479 return error;
480}
481
/* File operations for the /dev/perf character device. */
static const struct file_operations perf_fops = {
	.llseek = no_llseek,
	.read = perf_read,
	.write = perf_write,
	.unlocked_ioctl = perf_ioctl,
	.compat_ioctl = perf_ioctl,
	.open = perf_open,
	.release = perf_release
};
491
492static struct miscdevice perf_dev = {
493 MISC_DYNAMIC_MINOR,
494 PA_PERF_DEV,
495 &perf_fops
496};
497
498
499
500
501static int __init perf_init(void)
502{
503 int ret;
504
505
506 bitmask_array = perf_bitmasks;
507
508 if (boot_cpu_data.cpu_type == pcxu ||
509 boot_cpu_data.cpu_type == pcxu_) {
510 perf_processor_interface = ONYX_INTF;
511 } else if (boot_cpu_data.cpu_type == pcxw ||
512 boot_cpu_data.cpu_type == pcxw_ ||
513 boot_cpu_data.cpu_type == pcxw2 ||
514 boot_cpu_data.cpu_type == mako ||
515 boot_cpu_data.cpu_type == mako2) {
516 perf_processor_interface = CUDA_INTF;
517 if (boot_cpu_data.cpu_type == pcxw2 ||
518 boot_cpu_data.cpu_type == mako ||
519 boot_cpu_data.cpu_type == mako2)
520 bitmask_array = perf_bitmasks_piranha;
521 } else {
522 perf_processor_interface = UNKNOWN_INTF;
523 printk("Performance monitoring counters not supported on this processor\n");
524 return -ENODEV;
525 }
526
527 ret = misc_register(&perf_dev);
528 if (ret) {
529 printk(KERN_ERR "Performance monitoring counters: "
530 "cannot register misc device.\n");
531 return ret;
532 }
533
534
535 perf_patch_images();
536
537 spin_lock_init(&perf_lock);
538
539
540 cpu_device = per_cpu(cpu_data, 0).dev;
541 printk("Performance monitoring counters enabled for %s\n",
542 per_cpu(cpu_data, 0).dev->name);
543
544 return 0;
545}
546device_initcall(perf_init);
547
548
549
550
551
552
/*
 * perf_start_counters
 *
 * Enable the performance counters via the low-level assembly helper.
 */
static void perf_start_counters(void)
{
	/* Enable performance monitor counters */
	perf_intrigue_enable_perf_counters();
}
558
559
560
561
562
563
564
/*
 * perf_stop_counters
 *
 * Stop the performance counters, extract the four 32-bit counter values
 * into raddr[0..3], and zero the counter bits in hardware so the next
 * run starts from zero.  Returns 0 on success, -13 if the counter RDR
 * could not be read.
 */
static int perf_stop_counters(uint32_t *raddr)
{
	uint64_t userbuf[MAX_RDR_WORDS];

	/* Disable performance counters */
	perf_intrigue_disable_perf_counters();

	if (perf_processor_interface == ONYX_INTF) {
		uint64_t tmp64;

		/* On ONYX the counters live inside RDR 16 (the 24-word
		 * counter RDR); read the whole image. */
		if (!perf_rdr_read_ubuf(16, userbuf))
			return -13;

		/* Counter 0: bits straddling words 21 and 22. */
		tmp64 =  (userbuf[21] << 22) & 0x00000000ffc00000;
		tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
		/* OR sticky0 (bit 41 of word 22) into the top bit. */
		tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
		raddr[0] = (uint32_t)tmp64;

		/* Counter 1: held entirely in word 22. */
		tmp64 = (userbuf[22] >> 9) & 0x00000000ffffffff;
		/* OR sticky1 into the top bit. */
		tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
		raddr[1] = (uint32_t)tmp64;

		/* Counter 2: bits straddling words 22 and 23. */
		tmp64 =  (userbuf[22] << 24) & 0x00000000ff000000;
		tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
		/* OR sticky2 into the top bit. */
		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
		raddr[2] = (uint32_t)tmp64;

		/* Counter 3: held entirely in word 23. */
		tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
		/* OR sticky3 into the top bit. */
		tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
		raddr[3] = (uint32_t)tmp64;

		/*
		 * Zero out the counters: keep only the low 10 control bits
		 * of word 21 and clear words 22-23, then write the image
		 * back so the hardware counters restart from zero.
		 */
		userbuf[21] &= 0xfffffffffffffc00ul;	/* 0 to last 10 bits */
		userbuf[22] = 0;
		userbuf[23] = 0;

		/* Write back the zeroed bytes + the image portion */
		perf_rdr_write(16, userbuf);
	} else {

		/*
		 * On CUDA the counters live in RDR 15; reading shifts them
		 * out, and the RDR is then explicitly cleared below.
		 */
		if (!perf_rdr_read_ubuf(15, userbuf)) {
			return -13;
		}

		/* Clear out the counters so the next run starts at zero. */
		perf_rdr_clear(15);

		/* Copy the four 32-bit counters out of the two 64-bit
		 * words just read. */
		raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
		raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
	}

	return 0;
}
651
652
653
654
655
656
657
658static const struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num)
659{
660 if (perf_processor_interface == ONYX_INTF) {
661 return &perf_rdr_tbl_U[rdr_num];
662 } else {
663 return &perf_rdr_tbl_W[rdr_num];
664 }
665}
666
667
668
669
670
671
/*
 * perf_rdr_read_ubuf
 *
 * Shift the named RDR's contents into the caller's buffer, one 64-bit
 * word at a time, last word first.  When the RDR width is not a
 * multiple of 64, the partial chunk is left-justified into the current
 * word and its overflow carried into the previous word.  Returns 0 if
 * the RDR is unused on this CPU (width 0), 1 on success.
 */
static int perf_rdr_read_ubuf(uint32_t	rdr_num, uint64_t *buffer)
{
	uint64_t	data, data_mask = 0;
	uint32_t	width, xbits, i;
	const struct rdr_tbl_ent *tentry;

	tentry = perf_rdr_get_entry(rdr_num);
	if ((width = tentry->width) == 0)
		return 0;

	/* Clear out buffer */
	i = tentry->num_words;
	while (i--) {
		buffer[i] = 0;
	}

	/* xbits = number of leftover bits (width mod 64); build a mask
	 * for the bits that carry into the previous word. */
	if ((xbits = width & 0x03f) != 0) {
		data_mask = 1;
		data_mask <<= (64 - xbits);
		data_mask--;
	}

	/* Grab all of the data, shifting one chunk per word. */
	i = tentry->num_words;
	while (i--) {

		if (perf_processor_interface == ONYX_INTF) {
			data = perf_rdr_shift_in_U(rdr_num, width);
		} else {
			data = perf_rdr_shift_in_W(rdr_num, width);
		}
		if (xbits) {
			/* Partial chunk: low xbits become the top of this
			 * word; the remainder spills into the prior word. */
			buffer[i] |= (data << (64 - xbits));
			if (i) {
				buffer[i-1] |= ((data >> xbits) & data_mask);
			}
		} else {
			buffer[i] = data;
		}
	}

	return 1;
}
716
717
718
719
720
721
722static int perf_rdr_clear(uint32_t rdr_num)
723{
724 const struct rdr_tbl_ent *tentry;
725 int32_t i;
726
727 tentry = perf_rdr_get_entry(rdr_num);
728
729 if (tentry->width == 0) {
730 return -1;
731 }
732
733 i = tentry->num_words;
734 while (i--) {
735 if (perf_processor_interface == ONYX_INTF) {
736 perf_rdr_shift_out_U(rdr_num, 0UL);
737 } else {
738 perf_rdr_shift_out_W(rdr_num, 0UL);
739 }
740 }
741
742 return 0;
743}
744
745
746
747
748
749
750
751static int perf_write_image(uint64_t *memaddr)
752{
753 uint64_t buffer[MAX_RDR_WORDS];
754 uint64_t *bptr;
755 uint32_t dwords;
756 const uint32_t *intrigue_rdr;
757 const uint64_t *intrigue_bitmask;
758 uint64_t tmp64;
759 void __iomem *runway;
760 const struct rdr_tbl_ent *tentry;
761 int i;
762
763
764 if (perf_processor_interface == ONYX_INTF) {
765
766 perf_rdr_clear(16);
767
768
769 perf_intrigue_enable_perf_counters();
770 perf_intrigue_disable_perf_counters();
771
772 intrigue_rdr = perf_rdrs_U;
773 } else {
774 perf_rdr_clear(15);
775 intrigue_rdr = perf_rdrs_W;
776 }
777
778
779 while (*intrigue_rdr != -1) {
780 tentry = perf_rdr_get_entry(*intrigue_rdr);
781 perf_rdr_read_ubuf(*intrigue_rdr, buffer);
782 bptr = &buffer[0];
783 dwords = tentry->num_words;
784 if (tentry->write_control) {
785 intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
786 while (dwords--) {
787 tmp64 = *intrigue_bitmask & *memaddr++;
788 tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
789 *bptr++ = tmp64;
790 }
791 } else {
792 while (dwords--) {
793 *bptr++ = *memaddr++;
794 }
795 }
796
797 perf_rdr_write(*intrigue_rdr, buffer);
798 intrigue_rdr++;
799 }
800
801
802
803
804
805 if (cpu_device == NULL)
806 {
807 printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
808 return -1;
809 }
810
811 runway = ioremap_nocache(cpu_device->hpa.start, 4096);
812
813
814 tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
815 __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
816 runway + RUNWAY_STATUS);
817
818
819 for (i = 0; i < 8; i++) {
820 __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
821 }
822
823 return 0;
824}
825
826
827
828
829
830
831
832static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
833{
834 const struct rdr_tbl_ent *tentry;
835 int32_t i;
836
837printk("perf_rdr_write\n");
838 tentry = perf_rdr_get_entry(rdr_num);
839 if (tentry->width == 0) { return; }
840
841 i = tentry->num_words;
842 while (i--) {
843 if (perf_processor_interface == ONYX_INTF) {
844 perf_rdr_shift_out_U(rdr_num, buffer[i]);
845 } else {
846 perf_rdr_shift_out_W(rdr_num, buffer[i]);
847 }
848 }
849printk("perf_rdr_write done\n");
850}
851