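/*
 * Parisc performance counter driver.
 *
 * Exposes the CPU performance monitor hardware through a misc character
 * device.  Two processor interfaces are supported: ONYX (PCX-U family)
 * and CUDA (PCX-W family and Mako).  Counter configuration images from
 * perf_images.h are written into the performance RDRs using the
 * low-level shift routines declared further down.
 */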
#include <linux/capability.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/perf.h>
#include <asm/parisc-device.h>
#include <asm/processor.h>
#include <asm/runway.h>
#include <asm/io.h>

#include "perf_images.h"

#define MAX_RDR_WORDS	24
#define PERF_VERSION	2

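/*
 * Per-RDR description: width of the register in bits, the number of
 * 64-bit words needed to hold its contents, and the write-control
 * offset used to pick the merge bitmask in perf_write_image().
 */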
struct rdr_tbl_ent {
	uint16_t	width;
	uint8_t		num_words;
	uint8_t		write_control;
};

static int perf_processor_interface __read_mostly = UNKNOWN_INTF;
static int perf_enabled __read_mostly;
static DEFINE_SPINLOCK(perf_lock);	/* protects perf_enabled */
struct parisc_device *cpu_device __read_mostly;

/* RDRs to write when the CUDA (PCX-W) interface is in use */
static const int perf_rdrs_W[] =
	{ 0, 1, 4, 5, 6, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };

/* RDRs to write when the ONYX (PCX-U) interface is in use */
static const int perf_rdrs_U[] =
	{ 0, 1, 4, 5, 6, 7, 16, 17, 18, 20, 21, 22, 23, 24, 25, -1 };

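/*
 * RDR layout table for the PCX-W (CUDA) interface, indexed by RDR
 * number.  A width of zero marks an RDR that is not implemented.
 */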
static const struct rdr_tbl_ent perf_rdr_tbl_W[] = {
	{ 19,	1,	8 },
	{ 16,	1,	16 },
	{ 72,	2,	0 },
	{ 81,	2,	0 },
	{ 328,	6,	0 },
	{ 160,	3,	0 },
	{ 336,	6,	0 },
	{ 164,	3,	0 },
	{ 0,	0,	0 },
	{ 35,	1,	0 },
	{ 6,	1,	0 },
	{ 18,	1,	0 },
	{ 13,	1,	0 },
	{ 8,	1,	0 },
	{ 8,	1,	0 },
	{ 8,	1,	0 },
	{ 1530,	24,	0 },
	{ 16,	1,	0 },
	{ 4,	1,	0 },
	{ 0,	0,	0 },
	{ 152,	3,	24 },
	{ 152,	3,	24 },
	{ 233,	4,	48 },
	{ 233,	4,	48 },
	{ 71,	2,	0 },
	{ 71,	2,	0 },
	{ 11,	1,	0 },
	{ 18,	1,	0 },
	{ 128,	2,	0 },
	{ 0,	0,	0 },
	{ 16,	1,	0 },
	{ 16,	1,	0 },
};

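/*
 * RDR layout table for the PCX-U (ONYX) interface, indexed by RDR
 * number.  A width of zero marks an RDR that is not implemented.
 */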
static const struct rdr_tbl_ent perf_rdr_tbl_U[] = {
	{ 19,	1,	8 },
	{ 32,	1,	16 },
	{ 20,	1,	0 },
	{ 0,	0,	0 },
	{ 344,	6,	0 },
	{ 176,	3,	0 },
	{ 336,	6,	0 },
	{ 0,	0,	0 },
	{ 0,	0,	0 },
	{ 0,	0,	0 },
	{ 28,	1,	0 },
	{ 33,	1,	0 },
	{ 0,	0,	0 },
	{ 230,	4,	0 },
	{ 32,	1,	0 },
	{ 128,	2,	0 },
	{ 1494,	24,	0 },
	{ 18,	1,	0 },
	{ 4,	1,	0 },
	{ 0,	0,	0 },
	{ 158,	3,	24 },
	{ 158,	3,	24 },
	{ 194,	4,	48 },
	{ 194,	4,	48 },
	{ 71,	2,	0 },
	{ 71,	2,	0 },
	{ 28,	1,	0 },
	{ 33,	1,	0 },
	{ 88,	2,	0 },
	{ 32,	1,	0 },
	{ 24,	1,	0 },
	{ 16,	1,	0 },
};

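/*
 * Merge bitmasks used by perf_write_image() for RDRs with a non-zero
 * write_control: a set bit is taken from the downloaded image, a clear
 * bit preserves the value currently in the RDR.
 */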
static const uint64_t perf_bitmasks[] = {
	0x0000000000000000ul,
	0xfdffe00000000000ul,
	0x003f000000000000ul,
	0x00fffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffffffffffcul,
	0xff00000000000000ul
};

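/*
 * Alternative bitmask set selected in perf_init() for PCX-W2, Mako and
 * Mako2 CPUs; only the last two mask words differ from perf_bitmasks.
 */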
static const uint64_t perf_bitmasks_piranha[] = {
	0x0000000000000000ul,
	0xfdffe00000000000ul,
	0x003f000000000000ul,
	0x00fffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffc00000000ul,
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffffffffffffffful,
	0xfffc000000000000ul
};

/* Points at the bitmask set for the CPU we run on; chosen in perf_init() */
static const uint64_t *bitmask_array;

/*
 * Forward declarations.
 */
static int perf_config(uint32_t *image_ptr);
static int perf_release(struct inode *inode, struct file *file);
static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf,
	size_t count, loff_t *ppos);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static const struct rdr_tbl_ent *perf_rdr_get_entry(uint32_t rdr_num);
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer);
static int perf_rdr_clear(uint32_t rdr_num);
static int perf_write_image(uint64_t *memaddr);
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer);

/* Low-level RDR access routines, implemented in assembly. */
extern uint64_t perf_rdr_shift_in_W(uint32_t rdr_num, uint16_t width);
extern uint64_t perf_rdr_shift_in_U(uint32_t rdr_num, uint16_t width);
extern void perf_rdr_shift_out_W(uint32_t rdr_num, uint64_t buffer);
extern void perf_rdr_shift_out_U(uint32_t rdr_num, uint64_t buffer);
extern void perf_intrigue_enable_perf_counters(void);
extern void perf_intrigue_disable_perf_counters(void);

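/*
 * perf_config()
 *
 * Stop the counters, download a new counter image into the RDRs and
 * restart the counters.  Returns sizeof(uint32_t) on success so that
 * perf_write() can report the number of bytes consumed.
 */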
static int perf_config(uint32_t *image_ptr)
{
	long error;
	uint32_t raddr[4];

	/* Stop the counters and read out their final values */
	error = perf_stop_counters(raddr);
	if (error != 0) {
		printk(KERN_ERR "perf_config: perf_stop_counters = %ld\n", error);
		return -EINVAL;
	}

	printk(KERN_INFO "Preparing to write image\n");

	/* Download the image into the RDRs */
	error = perf_write_image((uint64_t *)image_ptr);
	if (error != 0) {
		printk(KERN_ERR "perf_config: DOWNLOAD = %ld\n", error);
		return -EINVAL;
	}

	printk(KERN_INFO "Preparing to start counters\n");

	/* Start the counters running again */
	perf_start_counters();

	return sizeof(uint32_t);
}

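/*
 * perf_open()
 *
 * Allow only one process at a time to use the device.
 */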
static int perf_open(struct inode *inode, struct file *file)
{
	spin_lock(&perf_lock);
	if (perf_enabled) {
		spin_unlock(&perf_lock);
		return -EBUSY;
	}
	perf_enabled = 1;
	spin_unlock(&perf_lock);

	return 0;
}

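/*
 * perf_release()
 *
 * Mark the device as no longer in use.
 */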
static int perf_release(struct inode *inode, struct file *file)
{
	spin_lock(&perf_lock);
	perf_enabled = 0;
	spin_unlock(&perf_lock);

	return 0;
}

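/*
 * perf_read()
 *
 * Reads are not supported; counter values are returned through the
 * PA_PERF_OFF ioctl instead.
 */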
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
	return 0;
}

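/*
 * perf_write()
 *
 * Userspace writes a single 32-bit word: the upper halfword selects the
 * processor interface (CUDA or ONYX) and the lower halfword selects
 * which predefined counter image to download.  The image is then
 * programmed into the hardware via perf_config().
 */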
static ssize_t perf_write(struct file *file, const char __user *buf,
	size_t count, loff_t *ppos)
{
	size_t image_size __maybe_unused;
	uint32_t image_type;
	uint32_t interface_type;
	uint32_t test;

	if (perf_processor_interface == ONYX_INTF)
		image_size = PCXU_IMAGE_SIZE;
	else if (perf_processor_interface == CUDA_INTF)
		image_size = PCXW_IMAGE_SIZE;
	else
		return -EFAULT;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (count != sizeof(uint32_t))
		return -EIO;

	if (copy_from_user(&image_type, buf, sizeof(uint32_t)))
		return -EFAULT;

	/* Split the request into interface type and image index */
	interface_type = (image_type >> 16) & 0xffff;
	test = (image_type & 0xffff);

	/* Make sure the requested interface matches the processor */
	if (((perf_processor_interface == CUDA_INTF) &&
	     (interface_type != CUDA_INTF)) ||
	    ((perf_processor_interface == ONYX_INTF) &&
	     (interface_type != ONYX_INTF)))
		return -EINVAL;

	/* Bounds-check the image index */
	if (((interface_type == CUDA_INTF) &&
	     (test >= MAX_CUDA_IMAGES)) ||
	    ((interface_type == ONYX_INTF) &&
	     (test >= MAX_ONYX_IMAGES)))
		return -EINVAL;

	/* Download the requested image */
	if (interface_type == CUDA_INTF)
		return perf_config(cuda_images[test]);
	else
		return perf_config(onyx_images[test]);
}

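/*
 * perf_patch_images()
 *
 * Patch the images that count TLB misses with the run-time addresses of
 * the kernel TLB handlers and the PA2.0 interruption vector.  Currently
 * compiled out (#if 0), so it is a no-op.
 */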
static void perf_patch_images(void)
{
#if 0
	/*
	 * The TLB-miss counting images need the addresses of the kernel
	 * TLB miss handlers and the PA2.0 interruption vector, which are
	 * patched into the image words below.
	 */
	extern void $i_itlb_miss_2_0();
	extern void $i_dtlb_miss_2_0();
	extern void PA2_0_iva();

	uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
	uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
	uint32_t IVAaddress = (uint32_t)&PA2_0_iva;

	if (perf_processor_interface == ONYX_INTF) {
		/* Patch the D-TLB and I-TLB handler addresses into words 15-17 */
		onyx_images[TLBMISS][15] &= 0xffffff00;
		onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBMISS][17] = itlb_addr;

		onyx_images[TLBHANDMISS][15] &= 0xffffff00;
		onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[TLBHANDMISS][17] = itlb_addr;

		onyx_images[BIG_CPI][15] &= 0xffffff00;
		onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
		onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
		onyx_images[BIG_CPI][17] = itlb_addr;

		/* The PANIC image gets the interruption vector address */
		onyx_images[PANIC][15] &= 0xffffff00;
		onyx_images[PANIC][15] |= (0x000000ff&((IVAaddress) >> 24));
		onyx_images[PANIC][16] = (IVAaddress << 8)&0xffffff00;

	} else if (perf_processor_interface == CUDA_INTF) {
		/* The CUDA images pack the handler addresses differently */
		cuda_images[TLBMISS][16] =
			(cuda_images[TLBMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[TLBHANDMISS][16] =
			(cuda_images[TLBHANDMISS][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[TLBHANDMISS][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;

		cuda_images[BIG_CPI][16] =
			(cuda_images[BIG_CPI][16]&0xffff0000) |
			((dtlb_addr >> 8)&0x0000ffff);
		cuda_images[BIG_CPI][17] =
			((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
		cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
	} else {
		/* Unknown interface: nothing to patch */
	}
#endif
}

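/*
 * perf_ioctl()
 *
 * PA_PERF_ON      - start the counters
 * PA_PERF_OFF     - stop the counters and copy the four 32-bit counter
 *                   values back to userspace
 * PA_PERF_VERSION - return the driver interface version
 */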
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	long error_start;
	uint32_t raddr[4];
	int error = 0;

	switch (cmd) {

	case PA_PERF_ON:
		/* Start the counters */
		perf_start_counters();
		break;

	case PA_PERF_OFF:
		error_start = perf_stop_counters(raddr);
		if (error_start != 0) {
			printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
			error = -EFAULT;
			break;
		}

		/* Copy out the counter values */
		if (copy_to_user((void __user *)arg, raddr,
				sizeof (raddr)) != 0) {
			error = -EFAULT;
			break;
		}
		break;

	case PA_PERF_VERSION:
		/* Return the version number */
		error = put_user(PERF_VERSION, (int __user *)arg);
		break;

	default:
		error = -ENOTTY;
	}

	return error;
}

static const struct file_operations perf_fops = {
	.llseek		= no_llseek,
	.read		= perf_read,
	.write		= perf_write,
	.unlocked_ioctl	= perf_ioctl,
	.compat_ioctl	= perf_ioctl,
	.open		= perf_open,
	.release	= perf_release
};

static struct miscdevice perf_dev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= PA_PERF_DEV,
	.fops	= &perf_fops
};

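/*
 * perf_init()
 *
 * Detect which performance monitor interface the boot CPU provides,
 * pick the matching bitmask set and register the misc device.
 */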
static int __init perf_init(void)
{
	int ret;

	/* Assume the standard bitmasks until we learn otherwise */
	bitmask_array = perf_bitmasks;

	if (boot_cpu_data.cpu_type == pcxu ||
	    boot_cpu_data.cpu_type == pcxu_) {
		perf_processor_interface = ONYX_INTF;
	} else if (boot_cpu_data.cpu_type == pcxw ||
		   boot_cpu_data.cpu_type == pcxw_ ||
		   boot_cpu_data.cpu_type == pcxw2 ||
		   boot_cpu_data.cpu_type == mako ||
		   boot_cpu_data.cpu_type == mako2) {
		perf_processor_interface = CUDA_INTF;
		if (boot_cpu_data.cpu_type == pcxw2 ||
		    boot_cpu_data.cpu_type == mako ||
		    boot_cpu_data.cpu_type == mako2)
			bitmask_array = perf_bitmasks_piranha;
	} else {
		perf_processor_interface = UNKNOWN_INTF;
		printk(KERN_INFO "Performance monitoring counters not supported on this processor\n");
		return -ENODEV;
	}

	ret = misc_register(&perf_dev);
	if (ret) {
		printk(KERN_ERR "Performance monitoring counters: "
			"cannot register misc device.\n");
		return ret;
	}

	/* Patch the images to match the system */
	perf_patch_images();

	/* Only the boot CPU's device is used when programming the RDRs */
	cpu_device = per_cpu(cpu_data, 0).dev;
	printk(KERN_INFO "Performance monitoring counters enabled for %s\n",
		per_cpu(cpu_data, 0).dev->name);

	return 0;
}
device_initcall(perf_init);

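/*
 * perf_start_counters()
 *
 * Start the performance counters running.
 */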
static void perf_start_counters(void)
{
	perf_intrigue_enable_perf_counters();
}

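/*
 * perf_stop_counters()
 *
 * Stop the performance counters and assemble the four 32-bit counter
 * values into raddr[0..3].  On ONYX the counters live in RDR 16 and are
 * zeroed before the RDR is written back; on CUDA they live in RDR 15,
 * which is cleared after being read.
 */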
static int perf_stop_counters(uint32_t *raddr)
{
	uint64_t userbuf[MAX_RDR_WORDS];

	/* Disable performance counters */
	perf_intrigue_disable_perf_counters();

	if (perf_processor_interface == ONYX_INTF) {
		uint64_t tmp64;

		/* Read the counters out of RDR 16 */
		if (!perf_rdr_read_ubuf(16, userbuf))
			return -13;

		/* Counter 0: bits spread across userbuf[21] and userbuf[22] */
		tmp64 =  (userbuf[21] << 22) & 0x00000000ffc00000;
		tmp64 |= (userbuf[22] >> 42) & 0x00000000003fffff;
		/* high bit */
		tmp64 |= (userbuf[22] >> 10) & 0x0000000080000000;
		raddr[0] = (uint32_t)tmp64;

		/* Counter 1 */
		tmp64 =  (userbuf[22] >> 9) & 0x00000000ffffffff;
		/* high bit */
		tmp64 |= (userbuf[22] << 23) & 0x0000000080000000;
		raddr[1] = (uint32_t)tmp64;

		/* Counter 2: bits spread across userbuf[22] and userbuf[23] */
		tmp64 =  (userbuf[22] << 24) & 0x00000000ff000000;
		tmp64 |= (userbuf[23] >> 40) & 0x0000000000ffffff;
		/* high bit */
		tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
		raddr[2] = (uint32_t)tmp64;

		/* Counter 3 */
		tmp64 =  (userbuf[23] >> 7) & 0x00000000ffffffff;
		/* high bit */
		tmp64 |= (userbuf[23] << 25) & 0x0000000080000000;
		raddr[3] = (uint32_t)tmp64;

		/*
		 * Zero out the counter bits and write the image back so
		 * the counters restart from zero the next time they are
		 * enabled.
		 */
		userbuf[21] &= 0xfffffffffffffc00ul;	/* clear the low 10 bits */
		userbuf[22] = 0;
		userbuf[23] = 0;

		perf_rdr_write(16, userbuf);
	} else {

		/*
		 * Read the counters out of RDR 15 and clear it so the
		 * counters restart from zero.
		 */
		if (!perf_rdr_read_ubuf(15, userbuf)) {
			return -13;
		}

		perf_rdr_clear(15);

		/* Each 64-bit buffer word holds two 32-bit counters */
		raddr[0] = (uint32_t)((userbuf[0] >> 32) & 0x00000000ffffffffUL);
		raddr[1] = (uint32_t)(userbuf[0] & 0x00000000ffffffffUL);
		raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
		raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
	}

	return 0;
}

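/*
 * perf_rdr_get_entry()
 *
 * Return the layout table entry for an RDR, picking the table that
 * matches the processor interface.
 */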
static const struct rdr_tbl_ent *perf_rdr_get_entry(uint32_t rdr_num)
{
	if (perf_processor_interface == ONYX_INTF) {
		return &perf_rdr_tbl_U[rdr_num];
	} else {
		return &perf_rdr_tbl_W[rdr_num];
	}
}

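/*
 * perf_rdr_read_ubuf()
 *
 * Read the contents of an RDR into the caller's buffer, one 64-bit word
 * at a time.  Returns 0 if the RDR is not implemented (width of zero),
 * 1 on success.
 */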
static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
{
	uint64_t	data, data_mask = 0;
	uint32_t	width, xbits, i;
	const struct rdr_tbl_ent *tentry;

	tentry = perf_rdr_get_entry(rdr_num);
	if ((width = tentry->width) == 0)
		return 0;

	/* Clear out the destination buffer */
	i = tentry->num_words;
	while (i--) {
		buffer[i] = 0;
	}

	/*
	 * If the RDR width is not a multiple of 64, build a mask for the
	 * bits that spill over into the previous buffer word.
	 */
	if ((xbits = width & 0x03f) != 0) {
		data_mask = 1;
		data_mask <<= (64 - xbits);
		data_mask--;
	}

	/*
	 * Shift the RDR contents in, one 64-bit chunk at a time, and
	 * re-align the partial word across adjacent buffer entries.
	 */
	i = tentry->num_words;
	while (i--) {

		if (perf_processor_interface == ONYX_INTF) {
			data = perf_rdr_shift_in_U(rdr_num, width);
		} else {
			data = perf_rdr_shift_in_W(rdr_num, width);
		}
		if (xbits) {
			buffer[i] |= (data << (64 - xbits));
			if (i) {
				buffer[i-1] |= ((data >> xbits) & data_mask);
			}
		} else {
			buffer[i] = data;
		}
	}

	return 1;
}

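/*
 * perf_rdr_clear()
 *
 * Zero out an RDR by shifting in all-zero words.
 */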
static int perf_rdr_clear(uint32_t rdr_num)
{
	const struct rdr_tbl_ent *tentry;
	int32_t		i;

	tentry = perf_rdr_get_entry(rdr_num);

	if (tentry->width == 0) {
		return -1;
	}

	i = tentry->num_words;
	while (i--) {
		if (perf_processor_interface == ONYX_INTF) {
			perf_rdr_shift_out_U(rdr_num, 0UL);
		} else {
			perf_rdr_shift_out_W(rdr_num, 0UL);
		}
	}

	return 0;
}

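/*
 * perf_write_image()
 *
 * Download a counter image into the hardware: clear the counter RDR,
 * then walk the interface's RDR list, merging each image word into the
 * current RDR contents according to the write-control bitmasks, and
 * finally program the Runway status and debug registers from the tail
 * of the image.
 */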
static int perf_write_image(uint64_t *memaddr)
{
	uint64_t buffer[MAX_RDR_WORDS];
	uint64_t *bptr;
	uint32_t dwords;
	const uint32_t *intrigue_rdr;
	const uint64_t *intrigue_bitmask;
	uint64_t tmp64;
	void __iomem *runway;
	const struct rdr_tbl_ent *tentry;
	int i;

	/* Clear out the counters */
	if (perf_processor_interface == ONYX_INTF) {

		perf_rdr_clear(16);

		/* Toggle the performance monitor */
		perf_intrigue_enable_perf_counters();
		perf_intrigue_disable_perf_counters();

		intrigue_rdr = perf_rdrs_U;
	} else {
		perf_rdr_clear(15);
		intrigue_rdr = perf_rdrs_W;
	}

	/* Write all RDRs listed for this interface */
	while (*intrigue_rdr != -1) {
		tentry = perf_rdr_get_entry(*intrigue_rdr);
		perf_rdr_read_ubuf(*intrigue_rdr, buffer);
		bptr   = &buffer[0];
		dwords = tentry->num_words;
		if (tentry->write_control) {
			intrigue_bitmask = &bitmask_array[tentry->write_control >> 3];
			while (dwords--) {
				/* Take the masked bits from the image, keep the rest */
				tmp64 = *intrigue_bitmask & *memaddr++;
				tmp64 |= (~(*intrigue_bitmask++)) & *bptr;
				*bptr++ = tmp64;
			}
		} else {
			while (dwords--) {
				*bptr++ = *memaddr++;
			}
		}

		perf_rdr_write(*intrigue_rdr, buffer);
		intrigue_rdr++;
	}

	/*
	 * The image ends with the Runway status word and eight Runway
	 * debug words, which are written through the CPU's HPA.
	 */
	if (cpu_device == NULL) {
		printk(KERN_ERR "write_image: cpu_device not yet initialized!\n");
		return -1;
	}

	runway = ioremap_nocache(cpu_device->hpa.start, 4096);
	if (!runway) {
		pr_err("perf_write_image: ioremap failed!\n");
		return -ENOMEM;
	}

	/* Merge the image bits into the Runway status register */
	tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
	__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
		     runway + RUNWAY_STATUS);

	/* Write the Runway debug registers */
	for (i = 0; i < 8; i++) {
		__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
	}

	iounmap(runway);

	return 0;
}

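/*
 * perf_rdr_write()
 *
 * Write a buffer into an RDR, one 64-bit word at a time.
 */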
static void perf_rdr_write(uint32_t rdr_num, uint64_t *buffer)
{
	const struct rdr_tbl_ent *tentry;
	int32_t		i;

	printk("perf_rdr_write\n");
	tentry = perf_rdr_get_entry(rdr_num);
	if (tentry->width == 0)
		return;

	i = tentry->num_words;
	while (i--) {
		if (perf_processor_interface == ONYX_INTF) {
			perf_rdr_shift_out_U(rdr_num, buffer[i]);
		} else {
			perf_rdr_shift_out_W(rdr_num, buffer[i]);
		}
	}
	printk("perf_rdr_write done\n");
}