/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 *
 * Copyright (C) 1999-2005  Hewlett Packard Co
 *               Stephane Eranian <eranian@hpl.hp.com>
 *               David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * More information about perfmon available at:
 * 	http://www.hpl.hp.com/research/linux/perfmon
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
54
#ifdef CONFIG_PERFMON

/*
 * perfmon context states
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded, monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for context switch */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for context switch */

/*
 * depth of the notification message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
74
75
76
77
78
79
80
81
82
83
84
85
/*
 * type of a PMU register (bitmask):
 *	bit0   : register implemented
 *	bit1   : end marker
 *	bit4   : pmc has pmc.pm
 *	bit5   : pmc controls a counter (has pmc.oi), pmd used as counter
 *	bit6-7 : register type
 */
#define PFM_REG_NOTIMPL		0x0			/* not implemented at all */
#define PFM_REG_IMPL		0x1			/* register implemented */
#define PFM_REG_END		0x2			/* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	 /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	/* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	/* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	/* PMD used as buffer */
94
95#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
96#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
97
98#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
99
100
#define PMC_IS_IMPL(i)	  ((i) < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  ((i) < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
103
104
105#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
106#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
107#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
108#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
109
110#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
111#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
112#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
113#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
114
115#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
116#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
117
118#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
119#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
120#define PFM_CTX_TASK(h) (h)->ctx_task
121
122#define PMU_PMC_OI 5
123
124
125#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
126#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
127
128#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
129
130#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
131#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
132#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
133#define PFM_CODE_RR 0
134#define PFM_DATA_RR 1
135
136#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
137#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
138#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
139
140#define RDEP(x) (1UL<<(x))
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
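/*
 * context protection macros:
 *
 * in SMP : protect against CPU concurrency (spin_lock) and against PMU
 *          overflow interrupts (local_irq_disable)
 * in UP  : protect against PMU overflow interrupts only
 *          (local_irq_disable)
 *
 * PROTECT_CTX()/UNPROTECT_CTX() therefore use
 * spin_lock_irqsave()/spin_unlock_irqrestore(), while the _NOIRQ
 * variants use plain spin_lock()/spin_unlock() for paths, such as the
 * PMU interrupt handler, that already run with interrupts masked.
 */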
160#define PROTECT_CTX(c, f) \
161 do { \
162 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
163 spin_lock_irqsave(&(c)->ctx_lock, f); \
164 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
165 } while(0)
166
167#define UNPROTECT_CTX(c, f) \
168 do { \
169 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
170 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
171 } while(0)
172
173#define PROTECT_CTX_NOPRINT(c, f) \
174 do { \
175 spin_lock_irqsave(&(c)->ctx_lock, f); \
176 } while(0)
177
178
179#define UNPROTECT_CTX_NOPRINT(c, f) \
180 do { \
181 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
182 } while(0)
183
184
185#define PROTECT_CTX_NOIRQ(c) \
186 do { \
187 spin_lock(&(c)->ctx_lock); \
188 } while(0)
189
190#define UNPROTECT_CTX_NOIRQ(c) \
191 do { \
192 spin_unlock(&(c)->ctx_lock); \
193 } while(0)
194
195
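/*
 * On SMP, a per-CPU activation number implements lazy PMU state restore:
 * a context records the CPU's activation number when it last owned the
 * PMU, and on context-switch-in the full register state is reloaded only
 * if another context has activated the PMU on that CPU in the meantime.
 */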
196#ifdef CONFIG_SMP
197
198#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
199#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
200#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
201
202#else
#define SET_ACTIVATION(c)	do {} while(0)
#define GET_ACTIVATION()	do {} while(0)
#define INC_ACTIVATION()	do {} while(0)
206#endif
207
208#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
209#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
210#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
211
212#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
213#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
214
215#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
216
217
218
219
#define PMC0_HAS_OVFL(cmp0)  ((cmp0) & ~0x1UL)	/* any overflow bit set? bit 0 is the freeze bit */
221
222#define PFMFS_MAGIC 0xa0b4d889
223
224
225
226
227#define PFM_DEBUGGING 1
228#ifdef PFM_DEBUGGING
229#define DPRINT(a) \
230 do { \
231 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
232 } while (0)
233
234#define DPRINT_ovfl(a) \
235 do { \
236 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
237 } while (0)
238#endif
239
240
241
242
243
244
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;
257
258
259
260
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom sampling format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;
273
274#define PFM_TRAP_REASON_NONE 0x0
275#define PFM_TRAP_REASON_BLOCK 0x1
276#define PFM_TRAP_REASON_RESET 0x2
277
278
279
280
281
282
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* used for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMDs used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMCs being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used IBRs (speeds up ctxsw in) */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used DBRs (speeds up ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDs */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;		/* readers of the message queue sleep here */
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];	/* notification message queue */
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;
337
338
339
340
341
342#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
343
344#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
345
346#ifdef CONFIG_SMP
347#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
348#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
349#else
350#define SET_LAST_CPU(ctx, v) do {} while(0)
351#define GET_LAST_CPU(ctx) do {} while(0)
352#endif
353
354
355#define ctx_fl_block ctx_flags.block
356#define ctx_fl_system ctx_flags.system
357#define ctx_fl_using_dbreg ctx_flags.using_dbreg
358#define ctx_fl_is_sampling ctx_flags.is_sampling
359#define ctx_fl_excl_idle ctx_flags.excl_idle
360#define ctx_fl_going_zombie ctx_flags.going_zombie
361#define ctx_fl_trap_reason ctx_flags.trap_reason
362#define ctx_fl_no_msg ctx_flags.no_msg
363#define ctx_fl_can_restart ctx_flags.can_restart
364
#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
366#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
367
368
369
370
371
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per-task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per-CPU sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system-wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* points to task owning a system-wide session */
} pfm_session_t;
381
382
383
384
385
386
387typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;		/* register type: see PFM_REG_* above */
	int			pm_pos;		/* bit position of the pm bit in the PMC */
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;
398
399
400#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
401
402
403
404
405
406
407
408
409
410
411
412
413
typedef struct {
	unsigned long  ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t *pmc_desc;	/* detailed PMC register descriptions */
	pfm_reg_desc_t *pmd_desc;	/* detailed PMD register descriptions */

	unsigned int   num_pmcs;	/* number of PMCs: computed at init time */
	unsigned int   num_pmds;	/* number of PMDs: computed at init time */
	unsigned long  impl_pmcs[4];	/* bitmask of implemented PMCs */
	unsigned long  impl_pmds[4];	/* bitmask of implemented PMDs */

	char	      *pmu_name;	/* PMU family name */
	unsigned int  pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int  flags;		/* pmu specific flags */
	unsigned int  num_ibrs;		/* number of IBRs: computed at init time */
	unsigned int  num_dbrs;		/* number of DBRs: computed at init time */
	unsigned int  num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int           (*probe)(void);	/* customized probe routine */
	unsigned int  use_rr_dbregs:1;	/* set if debug registers used for range restriction */
} pmu_config_t;
434
435
436
437#define PFM_PMU_IRQ_RESEND 1
438
439
440
441
442typedef struct {
443 unsigned long ibr_mask:56;
444 unsigned long ibr_plm:4;
445 unsigned long ibr_ig:3;
446 unsigned long ibr_x:1;
447} ibr_mask_reg_t;
448
449typedef struct {
450 unsigned long dbr_mask:56;
451 unsigned long dbr_plm:4;
452 unsigned long dbr_ig:2;
453 unsigned long dbr_w:1;
454 unsigned long dbr_r:1;
455} dbr_mask_reg_t;
456
457typedef union {
458 unsigned long val;
459 ibr_mask_reg_t ibr;
460 dbr_mask_reg_t dbr;
461} dbreg_t;
462
463
464
465
466
467typedef struct {
468 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
469 char *cmd_name;
470 int cmd_flags;
471 unsigned int cmd_narg;
472 size_t cmd_argsize;
473 int (*cmd_getsize)(void *arg, size_t *sz);
474} pfm_cmd_desc_t;
475
476#define PFM_CMD_FD 0x01
477#define PFM_CMD_ARG_READ 0x02
478#define PFM_CMD_ARG_RW 0x04
479#define PFM_CMD_STOP 0x08
480
481
482#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
483#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
484#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
485#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
486#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
487
488#define PFM_CMD_ARG_MANY -1
489
490typedef struct {
491 unsigned long pfm_spurious_ovfl_intr_count;
492 unsigned long pfm_replay_ovfl_intr_count;
493 unsigned long pfm_ovfl_intr_count;
494 unsigned long pfm_ovfl_intr_cycles;
495 unsigned long pfm_ovfl_intr_cycles_min;
496 unsigned long pfm_ovfl_intr_cycles_max;
497 unsigned long pfm_smpl_handler_calls;
498 unsigned long pfm_smpl_handler_cycles;
499 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
500} pfm_stats_t;
501
502
503
504
505static pfm_stats_t pfm_stats[NR_CPUS];
506static pfm_session_t pfm_sessions;
507
508static DEFINE_SPINLOCK(pfm_alt_install_check);
509static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
510
511static struct proc_dir_entry *perfmon_dir;
512static pfm_uuid_t pfm_null_uuid = {0,};
513
static DEFINE_SPINLOCK(pfm_buffer_fmt_lock);
515static LIST_HEAD(pfm_buffer_fmt_list);
516
517static pmu_config_t *pmu_conf;
518
519
520pfm_sysctl_t pfm_sysctl;
521EXPORT_SYMBOL(pfm_sysctl);
522
static ctl_table pfm_ctl_table[] = {
524 {
525 .ctl_name = CTL_UNNUMBERED,
526 .procname = "debug",
527 .data = &pfm_sysctl.debug,
528 .maxlen = sizeof(int),
529 .mode = 0666,
530 .proc_handler = &proc_dointvec,
531 },
532 {
533 .ctl_name = CTL_UNNUMBERED,
534 .procname = "debug_ovfl",
535 .data = &pfm_sysctl.debug_ovfl,
536 .maxlen = sizeof(int),
537 .mode = 0666,
538 .proc_handler = &proc_dointvec,
539 },
540 {
541 .ctl_name = CTL_UNNUMBERED,
542 .procname = "fastctxsw",
543 .data = &pfm_sysctl.fastctxsw,
544 .maxlen = sizeof(int),
545 .mode = 0600,
546 .proc_handler = &proc_dointvec,
547 },
548 {
549 .ctl_name = CTL_UNNUMBERED,
550 .procname = "expert_mode",
551 .data = &pfm_sysctl.expert_mode,
552 .maxlen = sizeof(int),
553 .mode = 0600,
554 .proc_handler = &proc_dointvec,
555 },
556 {}
557};
558static ctl_table pfm_sysctl_dir[] = {
559 {
560 .ctl_name = CTL_UNNUMBERED,
561 .procname = "perfmon",
562 .mode = 0555,
563 .child = pfm_ctl_table,
564 },
565 {}
566};
567static ctl_table pfm_sysctl_root[] = {
568 {
569 .ctl_name = CTL_KERN,
570 .procname = "kernel",
571 .mode = 0555,
572 .child = pfm_sysctl_dir,
573 },
574 {}
575};
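/*
 * The three tables above chain together to surface:
 *   /proc/sys/kernel/perfmon/debug
 *   /proc/sys/kernel/perfmon/debug_ovfl
 *   /proc/sys/kernel/perfmon/fastctxsw
 *   /proc/sys/kernel/perfmon/expert_mode
 * e.g. "echo 1 > /proc/sys/kernel/perfmon/debug" enables DPRINT output.
 */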
576static struct ctl_table_header *pfm_sysctl_header;
577
578static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
579
580#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
581#define pfm_get_cpu_data(a,b) per_cpu(a, b)
582
583static inline void
584pfm_put_task(struct task_struct *task)
585{
586 if (task != current) put_task_struct(task);
587}
588
589static inline void
590pfm_reserve_page(unsigned long a)
591{
592 SetPageReserved(vmalloc_to_page((void *)a));
593}
594static inline void
595pfm_unreserve_page(unsigned long a)
596{
597 ClearPageReserved(vmalloc_to_page((void*)a));
598}
599
600static inline unsigned long
601pfm_protect_ctx_ctxsw(pfm_context_t *x)
602{
603 spin_lock(&(x)->ctx_lock);
604 return 0UL;
605}
606
607static inline void
608pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
609{
610 spin_unlock(&(x)->ctx_lock);
611}
612
613static inline unsigned int
614pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
615{
616 return do_munmap(mm, addr, len);
617}
618
619static inline unsigned long
620pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
621{
622 return get_unmapped_area(file, addr, len, pgoff, flags);
623}
624
625
626static int
627pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
628 struct vfsmount *mnt)
629{
630 return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
631}
632
633static struct file_system_type pfm_fs_type = {
634 .name = "pfmfs",
635 .get_sb = pfmfs_get_sb,
636 .kill_sb = kill_anon_super,
637};
638
639DEFINE_PER_CPU(unsigned long, pfm_syst_info);
640DEFINE_PER_CPU(struct task_struct *, pmu_owner);
641DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
642DEFINE_PER_CPU(unsigned long, pmu_activation_number);
643EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
644
645
646
647static const struct file_operations pfm_file_ops;
648
649
650
651
652#ifndef CONFIG_SMP
653static void pfm_lazy_save_regs (struct task_struct *ta);
654#endif
655
656void dump_pmu_state(const char *);
657static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
658
659#include "perfmon_itanium.h"
660#include "perfmon_mckinley.h"
661#include "perfmon_montecito.h"
662#include "perfmon_generic.h"
663
static pmu_config_t *pmu_confs[] = {
665 &pmu_conf_mont,
666 &pmu_conf_mck,
667 &pmu_conf_ita,
668 &pmu_conf_gen,
669 NULL
670};
671
672
673static int pfm_end_notify_user(pfm_context_t *ctx);
674
675static inline void
676pfm_clear_psr_pp(void)
677{
678 ia64_rsm(IA64_PSR_PP);
679 ia64_srlz_i();
680}
681
682static inline void
683pfm_set_psr_pp(void)
684{
685 ia64_ssm(IA64_PSR_PP);
686 ia64_srlz_i();
687}
688
689static inline void
690pfm_clear_psr_up(void)
691{
692 ia64_rsm(IA64_PSR_UP);
693 ia64_srlz_i();
694}
695
696static inline void
697pfm_set_psr_up(void)
698{
699 ia64_ssm(IA64_PSR_UP);
700 ia64_srlz_i();
701}
702
703static inline unsigned long
704pfm_get_psr(void)
705{
706 unsigned long tmp;
707 tmp = ia64_getreg(_IA64_REG_PSR);
708 ia64_srlz_i();
709 return tmp;
710}
711
712static inline void
713pfm_set_psr_l(unsigned long val)
714{
715 ia64_setreg(_IA64_REG_PSR_L, val);
716 ia64_srlz_i();
717}
718
719static inline void
720pfm_freeze_pmu(void)
721{
722 ia64_set_pmc(0,1UL);
723 ia64_srlz_d();
724}
725
726static inline void
727pfm_unfreeze_pmu(void)
728{
729 ia64_set_pmc(0,0UL);
730 ia64_srlz_d();
731}
732
733static inline void
734pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
735{
736 int i;
737
738 for (i=0; i < nibrs; i++) {
739 ia64_set_ibr(i, ibrs[i]);
740 ia64_dv_serialize_instruction();
741 }
742 ia64_srlz_i();
743}
744
745static inline void
746pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
747{
748 int i;
749
750 for (i=0; i < ndbrs; i++) {
751 ia64_set_dbr(i, dbrs[i]);
752 ia64_dv_serialize_data();
753 }
754 ia64_srlz_d();
755}
756
757
758
759
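/*
 * PMD counters are virtualized to 64 bits: the low bits (up to the
 * PMU's ovfl_val mask) live in the hardware PMD, while the upper bits
 * are accumulated in software in ctx_pmds[i].val at each overflow.
 */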
760static inline unsigned long
761pfm_read_soft_counter(pfm_context_t *ctx, int i)
762{
763 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
764}
765
766
767
768
769static inline void
770pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
771{
772 unsigned long ovfl_val = pmu_conf->ovfl_val;
773
774 ctx->ctx_pmds[i].val = val & ~ovfl_val;
775
776
777
778
779 ia64_set_pmd(i, val & ovfl_val);
780}
781
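/*
 * The notification message queue is a fixed-size circular buffer of
 * PFM_MAX_MSGS slots; one slot is sacrificed to distinguish full from
 * empty (head == tail means empty), so pfm_get_new_msg() fails when
 * advancing the tail would meet the head.
 */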
782static pfm_msg_t *
783pfm_get_new_msg(pfm_context_t *ctx)
784{
785 int idx, next;
786
787 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
788
	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
790 if (next == ctx->ctx_msgq_head) return NULL;
791
792 idx = ctx->ctx_msgq_tail;
793 ctx->ctx_msgq_tail = next;
794
795 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
796
797 return ctx->ctx_msgq+idx;
798}
799
800static pfm_msg_t *
801pfm_get_next_msg(pfm_context_t *ctx)
802{
803 pfm_msg_t *msg;
804
805 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
806
807 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
808
809
810
811
812 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
813
814
815
816
817 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
818
819 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
820
821 return msg;
822}
823
824static void
825pfm_reset_msgq(pfm_context_t *ctx)
826{
827 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
828 DPRINT(("ctx=%p msgq reset\n", ctx));
829}
830
831static void *
832pfm_rvmalloc(unsigned long size)
833{
834 void *mem;
835 unsigned long addr;
836
837 size = PAGE_ALIGN(size);
838 mem = vmalloc(size);
839 if (mem) {
840
841 memset(mem, 0, size);
842 addr = (unsigned long)mem;
843 while (size > 0) {
844 pfm_reserve_page(addr);
845 addr+=PAGE_SIZE;
846 size-=PAGE_SIZE;
847 }
848 }
849 return mem;
850}
851
852static void
853pfm_rvfree(void *mem, unsigned long size)
854{
855 unsigned long addr;
856
857 if (mem) {
858 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
859 addr = (unsigned long) mem;
860 while ((long) size > 0) {
861 pfm_unreserve_page(addr);
862 addr+=PAGE_SIZE;
863 size-=PAGE_SIZE;
864 }
865 vfree(mem);
866 }
867 return;
868}
869
870static pfm_context_t *
871pfm_context_alloc(int ctx_flags)
872{
873 pfm_context_t *ctx;
874
875
876
877
878
879 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
880 if (ctx) {
881 DPRINT(("alloc ctx @%p\n", ctx));
882
883
884
885
886 spin_lock_init(&ctx->ctx_lock);
887
888
889
890
891 ctx->ctx_state = PFM_CTX_UNLOADED;
892
893
894
895
		ctx->ctx_fl_block  = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
		ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
		ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1 : 0;
899
900
901
902
903
904
905
906
907 init_completion(&ctx->ctx_restart_done);
908
909
910
911
912 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
913 SET_LAST_CPU(ctx, -1);
914
915
916
917
918 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
919 init_waitqueue_head(&ctx->ctx_msgq_wait);
920 init_waitqueue_head(&ctx->ctx_zombieq);
921
922 }
923 return ctx;
924}
925
926static void
927pfm_context_free(pfm_context_t *ctx)
928{
929 if (ctx) {
930 DPRINT(("free ctx @%p\n", ctx));
931 kfree(ctx);
932 }
933}
934
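/*
 * pfm_mask_monitoring() stops monitoring for a task without unloading
 * its context: the PMD values are saved into the software state (with
 * overflow accumulation for counting PMDs) and the privilege-level
 * field (plm, bits 0-3) of every used monitor PMC is cleared so the
 * counters stop counting, typically leaving the context in the
 * PFM_CTX_MASKED state.
 */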
935static void
936pfm_mask_monitoring(struct task_struct *task)
937{
938 pfm_context_t *ctx = PFM_GET_CTX(task);
939 unsigned long mask, val, ovfl_mask;
940 int i;
941
942 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
943
944 ovfl_mask = pmu_conf->ovfl_val;
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964 mask = ctx->ctx_used_pmds[0];
965 for (i = 0; mask; i++, mask>>=1) {
966
967 if ((mask & 0x1) == 0) continue;
968 val = ia64_get_pmd(i);
969
970 if (PMD_IS_COUNTING(i)) {
971
972
973
974 ctx->ctx_pmds[i].val += (val & ovfl_mask);
975 } else {
976 ctx->ctx_pmds[i].val = val;
977 }
978 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
979 i,
980 ctx->ctx_pmds[i].val,
981 val & ovfl_mask));
982 }
983
984
985
986
987
988
989
990
991 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
992 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
993 if ((mask & 0x1) == 0UL) continue;
994 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
995 ctx->th_pmcs[i] &= ~0xfUL;
996 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
997 }
998
999
1000
1001 ia64_srlz_d();
1002}
1003
1004
1005
1006
1007
1008
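/*
 * pfm_restore_monitoring() undoes pfm_mask_monitoring(). It must run
 * on the monitored task's own CPU (task == current) with the context
 * in the MASKED state: PMDs are reloaded, the original PMC values
 * (including their plm fields) are restored, and psr.pp/dcr.pp
 * (system-wide) or psr.up (per-task) monitoring is re-enabled.
 */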
1009static void
1010pfm_restore_monitoring(struct task_struct *task)
1011{
1012 pfm_context_t *ctx = PFM_GET_CTX(task);
1013 unsigned long mask, ovfl_mask;
1014 unsigned long psr, val;
1015 int i, is_system;
1016
1017 is_system = ctx->ctx_fl_system;
1018 ovfl_mask = pmu_conf->ovfl_val;
1019
1020 if (task != current) {
1021 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
1022 return;
1023 }
1024 if (ctx->ctx_state != PFM_CTX_MASKED) {
1025 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
1026 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
1027 return;
1028 }
1029 psr = pfm_get_psr();
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1041
1042 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
1043 pfm_clear_psr_pp();
1044 } else {
1045 pfm_clear_psr_up();
1046 }
1047
1048
1049
1050 mask = ctx->ctx_used_pmds[0];
1051 for (i = 0; mask; i++, mask>>=1) {
1052
1053 if ((mask & 0x1) == 0) continue;
1054
1055 if (PMD_IS_COUNTING(i)) {
1056
1057
1058
1059
1060 val = ctx->ctx_pmds[i].val & ovfl_mask;
1061 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1062 } else {
1063 val = ctx->ctx_pmds[i].val;
1064 }
1065 ia64_set_pmd(i, val);
1066
1067 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1068 i,
1069 ctx->ctx_pmds[i].val,
1070 val));
1071 }
1072
1073
1074
1075 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1076 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1077 if ((mask & 0x1) == 0UL) continue;
1078 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1079 ia64_set_pmc(i, ctx->th_pmcs[i]);
1080 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1081 task_pid_nr(task), i, ctx->th_pmcs[i]));
1082 }
1083 ia64_srlz_d();
1084
1085
1086
1087
1088
1089 if (ctx->ctx_fl_using_dbreg) {
1090 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1091 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1092 }
1093
1094
1095
1096
1097 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1098
1099 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1100 ia64_srlz_i();
1101 }
1102 pfm_set_psr_l(psr);
1103}
1104
1105static inline void
1106pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1107{
1108 int i;
1109
1110 ia64_srlz_d();
1111
1112 for (i=0; mask; i++, mask>>=1) {
1113 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1114 }
1115}
1116
1117
1118
1119
1120static inline void
1121pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1122{
1123 int i;
1124 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1125
1126 for (i=0; mask; i++, mask>>=1) {
1127 if ((mask & 0x1) == 0) continue;
1128 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1129 ia64_set_pmd(i, val);
1130 }
1131 ia64_srlz_d();
1132}
1133
1134
1135
1136
1137static inline void
1138pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1139{
1140 unsigned long ovfl_val = pmu_conf->ovfl_val;
1141 unsigned long mask = ctx->ctx_all_pmds[0];
1142 unsigned long val;
1143 int i;
1144
1145 DPRINT(("mask=0x%lx\n", mask));
1146
1147 for (i=0; mask; i++, mask>>=1) {
1148
1149 val = ctx->ctx_pmds[i].val;
1150
1151
1152
1153
1154
1155
1156
1157 if (PMD_IS_COUNTING(i)) {
1158 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1159 val &= ovfl_val;
1160 }
1161 ctx->th_pmds[i] = val;
1162
1163 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1164 i,
1165 ctx->th_pmds[i],
1166 ctx->ctx_pmds[i].val));
1167 }
1168}
1169
1170
1171
1172
1173static inline void
1174pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1175{
1176 unsigned long mask = ctx->ctx_all_pmcs[0];
1177 int i;
1178
1179 DPRINT(("mask=0x%lx\n", mask));
1180
1181 for (i=0; mask; i++, mask>>=1) {
1182
1183 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1184 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1185 }
1186}
1187
1188
1189
1190static inline void
1191pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1192{
1193 int i;
1194
1195 for (i=0; mask; i++, mask>>=1) {
1196 if ((mask & 0x1) == 0) continue;
1197 ia64_set_pmc(i, pmcs[i]);
1198 }
1199 ia64_srlz_d();
1200}
1201
1202static inline int
1203pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1204{
1205 return memcmp(a, b, sizeof(pfm_uuid_t));
1206}
1207
1208static inline int
1209pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1210{
1211 int ret = 0;
1212 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1213 return ret;
1214}
1215
1216static inline int
1217pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1218{
1219 int ret = 0;
1220 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1221 return ret;
1222}
1223
1224
1225static inline int
1226pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1227 int cpu, void *arg)
1228{
1229 int ret = 0;
1230 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1231 return ret;
1232}
1233
1234static inline int
1235pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1236 int cpu, void *arg)
1237{
1238 int ret = 0;
1239 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1240 return ret;
1241}
1242
1243static inline int
1244pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1245{
1246 int ret = 0;
1247 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1248 return ret;
1249}
1250
1251static inline int
1252pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1253{
1254 int ret = 0;
1255 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1256 return ret;
1257}
1258
1259static pfm_buffer_fmt_t *
1260__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1261{
1262 struct list_head * pos;
1263 pfm_buffer_fmt_t * entry;
1264
1265 list_for_each(pos, &pfm_buffer_fmt_list) {
1266 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1267 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1268 return entry;
1269 }
1270 return NULL;
1271}
1272
1273
1274
1275
1276static pfm_buffer_fmt_t *
1277pfm_find_buffer_fmt(pfm_uuid_t uuid)
1278{
1279 pfm_buffer_fmt_t * fmt;
1280 spin_lock(&pfm_buffer_fmt_lock);
1281 fmt = __pfm_find_buffer_fmt(uuid);
1282 spin_unlock(&pfm_buffer_fmt_lock);
1283 return fmt;
1284}
1285
1286int
1287pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1288{
1289 int ret = 0;
1290
1291
1292 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1293
1294
1295 if (fmt->fmt_handler == NULL) return -EINVAL;
1296
1297
1298
1299
1300
1301 spin_lock(&pfm_buffer_fmt_lock);
1302
1303 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1304 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1305 ret = -EBUSY;
1306 goto out;
1307 }
1308 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1309 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1310
1311out:
1312 spin_unlock(&pfm_buffer_fmt_lock);
1313 return ret;
1314}
1315EXPORT_SYMBOL(pfm_register_buffer_fmt);
1316
1317int
1318pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1319{
1320 pfm_buffer_fmt_t *fmt;
1321 int ret = 0;
1322
1323 spin_lock(&pfm_buffer_fmt_lock);
1324
1325 fmt = __pfm_find_buffer_fmt(uuid);
1326 if (!fmt) {
1327 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1328 ret = -EINVAL;
1329 goto out;
1330 }
1331 list_del_init(&fmt->fmt_list);
1332 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1333
1334out:
1335 spin_unlock(&pfm_buffer_fmt_lock);
1336 return ret;
1337
1338}
1339EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1340
1341extern void update_pal_halt_status(int);
1342
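/*
 * A perfmon session is either per-task or system-wide (at most one per
 * CPU), and the two kinds are mutually exclusive: reserving a
 * system-wide session fails while task sessions exist, and vice versa.
 * While any session is active, PAL power-saving halt is disabled via
 * update_pal_halt_status(). All of this is serialized by pfs_lock.
 */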
1343static int
1344pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1345{
1346 unsigned long flags;
1347
1348
1349
1350 LOCK_PFS(flags);
1351
1352 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1353 pfm_sessions.pfs_sys_sessions,
1354 pfm_sessions.pfs_task_sessions,
1355 pfm_sessions.pfs_sys_use_dbregs,
1356 is_syswide,
1357 cpu));
1358
1359 if (is_syswide) {
1360
1361
1362
1363 if (pfm_sessions.pfs_task_sessions > 0UL) {
1364 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1365 pfm_sessions.pfs_task_sessions));
1366 goto abort;
1367 }
1368
1369 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1370
1371 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1372
1373 pfm_sessions.pfs_sys_session[cpu] = task;
1374
		pfm_sessions.pfs_sys_sessions++;
1376
1377 } else {
1378 if (pfm_sessions.pfs_sys_sessions) goto abort;
1379 pfm_sessions.pfs_task_sessions++;
1380 }
1381
1382 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1383 pfm_sessions.pfs_sys_sessions,
1384 pfm_sessions.pfs_task_sessions,
1385 pfm_sessions.pfs_sys_use_dbregs,
1386 is_syswide,
1387 cpu));
1388
1389
1390
1391
1392 update_pal_halt_status(0);
1393
1394 UNLOCK_PFS(flags);
1395
1396 return 0;
1397
1398error_conflict:
1399 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1400 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1401 cpu));
1402abort:
1403 UNLOCK_PFS(flags);
1404
1405 return -EBUSY;
1406
1407}
1408
1409static int
1410pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1411{
1412 unsigned long flags;
1413
1414
1415
1416 LOCK_PFS(flags);
1417
1418 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1419 pfm_sessions.pfs_sys_sessions,
1420 pfm_sessions.pfs_task_sessions,
1421 pfm_sessions.pfs_sys_use_dbregs,
1422 is_syswide,
1423 cpu));
1424
1425
1426 if (is_syswide) {
1427 pfm_sessions.pfs_sys_session[cpu] = NULL;
1428
1429
1430
1431 if (ctx && ctx->ctx_fl_using_dbreg) {
1432 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1433 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1434 } else {
1435 pfm_sessions.pfs_sys_use_dbregs--;
1436 }
1437 }
1438 pfm_sessions.pfs_sys_sessions--;
1439 } else {
1440 pfm_sessions.pfs_task_sessions--;
1441 }
1442 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1443 pfm_sessions.pfs_sys_sessions,
1444 pfm_sessions.pfs_task_sessions,
1445 pfm_sessions.pfs_sys_use_dbregs,
1446 is_syswide,
1447 cpu));
1448
1449
1450
1451
1452 if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
1453 update_pal_halt_status(1);
1454
1455 UNLOCK_PFS(flags);
1456
1457 return 0;
1458}
1459
1460
1461
1462
1463
1464
1465static int
1466pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
1467{
1468 int r;
1469
1470
1471 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1472 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1473 return -EINVAL;
1474 }
1475
1476 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1477
1478
1479
1480
1481 down_write(&task->mm->mmap_sem);
1482
1483 DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));
1484
1485 r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);
1486
1487 up_write(&task->mm->mmap_sem);
	if (r != 0) {
1489 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1490 }
1491
1492 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1493
1494 return 0;
1495}
1496
1497
1498
1499
1500#if 0
1501static int
1502pfm_free_smpl_buffer(pfm_context_t *ctx)
1503{
1504 pfm_buffer_fmt_t *fmt;
1505
1506 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1507
1508
1509
1510
1511 fmt = ctx->ctx_buf_fmt;
1512
1513 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1514 ctx->ctx_smpl_hdr,
1515 ctx->ctx_smpl_size,
1516 ctx->ctx_smpl_vaddr));
1517
1518 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1519
1520
1521
1522
1523 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1524
1525 ctx->ctx_smpl_hdr = NULL;
1526 ctx->ctx_smpl_size = 0UL;
1527
1528 return 0;
1529
1530invalid_free:
1531 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1532 return -EINVAL;
1533}
1534#endif
1535
1536static inline void
1537pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1538{
1539 if (fmt == NULL) return;
1540
1541 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1542
1543}
1544
1545
1546
1547
1548
1549
1550
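/*
 * pfmfs is a kernel-private pseudo filesystem: it exists only so that
 * each perfmon context can be handed to user level as an anonymous
 * file descriptor; it is never mounted by userland.
 */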
1551static struct vfsmount *pfmfs_mnt;
1552
1553static int __init
1554init_pfm_fs(void)
1555{
1556 int err = register_filesystem(&pfm_fs_type);
1557 if (!err) {
1558 pfmfs_mnt = kern_mount(&pfm_fs_type);
1559 err = PTR_ERR(pfmfs_mnt);
1560 if (IS_ERR(pfmfs_mnt))
1561 unregister_filesystem(&pfm_fs_type);
1562 else
1563 err = 0;
1564 }
1565 return err;
1566}
1567
1568static ssize_t
1569pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1570{
1571 pfm_context_t *ctx;
1572 pfm_msg_t *msg;
1573 ssize_t ret;
1574 unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}
1580
1581 ctx = (pfm_context_t *)filp->private_data;
1582 if (ctx == NULL) {
1583 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1584 return -EINVAL;
1585 }
1586
1587
1588
1589
1590 if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%zu)\n", ctx, sizeof(pfm_msg_t)));
1592 return -EINVAL;
1593 }
1594
1595 PROTECT_CTX(ctx, flags);
1596
1597
1598
1599
1600 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1601
1602
1603 for(;;) {
1604
1605
1606
1607
1608 set_current_state(TASK_INTERRUPTIBLE);
1609
1610 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1611
1612 ret = 0;
1613 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1614
1615 UNPROTECT_CTX(ctx, flags);
1616
1617
1618
1619
1620 ret = -EAGAIN;
1621 if(filp->f_flags & O_NONBLOCK) break;
1622
1623
1624
1625
1626 if(signal_pending(current)) {
1627 ret = -EINTR;
1628 break;
1629 }
1630
1631
1632
1633 schedule();
1634
1635 PROTECT_CTX(ctx, flags);
1636 }
1637 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1638 set_current_state(TASK_RUNNING);
1639 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1640
1641 if (ret < 0) goto abort;
1642
1643 ret = -EINVAL;
1644 msg = pfm_get_next_msg(ctx);
1645 if (msg == NULL) {
1646 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1647 goto abort_locked;
1648 }
1649
1650 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1651
1652 ret = -EFAULT;
1653 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1654
1655abort_locked:
1656 UNPROTECT_CTX(ctx, flags);
1657abort:
1658 return ret;
1659}
1660
1661static ssize_t
1662pfm_write(struct file *file, const char __user *ubuf,
1663 size_t size, loff_t *ppos)
1664{
1665 DPRINT(("pfm_write called\n"));
1666 return -EINVAL;
1667}
1668
1669static unsigned int
1670pfm_poll(struct file *filp, poll_table * wait)
1671{
1672 pfm_context_t *ctx;
1673 unsigned long flags;
1674 unsigned int mask = 0;
1675
1676 if (PFM_IS_FILE(filp) == 0) {
1677 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1678 return 0;
1679 }
1680
1681 ctx = (pfm_context_t *)filp->private_data;
1682 if (ctx == NULL) {
1683 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1684 return 0;
1685 }
1686
1687
1688 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1689
1690 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1691
1692 PROTECT_CTX(ctx, flags);
1693
1694 if (PFM_CTXQ_EMPTY(ctx) == 0)
1695 mask = POLLIN | POLLRDNORM;
1696
1697 UNPROTECT_CTX(ctx, flags);
1698
1699 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1700
1701 return mask;
1702}
1703
1704static int
1705pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
1706{
1707 DPRINT(("pfm_ioctl called\n"));
1708 return -EINVAL;
1709}
1710
1711
1712
1713
1714static inline int
1715pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1716{
1717 int ret;
1718
1719 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1720
1721 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1722 task_pid_nr(current),
1723 fd,
1724 on,
1725 ctx->ctx_async_queue, ret));
1726
1727 return ret;
1728}
1729
1730static int
1731pfm_fasync(int fd, struct file *filp, int on)
1732{
1733 pfm_context_t *ctx;
1734 int ret;
1735
1736 if (PFM_IS_FILE(filp) == 0) {
1737 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1738 return -EBADF;
1739 }
1740
1741 ctx = (pfm_context_t *)filp->private_data;
1742 if (ctx == NULL) {
1743 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1744 return -EBADF;
1745 }
1746
1747
1748
1749
1750
1751
1752
1753 ret = pfm_do_fasync(fd, filp, ctx, on);
1754
1755
1756 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1757 fd,
1758 on,
1759 ctx->ctx_async_queue, ret));
1760
1761 return ret;
1762}
1763
1764#ifdef CONFIG_SMP
1765
1766
1767
1768
1769
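/*
 * Executed via smp_call_function_single() from pfm_flush()/pfm_close()
 * when a system-wide context must be torn down on the CPU it is bound
 * to: it runs on that CPU in IPI context and unloads the context with
 * interrupts disabled.
 */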
1770static void
1771pfm_syswide_force_stop(void *info)
1772{
1773 pfm_context_t *ctx = (pfm_context_t *)info;
1774 struct pt_regs *regs = task_pt_regs(current);
1775 struct task_struct *owner;
1776 unsigned long flags;
1777 int ret;
1778
1779 if (ctx->ctx_cpu != smp_processor_id()) {
1780 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1781 ctx->ctx_cpu,
1782 smp_processor_id());
1783 return;
1784 }
1785 owner = GET_PMU_OWNER();
1786 if (owner != ctx->ctx_task) {
1787 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1788 smp_processor_id(),
1789 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1790 return;
1791 }
1792 if (GET_PMU_CTX() != ctx) {
1793 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1794 smp_processor_id(),
1795 GET_PMU_CTX(), ctx);
1796 return;
1797 }
1798
1799 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1800
1801
1802
1803
1804
1805 local_irq_save(flags);
1806
1807 ret = pfm_context_unload(ctx, NULL, 0, regs);
1808 if (ret) {
1809 DPRINT(("context_unload returned %d\n", ret));
1810 }
1811
1812
1813
1814
1815 local_irq_restore(flags);
1816}
1817
1818static void
1819pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1820{
1821 int ret;
1822
1823 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1824 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
1825 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1826}
1827#endif
1828
1829
1830
1831
1832
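/*
 * pfm_flush() is invoked on every close() of the context file
 * descriptor, including the implicit ones at exit(). It only undoes
 * the caller's own use of the context: a force-unload when the caller
 * is the monitored task (or the owner of a system-wide session), and
 * removal of the caller's sampling-buffer mapping.
 */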
1833static int
1834pfm_flush(struct file *filp, fl_owner_t id)
1835{
1836 pfm_context_t *ctx;
1837 struct task_struct *task;
1838 struct pt_regs *regs;
1839 unsigned long flags;
1840 unsigned long smpl_buf_size = 0UL;
1841 void *smpl_buf_vaddr = NULL;
1842 int state, is_system;
1843
1844 if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
1846 return -EBADF;
1847 }
1848
1849 ctx = (pfm_context_t *)filp->private_data;
1850 if (ctx == NULL) {
1851 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1852 return -EBADF;
1853 }
1854
1855
1856
1857
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868 PROTECT_CTX(ctx, flags);
1869
1870 state = ctx->ctx_state;
1871 is_system = ctx->ctx_fl_system;
1872
1873 task = PFM_CTX_TASK(ctx);
1874 regs = task_pt_regs(task);
1875
1876 DPRINT(("ctx_state=%d is_current=%d\n",
1877 state,
1878 task == current ? 1 : 0));
1879
1880
1881
1882
1883
1884
1885
1886
1887 if (task == current) {
1888#ifdef CONFIG_SMP
1889
1890
1891
1892
1893
1894
1895
1896 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1897
1898 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1899
1900
1901
1902 local_irq_restore(flags);
1903
1904 pfm_syswide_cleanup_other_cpu(ctx);
1905
1906
1907
1908
1909 local_irq_save(flags);
1910
1911
1912
1913
1914 } else
1915#endif
1916 {
1917
1918 DPRINT(("forcing unload\n"));
1919
1920
1921
1922
1923 pfm_context_unload(ctx, NULL, 0, regs);
1924
1925 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1926 }
1927 }
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937
1938
1939
1940 if (ctx->ctx_smpl_vaddr && current->mm) {
1941 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1942 smpl_buf_size = ctx->ctx_smpl_size;
1943 }
1944
1945 UNPROTECT_CTX(ctx, flags);
1946
1947
1948
1949
1950
1951
1952
1953 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);
1954
1955 return 0;
1956}
1957
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
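/*
 * Called once, when the last reference to the file is dropped. Unlike
 * pfm_flush(), this completes the teardown: it performs the zombie
 * handshake with a blocked monitored task if needed, frees the
 * sampling buffer, releases the session for zombie contexts, and
 * finally frees the context itself.
 */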
1972static int
1973pfm_close(struct inode *inode, struct file *filp)
1974{
1975 pfm_context_t *ctx;
1976 struct task_struct *task;
1977 struct pt_regs *regs;
1978 DECLARE_WAITQUEUE(wait, current);
1979 unsigned long flags;
1980 unsigned long smpl_buf_size = 0UL;
1981 void *smpl_buf_addr = NULL;
1982 int free_possible = 1;
1983 int state, is_system;
1984
1985 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1986
1987 if (PFM_IS_FILE(filp) == 0) {
1988 DPRINT(("bad magic\n"));
1989 return -EBADF;
1990 }
1991
1992 ctx = (pfm_context_t *)filp->private_data;
1993 if (ctx == NULL) {
1994 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1995 return -EBADF;
1996 }
1997
1998 PROTECT_CTX(ctx, flags);
1999
2000 state = ctx->ctx_state;
2001 is_system = ctx->ctx_fl_system;
2002
2003 task = PFM_CTX_TASK(ctx);
2004 regs = task_pt_regs(task);
2005
2006 DPRINT(("ctx_state=%d is_current=%d\n",
2007 state,
2008 task == current ? 1 : 0));
2009
2010
2011
2012
2013 if (state == PFM_CTX_UNLOADED) goto doit;
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043 ctx->ctx_fl_going_zombie = 1;
2044
2045
2046
2047
2048 complete(&ctx->ctx_restart_done);
2049
2050 DPRINT(("waking up ctx_state=%d\n", state));
2051
2052
2053
2054
2055
2056
2057
2058
2059
2060 set_current_state(TASK_INTERRUPTIBLE);
2061 add_wait_queue(&ctx->ctx_zombieq, &wait);
2062
2063 UNPROTECT_CTX(ctx, flags);
2064
2065
2066
2067
2068
2069
2070 schedule();
2071
2072
2073 PROTECT_CTX(ctx, flags);
2074
2075
2076 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2077 set_current_state(TASK_RUNNING);
2078
2079
2080
2081
		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
2083 }
2084 else if (task != current) {
2085#ifdef CONFIG_SMP
2086
2087
2088
2089 ctx->ctx_state = PFM_CTX_ZOMBIE;
2090
2091 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2092
2093
2094
2095
2096 free_possible = 0;
2097#else
2098 pfm_context_unload(ctx, NULL, 0, regs);
2099#endif
2100 }
2101
2102doit:
2103
2104 state = ctx->ctx_state;
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120 if (ctx->ctx_smpl_hdr) {
2121 smpl_buf_addr = ctx->ctx_smpl_hdr;
2122 smpl_buf_size = ctx->ctx_smpl_size;
2123
2124 ctx->ctx_smpl_hdr = NULL;
2125 ctx->ctx_fl_is_sampling = 0;
2126 }
2127
2128 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2129 state,
2130 free_possible,
2131 smpl_buf_addr,
2132 smpl_buf_size));
2133
2134 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2135
2136
2137
2138
2139 if (state == PFM_CTX_ZOMBIE) {
2140 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2141 }
2142
2143
2144
2145
2146
2147 filp->private_data = NULL;
2148
2149
2150
2151
2152
2153
2154
2155
2156 UNPROTECT_CTX(ctx, flags);
2157
2158
2159
2160
2161
2162 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);
2163
2164
2165
2166
2167 if (free_possible) pfm_context_free(ctx);
2168
2169 return 0;
2170}
2171
2172static int
2173pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2174{
2175 DPRINT(("pfm_no_open called\n"));
2176 return -ENXIO;
2177}
2178
2179
2180
2181static const struct file_operations pfm_file_ops = {
2182 .llseek = no_llseek,
2183 .read = pfm_read,
2184 .write = pfm_write,
2185 .poll = pfm_poll,
2186 .ioctl = pfm_ioctl,
2187 .open = pfm_no_open,
2188 .fasync = pfm_fasync,
2189 .release = pfm_close,
2190 .flush = pfm_flush
2191};
2192
2193static int
2194pfmfs_delete_dentry(struct dentry *dentry)
2195{
2196 return 1;
2197}
2198
2199static const struct dentry_operations pfmfs_dentry_operations = {
2200 .d_delete = pfmfs_delete_dentry,
2201};
2202
2203
2204static struct file *
2205pfm_alloc_file(pfm_context_t *ctx)
2206{
2207 struct file *file;
2208 struct inode *inode;
2209 struct dentry *dentry;
2210 char name[32];
2211 struct qstr this;
2212
2213
2214
2215
2216 inode = new_inode(pfmfs_mnt->mnt_sb);
2217 if (!inode)
2218 return ERR_PTR(-ENOMEM);
2219
2220 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2221
2222 inode->i_mode = S_IFCHR|S_IRUGO;
2223 inode->i_uid = current_fsuid();
2224 inode->i_gid = current_fsgid();
2225
2226 sprintf(name, "[%lu]", inode->i_ino);
2227 this.name = name;
2228 this.len = strlen(name);
2229 this.hash = inode->i_ino;
2230
2231
2232
2233
2234 dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2235 if (!dentry) {
2236 iput(inode);
2237 return ERR_PTR(-ENOMEM);
2238 }
2239
2240 dentry->d_op = &pfmfs_dentry_operations;
2241 d_add(dentry, inode);
2242
2243 file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops);
2244 if (!file) {
2245 dput(dentry);
2246 return ERR_PTR(-ENFILE);
2247 }
2248
2249 file->f_flags = O_RDONLY;
2250 file->private_data = ctx;
2251
2252 return file;
2253}
2254
2255static int
2256pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2257{
2258 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2259
2260 while (size > 0) {
2261 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2262
2263
2264 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2265 return -ENOMEM;
2266
2267 addr += PAGE_SIZE;
2268 buf += PAGE_SIZE;
2269 size -= PAGE_SIZE;
2270 }
2271 return 0;
2272}
2273
2274
2275
2276
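/*
 * Allocate the kernel-level sampling buffer (vmalloc'ed and
 * page-reserved, charged against RLIMIT_MEMLOCK) and remap it
 * read-only into the task's address space; the resulting user virtual
 * address is returned through user_vaddr.
 */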
2277static int
2278pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
2279{
2280 struct mm_struct *mm = task->mm;
2281 struct vm_area_struct *vma = NULL;
2282 unsigned long size;
2283 void *smpl_buf;
2284
2285
2286
2287
2288
2289 size = PAGE_ALIGN(rsize);
2290
2291 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301 if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
2302 return -ENOMEM;
2303
2304
2305
2306
2307
2308
2309 smpl_buf = pfm_rvmalloc(size);
2310 if (smpl_buf == NULL) {
2311 DPRINT(("Can't allocate sampling buffer\n"));
2312 return -ENOMEM;
2313 }
2314
2315 DPRINT(("smpl_buf @%p\n", smpl_buf));
2316
2317
2318 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2319 if (!vma) {
2320 DPRINT(("Cannot allocate vma\n"));
2321 goto error_kmem;
2322 }
2323
2324
2325
2326
2327 vma->vm_mm = mm;
2328 vma->vm_file = filp;
2329 vma->vm_flags = VM_READ| VM_MAYREAD |VM_RESERVED;
2330 vma->vm_page_prot = PAGE_READONLY;
2331
2332
2333
2334
2335
2336
2337 ctx->ctx_smpl_hdr = smpl_buf;
2338 ctx->ctx_smpl_size = size;
2339
2340
2341
2342
2343
2344
2345
2346 down_write(&task->mm->mmap_sem);
2347
2348
2349 vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
2350 if (vma->vm_start == 0UL) {
2351 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2352 up_write(&task->mm->mmap_sem);
2353 goto error;
2354 }
2355 vma->vm_end = vma->vm_start + size;
2356 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2357
2358 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2359
2360
2361 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2362 DPRINT(("Can't remap buffer\n"));
2363 up_write(&task->mm->mmap_sem);
2364 goto error;
2365 }
2366
2367 get_file(filp);
2368
2369
2370
2371
2372
2373 insert_vm_struct(mm, vma);
2374
2375 mm->total_vm += size >> PAGE_SHIFT;
2376 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
2377 vma_pages(vma));
2378 up_write(&task->mm->mmap_sem);
2379
2380
2381
2382
2383 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2384 *(unsigned long *)user_vaddr = vma->vm_start;
2385
2386 return 0;
2387
2388error:
2389 kmem_cache_free(vm_area_cachep, vma);
2390error_kmem:
2391 pfm_rvfree(smpl_buf, size);
2392
2393 return -ENOMEM;
2394}
2395
2396
2397
2398
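/*
 * Attach-permission check: the caller may attach only to a task whose
 * real, effective and saved uid/gid all match its own, unless it has
 * CAP_SYS_PTRACE. Returns non-zero when permission must be denied.
 */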
2399static int
2400pfm_bad_permissions(struct task_struct *task)
2401{
2402 const struct cred *tcred;
2403 uid_t uid = current_uid();
2404 gid_t gid = current_gid();
2405 int ret;
2406
2407 rcu_read_lock();
2408 tcred = __task_cred(task);
2409
2410
2411 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2412 uid,
2413 gid,
2414 tcred->euid,
2415 tcred->suid,
2416 tcred->uid,
2417 tcred->egid,
2418 tcred->sgid));
2419
2420 ret = ((uid != tcred->euid)
2421 || (uid != tcred->suid)
2422 || (uid != tcred->uid)
2423 || (gid != tcred->egid)
2424 || (gid != tcred->sgid)
2425 || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);
2426
2427 rcu_read_unlock();
2428 return ret;
2429}
2430
2431static int
2432pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2433{
2434 int ctx_flags;
2435
2436
2437
2438 ctx_flags = pfx->ctx_flags;
2439
2440 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2441
2442
2443
2444
2445 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2446 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2447 return -EINVAL;
2448 }
	}
2451
2452
2453 return 0;
2454}
2455
2456static int
2457pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
2458 unsigned int cpu, pfarg_context_t *arg)
2459{
2460 pfm_buffer_fmt_t *fmt = NULL;
2461 unsigned long size = 0UL;
2462 void *uaddr = NULL;
2463 void *fmt_arg = NULL;
2464 int ret = 0;
2465#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2466
2467
2468 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2469 if (fmt == NULL) {
2470 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2471 return -EINVAL;
2472 }
2473
2474
2475
2476
2477 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2478
2479 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2480
2481 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2482
2483 if (ret) goto error;
2484
2485
2486 ctx->ctx_buf_fmt = fmt;
2487 ctx->ctx_fl_is_sampling = 1;
2488
2489
2490
2491
2492 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2493 if (ret) goto error;
2494
2495 if (size) {
2496
2497
2498
2499 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2500 if (ret) goto error;
2501
2502
2503 arg->ctx_smpl_vaddr = uaddr;
2504 }
2505 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2506
2507error:
2508 return ret;
2509}
2510
2511static void
2512pfm_reset_pmu_state(pfm_context_t *ctx)
2513{
2514 int i;
2515
2516
2517
2518
2519 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2520 if (PMC_IS_IMPL(i) == 0) continue;
2521 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2522 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2523 }
2524
2525
2526
2527
2528
2529
2530
2531
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2552
2553
2554
2555
2556 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2557
2558 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2559
2560
2561
2562
2563 ctx->ctx_used_ibrs[0] = 0UL;
2564 ctx->ctx_used_dbrs[0] = 0UL;
2565}
2566
2567static int
2568pfm_ctx_getsize(void *arg, size_t *sz)
2569{
2570 pfarg_context_t *req = (pfarg_context_t *)arg;
2571 pfm_buffer_fmt_t *fmt;
2572
2573 *sz = 0;
2574
2575 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2576
2577 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2578 if (fmt == NULL) {
2579 DPRINT(("cannot find buffer format\n"));
2580 return -EINVAL;
2581 }
2582
2583 *sz = fmt->fmt_arg_size;
2584 DPRINT(("arg_size=%lu\n", *sz));
2585
2586 return 0;
2587}
2588
2589
2590
2591
2592
2593
2594
2595
2596
2597static int
2598pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2599{
2600
2601
2602
2603 if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
2605 return -EPERM;
2606 }
2607 if (pfm_bad_permissions(task)) {
2608 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2609 return -EPERM;
2610 }
2611
2612
2613
2614 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2615 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2616 return -EINVAL;
2617 }
2618
2619 if (task->exit_state == EXIT_ZOMBIE) {
2620 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2621 return -EBUSY;
2622 }
2623
2624
2625
2626
2627 if (task == current) return 0;
2628
2629 if (!task_is_stopped_or_traced(task)) {
2630 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2631 return -EBUSY;
2632 }
2633
2634
2635
2636 wait_task_inactive(task, 0);
2637
2638
2639
2640 return 0;
2641}
2642
2643static int
2644pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2645{
2646 struct task_struct *p = current;
2647 int ret;
2648
2649
2650 if (pid < 2) return -EPERM;
2651
2652 if (pid != task_pid_vnr(current)) {
2653
2654 read_lock(&tasklist_lock);
2655
2656 p = find_task_by_vpid(pid);
2657
2658
2659 if (p) get_task_struct(p);
2660
2661 read_unlock(&tasklist_lock);
2662
2663 if (p == NULL) return -ESRCH;
2664 }
2665
2666 ret = pfm_task_incompatible(ctx, p);
2667 if (ret == 0) {
2668 *task = p;
2669 } else if (p != current) {
2670 pfm_put_task(p);
2671 }
2672 return ret;
2673}
2674
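/*
 * create a new monitoring context: allocate the context and its
 * backing file, optionally set up a sampling buffer format, and
 * install the default PMU state.
 */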
2677static int
2678pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2679{
2680 pfarg_context_t *req = (pfarg_context_t *)arg;
2681 struct file *filp;
2682 struct path path;
2683 int ctx_flags;
2684 int fd;
2685 int ret;
2686
2687
2688 ret = pfarg_is_sane(current, req);
2689 if (ret < 0)
2690 return ret;
2691
2692 ctx_flags = req->ctx_flags;
2693
2694 ret = -ENOMEM;
2695
2696 fd = get_unused_fd();
2697 if (fd < 0)
2698 return fd;
2699
2700 ctx = pfm_context_alloc(ctx_flags);
2701 if (!ctx)
2702 goto error;
2703
2704 filp = pfm_alloc_file(ctx);
2705 if (IS_ERR(filp)) {
2706 ret = PTR_ERR(filp);
2707 goto error_file;
2708 }
2709
2710 req->ctx_fd = ctx->ctx_fd = fd;
2711
2712
2713
2714
2715 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2716 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2717 if (ret)
2718 goto buffer_error;
2719 }
2720
2721 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
2722 ctx,
2723 ctx_flags,
2724 ctx->ctx_fl_system,
2725 ctx->ctx_fl_block,
2726 ctx->ctx_fl_excl_idle,
2727 ctx->ctx_fl_no_msg,
2728 ctx->ctx_fd));
2729
2730
2731
2732
2733 pfm_reset_pmu_state(ctx);
2734
2735 fd_install(fd, filp);
2736
2737 return 0;
2738
2739buffer_error:
2740 path = filp->f_path;
2741 put_filp(filp);
2742 path_put(&path);
2743
2744 if (ctx->ctx_buf_fmt) {
2745 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2746 }
2747error_file:
2748 pfm_context_free(ctx);
2749
2750error:
2751 put_unused_fd(fd);
2752 return ret;
2753}
2754
2755static inline unsigned long
2756pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2757{
2758 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2759 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2760 extern unsigned long carta_random32 (unsigned long seed);
2761
2762 if (reg->flags & PFM_REGFL_RANDOM) {
2763 new_seed = carta_random32(old_seed);
2764 val -= (old_seed & mask);
2765 if ((mask >> 32) != 0)
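			/* construct a full 64-bit random value */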
2767 new_seed |= carta_random32(old_seed >> 32) << 32;
2768 reg->seed = new_seed;
2769 }
2770 reg->lval = val;
2771 return val;
2772}
2773
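/*
 * like pfm_reset_regs(), but for a context in the MASKED state: the
 * new values are only recorded in the context; the hardware PMDs are
 * updated when monitoring is unmasked.
 */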
2774static void
2775pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2776{
2777 unsigned long mask = ovfl_regs[0];
2778 unsigned long reset_others = 0UL;
2779 unsigned long val;
2780 int i;
2781
2782
2783
2784
2785 mask >>= PMU_FIRST_COUNTER;
2786 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2787
2788 if ((mask & 0x1UL) == 0UL) continue;
2789
		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2791 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2792
2793 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2794 }
2795
2796
2797
2798
2799 for(i = 0; reset_others; i++, reset_others >>= 1) {
2800
2801 if ((reset_others & 0x1) == 0) continue;
2802
2803 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2804
2805 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2806 is_long_reset ? "long" : "short", i, val));
2807 }
2808}
2809
2810static void
2811pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2812{
2813 unsigned long mask = ovfl_regs[0];
2814 unsigned long reset_others = 0UL;
2815 unsigned long val;
2816 int i;
2817
2818 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2819
2820 if (ctx->ctx_state == PFM_CTX_MASKED) {
2821 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2822 return;
2823 }
2824
2825
2826
2827
2828 mask >>= PMU_FIRST_COUNTER;
2829 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2830
2831 if ((mask & 0x1UL) == 0UL) continue;
2832
		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2834 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2835
2836 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2837
2838 pfm_write_soft_counter(ctx, i, val);
2839 }
2840
2841
2842
2843
2844 for(i = 0; reset_others; i++, reset_others >>= 1) {
2845
2846 if ((reset_others & 0x1) == 0) continue;
2847
2848 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2849
2850 if (PMD_IS_COUNTING(i)) {
2851 pfm_write_soft_counter(ctx, i, val);
2852 } else {
2853 ia64_set_pmd(i, val);
2854 }
2855 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2856 is_long_reset ? "long" : "short", i, val));
2857 }
2858 ia64_srlz_d();
2859}
2860
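/*
 * validate and program PMC registers. Each value is recorded in the
 * context; if the context is loaded and the caller has access to the
 * PMU the value is also written to the hardware, otherwise it is
 * flagged for lazy reload on the next activation (SMP case).
 */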
2861static int
2862pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2863{
2864 struct task_struct *task;
2865 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2866 unsigned long value, pmc_pm;
2867 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2868 unsigned int cnum, reg_flags, flags, pmc_type;
2869 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2870 int is_monitor, is_counting, state;
2871 int ret = -EINVAL;
2872 pfm_reg_check_t wr_func;
2873#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2874
2875 state = ctx->ctx_state;
2876 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2877 is_system = ctx->ctx_fl_system;
2878 task = ctx->ctx_task;
2879 impl_pmds = pmu_conf->impl_pmds[0];
2880
2881 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2882
2883 if (is_loaded) {
2884
2885
2886
2887
2888
2889 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2890 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2891 return -EBUSY;
2892 }
2893 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2894 }
2895 expert_mode = pfm_sysctl.expert_mode;
2896
2897 for (i = 0; i < count; i++, req++) {
2898
2899 cnum = req->reg_num;
2900 reg_flags = req->reg_flags;
2901 value = req->reg_value;
2902 smpl_pmds = req->reg_smpl_pmds[0];
2903 reset_pmds = req->reg_reset_pmds[0];
2904 flags = 0;
2905
2906
2907 if (cnum >= PMU_MAX_PMCS) {
2908 DPRINT(("pmc%u is invalid\n", cnum));
2909 goto error;
2910 }
2911
2912 pmc_type = pmu_conf->pmc_desc[cnum].type;
2913 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2914 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2915 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2916
2917
2918
2919
2920
2921
2922 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2923 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2924 goto error;
2925 }
2926 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2927
2928
2929
2930
2931
2932 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2933 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2934 cnum,
2935 pmc_pm,
2936 is_system));
2937 goto error;
2938 }
2939
2940 if (is_counting) {
2941
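			/*
			 * enforce generation of the overflow interrupt:
			 * the 64-bit software counter emulation depends
			 * on being told about every hardware overflow
			 */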
2945 value |= 1 << PMU_PMC_OI;
2946
2947 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2948 flags |= PFM_REGFL_OVFL_NOTIFY;
2949 }
2950
2951 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2952
2953
2954 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2955 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2956 goto error;
2957 }
2958
2959
2960 if ((reset_pmds & impl_pmds) != reset_pmds) {
2961 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2962 goto error;
2963 }
2964 } else {
2965 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2966 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2967 goto error;
2968 }
2969
2970 }
2971
2972
2973
2974
2975 if (likely(expert_mode == 0 && wr_func)) {
2976 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2977 if (ret) goto error;
2978 ret = -EINVAL;
2979 }
2980
2981
2982
2983
2984 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2985
2986
2987
2988
2989
2990
2991
2992
2993 if (is_counting) {
2994
2995
2996
2997 ctx->ctx_pmds[cnum].flags = flags;
2998
2999 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
3000 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
3001 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
3002
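			/*
			 * mark all PMDs referenced as sampling or reset
			 * registers as used: they must be saved/restored
			 * with the context
			 */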
3014 CTX_USED_PMD(ctx, reset_pmds);
3015 CTX_USED_PMD(ctx, smpl_pmds);
3016
3017
3018
3019
			/* note the parentheses: clear only bit cnum, i.e. ~(1UL << cnum) */
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3021 }
3022
3023
3024
3025
3026
3027 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3028
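		/*
		 * keep track of the monitoring PMCs in use so that
		 * monitoring can be masked and later restored
		 */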
3041 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3042
3043
3044
3045
3046 ctx->ctx_pmcs[cnum] = value;
3047
3048 if (is_loaded) {
3049
3050
3051
3052 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3053
3054
3055
3056
3057 if (can_access_pmu) {
3058 ia64_set_pmc(cnum, value);
3059 }
3060#ifdef CONFIG_SMP
3061 else {
3062
3063
3064
3065
3066
3067
3068
3069 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3070 }
3071#endif
3072 }
3073
3074 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3075 cnum,
3076 value,
3077 is_loaded,
3078 can_access_pmu,
3079 flags,
3080 ctx->ctx_all_pmcs[0],
3081 ctx->ctx_used_pmds[0],
3082 ctx->ctx_pmds[cnum].eventid,
3083 smpl_pmds,
3084 reset_pmds,
3085 ctx->ctx_reload_pmcs[0],
3086 ctx->ctx_used_monitors[0],
3087 ctx->ctx_ovfl_regs[0]));
3088 }
3089
3090
3091
3092
3093 if (can_access_pmu) ia64_srlz_d();
3094
3095 return 0;
3096error:
3097 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3098 return ret;
3099}
3100
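/*
 * program PMD (data) registers. For counting PMDs only the low
 * ovfl_mask bits are kept in hardware; the upper bits live in
 * ctx_pmds[].val and are maintained by the overflow handler.
 */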
3101static int
3102pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3103{
3104 struct task_struct *task;
3105 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3106 unsigned long value, hw_value, ovfl_mask;
3107 unsigned int cnum;
3108 int i, can_access_pmu = 0, state;
3109 int is_counting, is_loaded, is_system, expert_mode;
3110 int ret = -EINVAL;
3111 pfm_reg_check_t wr_func;
3112
3113
3114 state = ctx->ctx_state;
3115 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3116 is_system = ctx->ctx_fl_system;
3117 ovfl_mask = pmu_conf->ovfl_val;
3118 task = ctx->ctx_task;
3119
3120 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3121
3122
3123
3124
3125
3126 if (likely(is_loaded)) {
3127
3128
3129
3130
3131
3132 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3133 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3134 return -EBUSY;
3135 }
3136 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3137 }
3138 expert_mode = pfm_sysctl.expert_mode;
3139
3140 for (i = 0; i < count; i++, req++) {
3141
3142 cnum = req->reg_num;
3143 value = req->reg_value;
3144
3145 if (!PMD_IS_IMPL(cnum)) {
3146 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3147 goto abort_mission;
3148 }
3149 is_counting = PMD_IS_COUNTING(cnum);
3150 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3151
3152
3153
3154
3155 if (unlikely(expert_mode == 0 && wr_func)) {
3156 unsigned long v = value;
3157
3158 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3159 if (ret) goto abort_mission;
3160
3161 value = v;
3162 ret = -EINVAL;
3163 }
3164
3165
3166
3167
3168 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3169
3170
3171
3172
3173 hw_value = value;
3174
3175
3176
3177
3178 if (is_counting) {
3179
3180
3181
3182 ctx->ctx_pmds[cnum].lval = value;
3183
3184
3185
3186
3187 if (is_loaded) {
3188 hw_value = value & ovfl_mask;
3189 value = value & ~ovfl_mask;
3190 }
3191 }
3192
3193
3194
3195 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3196 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3197
3198
3199
3200
3201 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3202 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3203
3204
3205
3206
3207 ctx->ctx_pmds[cnum].val = value;
3208
3209
3210
3211
3212
3213
3214
3215 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3216
3217
3218
3219
3220 CTX_USED_PMD(ctx, RDEP(cnum));
3221
3222
3223
3224
3225
3226 if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);	/* clear bit cnum only */
3228 }
3229
3230 if (is_loaded) {
3231
3232
3233
3234 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3235
3236
3237
3238
3239 if (can_access_pmu) {
3240 ia64_set_pmd(cnum, hw_value);
3241 } else {
3242#ifdef CONFIG_SMP
3243
3244
3245
3246
3247
3248 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3249#endif
3250 }
3251 }
3252
3253 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3254 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3255 cnum,
3256 value,
3257 is_loaded,
3258 can_access_pmu,
3259 hw_value,
3260 ctx->ctx_pmds[cnum].val,
3261 ctx->ctx_pmds[cnum].short_reset,
3262 ctx->ctx_pmds[cnum].long_reset,
3263 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3264 ctx->ctx_pmds[cnum].seed,
3265 ctx->ctx_pmds[cnum].mask,
3266 ctx->ctx_used_pmds[0],
3267 ctx->ctx_pmds[cnum].reset_pmds[0],
3268 ctx->ctx_reload_pmds[0],
3269 ctx->ctx_all_pmds[0],
3270 ctx->ctx_ovfl_regs[0]));
3271 }
3272
3273
3274
3275
3276 if (can_access_pmu) ia64_srlz_d();
3277
3278 return 0;
3279
3280abort_mission:
3281
3282
3283
3284 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3285 return ret;
3286}
3287
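/*
 * read PMD registers. For counting PMDs the full 64-bit value is
 * reconstructed by adding the software-maintained upper bits
 * (ctx_pmds[].val) to the low ovfl_mask bits taken from the hardware
 * or from the saved thread state.
 */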
3297static int
3298pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3299{
3300 struct task_struct *task;
3301 unsigned long val = 0UL, lval, ovfl_mask, sval;
3302 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3303 unsigned int cnum, reg_flags = 0;
3304 int i, can_access_pmu = 0, state;
3305 int is_loaded, is_system, is_counting, expert_mode;
3306 int ret = -EINVAL;
3307 pfm_reg_check_t rd_func;
3308
3309
3310
3311
3312
3313
3314 state = ctx->ctx_state;
3315 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3316 is_system = ctx->ctx_fl_system;
3317 ovfl_mask = pmu_conf->ovfl_val;
3318 task = ctx->ctx_task;
3319
3320 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3321
3322 if (likely(is_loaded)) {
3323
3324
3325
3326
3327
3328 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3329 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3330 return -EBUSY;
3331 }
3332
3333
3334
3335 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3336
3337 if (can_access_pmu) ia64_srlz_d();
3338 }
3339 expert_mode = pfm_sysctl.expert_mode;
3340
3341 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3342 is_loaded,
3343 can_access_pmu,
3344 state));
3345
3346
3347
3348
3349
3350
3351 for (i = 0; i < count; i++, req++) {
3352
3353 cnum = req->reg_num;
3354 reg_flags = req->reg_flags;
3355
3356 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3357
3358
3359
3360
3361
3362
3363
3364
3365 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3366
3367 sval = ctx->ctx_pmds[cnum].val;
3368 lval = ctx->ctx_pmds[cnum].lval;
3369 is_counting = PMD_IS_COUNTING(cnum);
3370
3371
3372
3373
3374
3375
		if (can_access_pmu) {
3377 val = ia64_get_pmd(cnum);
3378 } else {
3379
3380
3381
3382
3383
3384 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3385 }
3386 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3387
3388 if (is_counting) {
3389
3390
3391
3392 val &= ovfl_mask;
3393 val += sval;
3394 }
3395
3396
3397
3398
3399 if (unlikely(expert_mode == 0 && rd_func)) {
3400 unsigned long v = val;
3401 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3402 if (ret) goto error;
3403 val = v;
3404 ret = -EINVAL;
3405 }
3406
3407 PFM_REG_RETFLAG_SET(reg_flags, 0);
3408
3409 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3410
3411
3412
3413
3414
3415
3416 req->reg_value = val;
3417 req->reg_flags = reg_flags;
3418 req->reg_last_reset_val = lval;
3419 }
3420
3421 return 0;
3422
3423error:
3424 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3425 return ret;
3426}
3427
3428int
3429pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3430{
3431 pfm_context_t *ctx;
3432
3433 if (req == NULL) return -EINVAL;
3434
3435 ctx = GET_PMU_CTX();
3436
3437 if (ctx == NULL) return -EINVAL;
3438
3439
3440
3441
3442
3443 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3444
3445 return pfm_write_pmcs(ctx, req, nreq, regs);
3446}
3447EXPORT_SYMBOL(pfm_mod_write_pmcs);
3448
3449int
3450pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3451{
3452 pfm_context_t *ctx;
3453
3454 if (req == NULL) return -EINVAL;
3455
3456 ctx = GET_PMU_CTX();
3457
3458 if (ctx == NULL) return -EINVAL;
3459
3460
3461
3462
3463
3464 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3465
3466 return pfm_read_pmds(ctx, req, nreq, regs);
3467}
3468EXPORT_SYMBOL(pfm_mod_read_pmds);
3469
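/*
 * called when a thread (presumably via ptrace) starts using the ia64
 * debug registers. perfmon range restrictions and ptrace use of
 * ibr/dbr are mutually exclusive: fail if a system-wide perfmon
 * session currently uses the debug registers.
 */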
3474int
3475pfm_use_debug_registers(struct task_struct *task)
3476{
3477 pfm_context_t *ctx = task->thread.pfm_context;
3478 unsigned long flags;
3479 int ret = 0;
3480
3481 if (pmu_conf->use_rr_dbregs == 0) return 0;
3482
3483 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3484
3485
3486
3487
3488 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3489
3490
3491
3492
3493
3494
3495
3496
3497
3498 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3499
3500 LOCK_PFS(flags);
3501
3502
3503
3504
3505
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3507 ret = -1;
3508 else
3509 pfm_sessions.pfs_ptrace_use_dbregs++;
3510
3511 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3512 pfm_sessions.pfs_ptrace_use_dbregs,
3513 pfm_sessions.pfs_sys_use_dbregs,
3514 task_pid_nr(task), ret));
3515
3516 UNLOCK_PFS(flags);
3517
3518 return ret;
3519}
3520
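/*
 * called for every task that exits with IA64_THREAD_DBG_VALID set:
 * releases the debug register resource acquired through
 * pfm_use_debug_registers()
 */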
3529int
3530pfm_release_debug_registers(struct task_struct *task)
3531{
3532 unsigned long flags;
3533 int ret;
3534
3535 if (pmu_conf->use_rr_dbregs == 0) return 0;
3536
3537 LOCK_PFS(flags);
3538 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3539 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3540 ret = -1;
3541 } else {
3542 pfm_sessions.pfs_ptrace_use_dbregs--;
3543 ret = 0;
3544 }
3545 UNLOCK_PFS(flags);
3546
3547 return ret;
3548}
3549
3550static int
3551pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3552{
3553 struct task_struct *task;
3554 pfm_buffer_fmt_t *fmt;
3555 pfm_ovfl_ctrl_t rst_ctrl;
3556 int state, is_system;
3557 int ret = 0;
3558
3559 state = ctx->ctx_state;
3560 fmt = ctx->ctx_buf_fmt;
3561 is_system = ctx->ctx_fl_system;
3562 task = PFM_CTX_TASK(ctx);
3563
3564 switch(state) {
3565 case PFM_CTX_MASKED:
3566 break;
3567 case PFM_CTX_LOADED:
3568 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
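		/* fall through to the error cases when there is no restart_active handler */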
3570 case PFM_CTX_UNLOADED:
3571 case PFM_CTX_ZOMBIE:
3572 DPRINT(("invalid state=%d\n", state));
3573 return -EBUSY;
3574 default:
3575 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3576 return -EINVAL;
3577 }
3578
3579
3580
3581
3582
3583
3584 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3585 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3586 return -EBUSY;
3587 }
3588
3589
3590 if (unlikely(task == NULL)) {
3591 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3592 return -EINVAL;
3593 }
3594
3595 if (task == current || is_system) {
3596
3597 fmt = ctx->ctx_buf_fmt;
3598
3599 DPRINT(("restarting self %d ovfl=0x%lx\n",
3600 task_pid_nr(task),
3601 ctx->ctx_ovfl_regs[0]));
3602
3603 if (CTX_HAS_SMPL(ctx)) {
3604
3605 prefetch(ctx->ctx_smpl_hdr);
3606
3607 rst_ctrl.bits.mask_monitoring = 0;
3608 rst_ctrl.bits.reset_ovfl_pmds = 0;
3609
3610 if (state == PFM_CTX_LOADED)
3611 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3612 else
3613 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3614 } else {
3615 rst_ctrl.bits.mask_monitoring = 0;
3616 rst_ctrl.bits.reset_ovfl_pmds = 1;
3617 }
3618
3619 if (ret == 0) {
3620 if (rst_ctrl.bits.reset_ovfl_pmds)
3621 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3622
3623 if (rst_ctrl.bits.mask_monitoring == 0) {
3624 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3625
3626 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3627 } else {
3628 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3629
3630
3631 }
3632 }
3633
3634
3635
3636 ctx->ctx_ovfl_regs[0] = 0UL;
3637
3638
3639
3640
3641 ctx->ctx_state = PFM_CTX_LOADED;
3642
3643
3644
3645
3646 ctx->ctx_fl_can_restart = 0;
3647
3648 return 0;
3649 }
3650
3651
3652
3653
3654
3655
3656
3657
3658
3659 if (state == PFM_CTX_MASKED) {
3660 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3661
3662
3663
3664
3665 ctx->ctx_fl_can_restart = 0;
3666 }
3667
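	/*
	 * the monitored task is not the caller: if it is blocked waiting
	 * for a PFM_RESTART, wake it via the completion; otherwise arm a
	 * reset that is performed on its way back to user level.
	 */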
3684 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3685 DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
3686 complete(&ctx->ctx_restart_done);
3687 } else {
3688 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3689
3690 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3691
3692 PFM_SET_WORK_PENDING(task, 1);
3693
3694 set_notify_resume(task);
3695
3696
3697
3698
3699 }
3700 return 0;
3701}
3702
3703static int
3704pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3705{
3706 unsigned int m = *(unsigned int *)arg;
3707
3708 pfm_sysctl.debug = m == 0 ? 0 : 1;
3709
3710 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3711
3712 if (m == 0) {
3713 memset(pfm_stats, 0, sizeof(pfm_stats));
3714 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3715 }
3716 return 0;
3717}
3718
3719
3720
3721
3722static int
3723pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3724{
3725 struct thread_struct *thread = NULL;
3726 struct task_struct *task;
3727 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3728 unsigned long flags;
3729 dbreg_t dbreg;
3730 unsigned int rnum;
3731 int first_time;
3732 int ret = 0, state;
3733 int i, can_access_pmu = 0;
3734 int is_system, is_loaded;
3735
3736 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3737
3738 state = ctx->ctx_state;
3739 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3740 is_system = ctx->ctx_fl_system;
3741 task = ctx->ctx_task;
3742
3743 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3744
3745
3746
3747
3748
3749 if (is_loaded) {
3750 thread = &task->thread;
3751
3752
3753
3754
3755
3756 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3757 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3758 return -EBUSY;
3759 }
3760 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3761 }
3762
3763
3764
3765
3766
3767
3768
3769
3770 first_time = ctx->ctx_fl_using_dbreg == 0;
3771
3772
3773
3774
3775 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3776 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3777 return -EBUSY;
3778 }
3779
3780
3781
3782
3783
3784
3785
3786
3787 if (is_loaded) {
3788 LOCK_PFS(flags);
3789
3790 if (first_time && is_system) {
3791 if (pfm_sessions.pfs_ptrace_use_dbregs)
3792 ret = -EBUSY;
3793 else
3794 pfm_sessions.pfs_sys_use_dbregs++;
3795 }
3796 UNLOCK_PFS(flags);
3797 }
3798
3799 if (ret != 0) return ret;
3800
3801
3802
3803
3804
3805 ctx->ctx_fl_using_dbreg = 1;
3806
3807
3808
3809
3810
3811
3812
3813
3814
3815
3816 if (first_time && can_access_pmu) {
3817 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3818 for (i=0; i < pmu_conf->num_ibrs; i++) {
3819 ia64_set_ibr(i, 0UL);
3820 ia64_dv_serialize_instruction();
3821 }
3822 ia64_srlz_i();
3823 for (i=0; i < pmu_conf->num_dbrs; i++) {
3824 ia64_set_dbr(i, 0UL);
3825 ia64_dv_serialize_data();
3826 }
3827 ia64_srlz_d();
3828 }
3829
3830
3831
3832
3833 for (i = 0; i < count; i++, req++) {
3834
3835 rnum = req->dbreg_num;
3836 dbreg.val = req->dbreg_value;
3837
3838 ret = -EINVAL;
3839
3840 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3841 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3842 rnum, dbreg.val, mode, i, count));
3843
3844 goto abort_mission;
3845 }
3846
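		/*
		 * odd-numbered debug registers hold the enable bits:
		 * force ibr.x (resp. dbr.r/dbr.w) to 0 so that an
		 * active breakpoint or watchpoint is never installed
		 * from here
		 */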
3850 if (rnum & 0x1) {
3851 if (mode == PFM_CODE_RR)
3852 dbreg.ibr.ibr_x = 0;
3853 else
3854 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3855 }
3856
3857 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869 if (mode == PFM_CODE_RR) {
3870 CTX_USED_IBR(ctx, rnum);
3871
3872 if (can_access_pmu) {
3873 ia64_set_ibr(rnum, dbreg.val);
3874 ia64_dv_serialize_instruction();
3875 }
3876
3877 ctx->ctx_ibrs[rnum] = dbreg.val;
3878
3879 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3880 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3881 } else {
3882 CTX_USED_DBR(ctx, rnum);
3883
3884 if (can_access_pmu) {
3885 ia64_set_dbr(rnum, dbreg.val);
3886 ia64_dv_serialize_data();
3887 }
3888 ctx->ctx_dbrs[rnum] = dbreg.val;
3889
3890 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3891 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3892 }
3893 }
3894
3895 return 0;
3896
3897abort_mission:
3898
3899
3900
3901 if (first_time) {
3902 LOCK_PFS(flags);
3903 if (ctx->ctx_fl_system) {
3904 pfm_sessions.pfs_sys_use_dbregs--;
3905 }
3906 UNLOCK_PFS(flags);
3907 ctx->ctx_fl_using_dbreg = 0;
3908 }
3909
3910
3911
3912 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3913
3914 return ret;
3915}
3916
3917static int
3918pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3919{
3920 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3921}
3922
3923static int
3924pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3925{
3926 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3927}
3928
3929int
3930pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3931{
3932 pfm_context_t *ctx;
3933
3934 if (req == NULL) return -EINVAL;
3935
3936 ctx = GET_PMU_CTX();
3937
3938 if (ctx == NULL) return -EINVAL;
3939
3940
3941
3942
3943
3944 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3945
3946 return pfm_write_ibrs(ctx, req, nreq, regs);
3947}
3948EXPORT_SYMBOL(pfm_mod_write_ibrs);
3949
3950int
3951pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3952{
3953 pfm_context_t *ctx;
3954
3955 if (req == NULL) return -EINVAL;
3956
3957 ctx = GET_PMU_CTX();
3958
3959 if (ctx == NULL) return -EINVAL;
3960
3961
3962
3963
3964
3965 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3966
3967 return pfm_write_dbrs(ctx, req, nreq, regs);
3968}
3969EXPORT_SYMBOL(pfm_mod_write_dbrs);
3970
3971
3972static int
3973pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3974{
3975 pfarg_features_t *req = (pfarg_features_t *)arg;
3976
3977 req->ft_version = PFM_VERSION;
3978 return 0;
3979}
3980
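/*
 * stop monitoring without unloading the context: clear dcr.pp and
 * psr.pp for a system-wide session, psr.up for a per-task session.
 */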
3981static int
3982pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3983{
3984 struct pt_regs *tregs;
3985 struct task_struct *task = PFM_CTX_TASK(ctx);
3986 int state, is_system;
3987
3988 state = ctx->ctx_state;
3989 is_system = ctx->ctx_fl_system;
3990
3991
3992
3993
3994 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3995
3996
3997
3998
3999
4000
4001 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4002 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4003 return -EBUSY;
4004 }
4005 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
4006 task_pid_nr(PFM_CTX_TASK(ctx)),
4007 state,
4008 is_system));
4009
4010
4011
4012
4013
4014 if (is_system) {
4015
4016
4017
4018
4019
4020 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4021 ia64_srlz_i();
4022
4023
4024
4025
4026 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4027
4028
4029
4030
4031 pfm_clear_psr_pp();
4032
4033
4034
4035
4036 ia64_psr(regs)->pp = 0;
4037
4038 return 0;
4039 }
4040
4041
4042
4043
4044 if (task == current) {
4045
4046 pfm_clear_psr_up();
4047
4048
4049
4050
4051 ia64_psr(regs)->up = 0;
4052 } else {
4053 tregs = task_pt_regs(task);
4054
4055
4056
4057
4058 ia64_psr(tregs)->up = 0;
4059
4060
4061
4062
4063 ctx->ctx_saved_psr_up = 0;
4064 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4065 }
4066 return 0;
4067}
4068
4069
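/*
 * resume monitoring: the mirror image of pfm_stop()
 */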
4070static int
4071pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4072{
4073 struct pt_regs *tregs;
4074 int state, is_system;
4075
4076 state = ctx->ctx_state;
4077 is_system = ctx->ctx_fl_system;
4078
4079 if (state != PFM_CTX_LOADED) return -EINVAL;
4080
4081
4082
4083
4084
4085
4086 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4087 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4088 return -EBUSY;
4089 }
4090
4091
4092
4093
4094
4095
4096 if (is_system) {
4097
4098
4099
4100
4101 ia64_psr(regs)->pp = 1;
4102
4103
4104
4105
4106 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4107
4108
4109
4110
4111 pfm_set_psr_pp();
4112
4113
4114 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4115 ia64_srlz_i();
4116
4117 return 0;
4118 }
4119
4120
4121
4122
4123
4124 if (ctx->ctx_task == current) {
4125
4126
4127 pfm_set_psr_up();
4128
4129
4130
4131
4132 ia64_psr(regs)->up = 1;
4133
4134 } else {
4135 tregs = task_pt_regs(ctx->ctx_task);
4136
4137
4138
4139
4140
4141 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4142
4143
4144
4145
4146 ia64_psr(tregs)->up = 1;
4147 }
4148 return 0;
4149}
4150
4151static int
4152pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4153{
4154 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4155 unsigned int cnum;
4156 int i;
4157 int ret = -EINVAL;
4158
4159 for (i = 0; i < count; i++, req++) {
4160
4161 cnum = req->reg_num;
4162
4163 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4164
4165 req->reg_value = PMC_DFL_VAL(cnum);
4166
4167 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4168
4169 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4170 }
4171 return 0;
4172
4173abort_mission:
4174 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4175 return ret;
4176}
4177
4178static int
4179pfm_check_task_exist(pfm_context_t *ctx)
4180{
4181 struct task_struct *g, *t;
4182 int ret = -ESRCH;
4183
4184 read_lock(&tasklist_lock);
4185
4186 do_each_thread (g, t) {
4187 if (t->thread.pfm_context == ctx) {
4188 ret = 0;
4189 goto out;
4190 }
4191 } while_each_thread (g, t);
4192out:
4193 read_unlock(&tasklist_lock);
4194
4195 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4196
4197 return ret;
4198}
4199
4200static int
4201pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4202{
4203 struct task_struct *task;
4204 struct thread_struct *thread;
	pfm_context_t *old;
4206 unsigned long flags;
4207#ifndef CONFIG_SMP
4208 struct task_struct *owner_task = NULL;
4209#endif
4210 pfarg_load_t *req = (pfarg_load_t *)arg;
4211 unsigned long *pmcs_source, *pmds_source;
4212 int the_cpu;
4213 int ret = 0;
4214 int state, is_system, set_dbregs = 0;
4215
4216 state = ctx->ctx_state;
4217 is_system = ctx->ctx_fl_system;
4218
4219
4220
4221 if (state != PFM_CTX_UNLOADED) {
4222 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4223 req->load_pid,
4224 ctx->ctx_state));
4225 return -EBUSY;
4226 }
4227
4228 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4229
4230 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4231 DPRINT(("cannot use blocking mode on self\n"));
4232 return -EINVAL;
4233 }
4234
4235 ret = pfm_get_task(ctx, req->load_pid, &task);
4236 if (ret) {
4237 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4238 return ret;
4239 }
4240
4241 ret = -EINVAL;
4242
4243
4244
4245
4246 if (is_system && task != current) {
4247 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4248 req->load_pid));
4249 goto error;
4250 }
4251
4252 thread = &task->thread;
4253
4254 ret = 0;
4255
4256
4257
4258
4259 if (ctx->ctx_fl_using_dbreg) {
4260 if (thread->flags & IA64_THREAD_DBG_VALID) {
4261 ret = -EBUSY;
4262 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4263 goto error;
4264 }
4265 LOCK_PFS(flags);
4266
4267 if (is_system) {
4268 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4269 DPRINT(("cannot load [%d] dbregs in use\n",
4270 task_pid_nr(task)));
4271 ret = -EBUSY;
4272 } else {
4273 pfm_sessions.pfs_sys_use_dbregs++;
4274 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4275 set_dbregs = 1;
4276 }
4277 }
4278
4279 UNLOCK_PFS(flags);
4280
4281 if (ret) goto error;
4282 }
4283
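	/*
	 * record the CPU the context is being loaded on. For a
	 * system-wide session the caller must remain on this CPU:
	 * commands issued from any other CPU are rejected with -EBUSY.
	 */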
4299 the_cpu = ctx->ctx_cpu = smp_processor_id();
4300
4301 ret = -EBUSY;
4302
4303
4304
4305 ret = pfm_reserve_session(current, is_system, the_cpu);
4306 if (ret) goto error;
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4318 thread->pfm_context, ctx));
4319
4320 ret = -EBUSY;
4321 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4322 if (old != NULL) {
4323 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4324 goto error_unres;
4325 }
4326
4327 pfm_reset_msgq(ctx);
4328
4329 ctx->ctx_state = PFM_CTX_LOADED;
4330
4331
4332
4333
4334 ctx->ctx_task = task;
4335
4336 if (is_system) {
4337
4338
4339
4340 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4341 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4342
4343 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4344 } else {
4345 thread->flags |= IA64_THREAD_PM_VALID;
4346 }
4347
4348
4349
4350
4351 pfm_copy_pmds(task, ctx);
4352 pfm_copy_pmcs(task, ctx);
4353
4354 pmcs_source = ctx->th_pmcs;
4355 pmds_source = ctx->th_pmds;
4356
4357
4358
4359
4360 if (task == current) {
4361
4362 if (is_system == 0) {
4363
4364
4365 ia64_psr(regs)->sp = 0;
4366 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4367
4368 SET_LAST_CPU(ctx, smp_processor_id());
4369 INC_ACTIVATION();
4370 SET_ACTIVATION(ctx);
4371#ifndef CONFIG_SMP
4372
4373
4374
4375 owner_task = GET_PMU_OWNER();
4376 if (owner_task) pfm_lazy_save_regs(owner_task);
4377#endif
4378 }
4379
4380
4381
4382
4383 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4384 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4385
4386 ctx->ctx_reload_pmcs[0] = 0UL;
4387 ctx->ctx_reload_pmds[0] = 0UL;
4388
4389
4390
4391
4392 if (ctx->ctx_fl_using_dbreg) {
4393 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4394 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4395 }
4396
4397
4398
4399 SET_PMU_OWNER(task, ctx);
4400
4401 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4402 } else {
4403
4404
4405
4406 regs = task_pt_regs(task);
4407
4408
4409 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4410 SET_LAST_CPU(ctx, -1);
4411
4412
4413 ctx->ctx_saved_psr_up = 0UL;
4414 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4415 }
4416
4417 ret = 0;
4418
4419error_unres:
4420 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4421error:
4422
4423
4424
4425 if (ret && set_dbregs) {
4426 LOCK_PFS(flags);
4427 pfm_sessions.pfs_sys_use_dbregs--;
4428 UNLOCK_PFS(flags);
4429 }
4430
4431
4432
4433 if (is_system == 0 && task != current) {
4434 pfm_put_task(task);
4435
4436 if (ret == 0) {
4437 ret = pfm_check_task_exist(ctx);
4438 if (ret) {
4439 ctx->ctx_state = PFM_CTX_UNLOADED;
4440 ctx->ctx_task = NULL;
4441 }
4442 }
4443 }
4444 return ret;
4445}
4446
4447
4448
4449
4450
4451
4452
4453
4454
4455static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4456
4457static int
4458pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4459{
4460 struct task_struct *task = PFM_CTX_TASK(ctx);
4461 struct pt_regs *tregs;
4462 int prev_state, is_system;
4463 int ret;
4464
4465 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4466
4467 prev_state = ctx->ctx_state;
4468 is_system = ctx->ctx_fl_system;
4469
4470
4471
4472
4473 if (prev_state == PFM_CTX_UNLOADED) {
4474 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4475 return 0;
4476 }
4477
4478
4479
4480
4481 ret = pfm_stop(ctx, NULL, 0, regs);
4482 if (ret) return ret;
4483
4484 ctx->ctx_state = PFM_CTX_UNLOADED;
4485
4486
4487
4488
4489
4490
4491 if (is_system) {
4492
4493
4494
4495
4496
4497
4498 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4499 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4500
4501
4502
4503
4504
4505 pfm_flush_pmds(current, ctx);
4506
4507
4508
4509
4510
4511 if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);
4513
4514
4515
4516
4517 task->thread.pfm_context = NULL;
4518
4519
4520
4521 ctx->ctx_task = NULL;
4522
4523
4524
4525
4526 return 0;
4527 }
4528
4529
4530
4531
4532 tregs = task == current ? regs : task_pt_regs(task);
4533
4534 if (task == current) {
4535
4536
4537
4538 ia64_psr(regs)->sp = 1;
4539
4540 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4541 }
4542
4543
4544
4545
4546 pfm_flush_pmds(task, ctx);
4547
4548
4549
4550
4551
4552
4553
4554 if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);
4556
4557
4558
4559
4560 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4561 SET_LAST_CPU(ctx, -1);
4562
4563
4564
4565
4566 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4567
4568
4569
4570
4571 task->thread.pfm_context = NULL;
4572 ctx->ctx_task = NULL;
4573
4574 PFM_SET_WORK_PENDING(task, 0);
4575
4576 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4577 ctx->ctx_fl_can_restart = 0;
4578 ctx->ctx_fl_going_zombie = 0;
4579
4580 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4581
4582 return 0;
4583}
4584
4585
4586
4587
4588
4589
4590void
4591pfm_exit_thread(struct task_struct *task)
4592{
4593 pfm_context_t *ctx;
4594 unsigned long flags;
4595 struct pt_regs *regs = task_pt_regs(task);
4596 int ret, state;
4597 int free_ok = 0;
4598
4599 ctx = PFM_GET_CTX(task);
4600
4601 PROTECT_CTX(ctx, flags);
4602
4603 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4604
4605 state = ctx->ctx_state;
4606 switch(state) {
4607 case PFM_CTX_UNLOADED:
4608
4609
4610
4611
4612 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4613 break;
4614 case PFM_CTX_LOADED:
4615 case PFM_CTX_MASKED:
4616 ret = pfm_context_unload(ctx, NULL, 0, regs);
4617 if (ret) {
4618 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4619 }
4620 DPRINT(("ctx unloaded for current state was %d\n", state));
4621
4622 pfm_end_notify_user(ctx);
4623 break;
4624 case PFM_CTX_ZOMBIE:
4625 ret = pfm_context_unload(ctx, NULL, 0, regs);
4626 if (ret) {
4627 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4628 }
4629 free_ok = 1;
4630 break;
4631 default:
4632 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4633 break;
4634 }
4635 UNPROTECT_CTX(ctx, flags);
4636
4637 { u64 psr = pfm_get_psr();
4638 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4639 BUG_ON(GET_PMU_OWNER());
4640 BUG_ON(ia64_psr(regs)->up);
4641 BUG_ON(ia64_psr(regs)->pp);
4642 }
4643
4644
4645
4646
4647
4648 if (free_ok) pfm_context_free(ctx);
4649}
4650
4651
4652
4653
4654#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4655#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4656#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4657#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4658#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4659
4660static pfm_cmd_desc_t pfm_cmd_tab[]={
4661PFM_CMD_NONE,
4662PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4663PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4664PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4665PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4666PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4667PFM_CMD_NONE,
4668PFM_CMD_NONE,
4669PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4670PFM_CMD_NONE,
4671PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4672PFM_CMD_NONE,
4673PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4674PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4675PFM_CMD_NONE,
4676PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4677PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4678PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4679PFM_CMD_NONE,
4680PFM_CMD_NONE,
4681PFM_CMD_NONE,
4682PFM_CMD_NONE,
4683PFM_CMD_NONE,
4684PFM_CMD_NONE,
4685PFM_CMD_NONE,
4686PFM_CMD_NONE,
4687PFM_CMD_NONE,
4688PFM_CMD_NONE,
4689PFM_CMD_NONE,
4690PFM_CMD_NONE,
4691PFM_CMD_NONE,
4692PFM_CMD_NONE,
4693PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4694PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4695};
4696#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4697
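/*
 * a command may target a context attached to another task. Commands
 * flagged PFM_CMD_STOP require that task to be stopped; the context
 * lock is dropped around wait_task_inactive() and the state is
 * rechecked afterwards because it may have changed meanwhile.
 */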
4698static int
4699pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4700{
4701 struct task_struct *task;
4702 int state, old_state;
4703
4704recheck:
4705 state = ctx->ctx_state;
4706 task = ctx->ctx_task;
4707
4708 if (task == NULL) {
4709 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4710 return 0;
4711 }
4712
4713 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4714 ctx->ctx_fd,
4715 state,
4716 task_pid_nr(task),
4717 task->state, PFM_CMD_STOPPED(cmd)));
4718
4719
4720
4721
4722
4723
4724
4725
4726 if (task == current || ctx->ctx_fl_system) return 0;
4727
4728
4729
4730
4731 switch(state) {
4732 case PFM_CTX_UNLOADED:
4733
4734
4735
4736 return 0;
4737 case PFM_CTX_ZOMBIE:
4738
4739
4740
4741 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4742 return -EINVAL;
4743 case PFM_CTX_MASKED:
4744
4745
4746
4747
4748 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4749 }
4750
4751
4752
4753
4754
4755
4756
4757
4758
4759
4760
4761 if (PFM_CMD_STOPPED(cmd)) {
4762 if (!task_is_stopped_or_traced(task)) {
4763 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4764 return -EBUSY;
4765 }
4766
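		/*
		 * drop the lock while waiting for the task to get off the
		 * CPU; the context state may change meanwhile, hence the
		 * recheck below
		 */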
4780 old_state = state;
4781
4782 UNPROTECT_CTX(ctx, flags);
4783
4784 wait_task_inactive(task, 0);
4785
4786 PROTECT_CTX(ctx, flags);
4787
4788
4789
4790
4791 if (ctx->ctx_state != old_state) {
4792 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4793 goto recheck;
4794 }
4795 }
4796 return 0;
4797}
4798
4799
4800
4801
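/*
 * system call entry point: look the command up in pfm_cmd_tab[],
 * copy and validate its arguments, then dispatch with the context
 * locked (for commands tied to a file descriptor).
 */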
4802asmlinkage long
4803sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4804{
4805 struct file *file = NULL;
4806 pfm_context_t *ctx = NULL;
4807 unsigned long flags = 0UL;
4808 void *args_k = NULL;
4809 long ret;
4810 size_t base_sz, sz, xtra_sz = 0;
4811 int narg, completed_args = 0, call_made = 0, cmd_flags;
4812 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4813 int (*getsize)(void *arg, size_t *sz);
4814#define PFM_MAX_ARGSIZE 4096
4815
4816
4817
4818
4819 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4820
4821 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4822 DPRINT(("invalid cmd=%d\n", cmd));
4823 return -EINVAL;
4824 }
4825
4826 func = pfm_cmd_tab[cmd].cmd_func;
4827 narg = pfm_cmd_tab[cmd].cmd_narg;
4828 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4829 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4830 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4831
4832 if (unlikely(func == NULL)) {
4833 DPRINT(("invalid cmd=%d\n", cmd));
4834 return -EINVAL;
4835 }
4836
4837 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4838 PFM_CMD_NAME(cmd),
4839 cmd,
4840 narg,
4841 base_sz,
4842 count));
4843
4844
4845
4846
4847 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4848 return -EINVAL;
4849
4850restart_args:
4851 sz = xtra_sz + base_sz*count;
4852
4853
4854
4855 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4856 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4857 return -E2BIG;
4858 }
4859
4860
4861
4862
4863 if (likely(count && args_k == NULL)) {
4864 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4865 if (args_k == NULL) return -ENOMEM;
4866 }
4867
4868 ret = -EFAULT;
4869
4870
4871
4872
4873
4874
4875 if (sz && copy_from_user(args_k, arg, sz)) {
4876 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4877 goto error_args;
4878 }
4879
4880
4881
4882
4883 if (completed_args == 0 && getsize) {
4884
4885
4886
4887 ret = (*getsize)(args_k, &xtra_sz);
4888 if (ret) goto error_args;
4889
4890 completed_args = 1;
4891
4892 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4893
4894
4895 if (likely(xtra_sz)) goto restart_args;
4896 }
4897
4898 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4899
4900 ret = -EBADF;
4901
4902 file = fget(fd);
4903 if (unlikely(file == NULL)) {
4904 DPRINT(("invalid fd %d\n", fd));
4905 goto error_args;
4906 }
4907 if (unlikely(PFM_IS_FILE(file) == 0)) {
4908 DPRINT(("fd %d not related to perfmon\n", fd));
4909 goto error_args;
4910 }
4911
4912 ctx = (pfm_context_t *)file->private_data;
4913 if (unlikely(ctx == NULL)) {
4914 DPRINT(("no context for fd %d\n", fd));
4915 goto error_args;
4916 }
4917 prefetch(&ctx->ctx_state);
4918
4919 PROTECT_CTX(ctx, flags);
4920
4921
4922
4923
4924 ret = pfm_check_task_state(ctx, cmd, flags);
4925 if (unlikely(ret)) goto abort_locked;
4926
4927skip_fd:
4928 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4929
4930 call_made = 1;
4931
4932abort_locked:
4933 if (likely(ctx)) {
4934 DPRINT(("context unlocked\n"));
4935 UNPROTECT_CTX(ctx, flags);
4936 }
4937
4938
4939 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4940
4941error_args:
4942 if (file)
4943 fput(file);
4944
4945 kfree(args_k);
4946
4947 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4948
4949 return ret;
4950}
4951
4952static void
4953pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4954{
4955 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4956 pfm_ovfl_ctrl_t rst_ctrl;
4957 int state;
4958 int ret = 0;
4959
4960 state = ctx->ctx_state;
4961
4962
4963
4964
4965 if (CTX_HAS_SMPL(ctx)) {
4966
4967 rst_ctrl.bits.mask_monitoring = 0;
4968 rst_ctrl.bits.reset_ovfl_pmds = 0;
4969
4970 if (state == PFM_CTX_LOADED)
4971 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4972 else
4973 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4974 } else {
4975 rst_ctrl.bits.mask_monitoring = 0;
4976 rst_ctrl.bits.reset_ovfl_pmds = 1;
4977 }
4978
4979 if (ret == 0) {
4980 if (rst_ctrl.bits.reset_ovfl_pmds) {
4981 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4982 }
4983 if (rst_ctrl.bits.mask_monitoring == 0) {
4984 DPRINT(("resuming monitoring\n"));
4985 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4986 } else {
4987 DPRINT(("stopping monitoring\n"));
4988
4989 }
4990 ctx->ctx_state = PFM_CTX_LOADED;
4991 }
4992}
4993
4994
4995
4996
4997
4998static void
4999pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
5000{
5001 int ret;
5002
5003 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
5004
5005 ret = pfm_context_unload(ctx, NULL, 0, regs);
5006 if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
5008 }
5009
5010
5011
5012
5013 wake_up_interruptible(&ctx->ctx_zombieq);
5014
5015
5016
5017
5018
5019
5020}
5021
5022static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5023
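/*
 * deferred work executed on the way back to user mode (notify-resume
 * path): block the monitored task until PFM_RESTART is issued, reset
 * the overflowed PMDs, or finish tearing down a zombie context.
 */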
5033void
5034pfm_handle_work(void)
5035{
5036 pfm_context_t *ctx;
5037 struct pt_regs *regs;
5038 unsigned long flags, dummy_flags;
5039 unsigned long ovfl_regs;
5040 unsigned int reason;
5041 int ret;
5042
5043 ctx = PFM_GET_CTX(current);
5044 if (ctx == NULL) {
5045 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
5046 task_pid_nr(current));
5047 return;
5048 }
5049
5050 PROTECT_CTX(ctx, flags);
5051
5052 PFM_SET_WORK_PENDING(current, 0);
5053
5054 regs = task_pt_regs(current);
5055
5056
5057
5058
5059 reason = ctx->ctx_fl_trap_reason;
5060 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5061 ovfl_regs = ctx->ctx_ovfl_regs[0];
5062
5063 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5064
5065
5066
5067
5068 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
5069 goto do_zombie;
5070
5071
5072 if (reason == PFM_TRAP_REASON_RESET)
5073 goto skip_blocking;
5074
5075
5076
5077
5078
5079 UNPROTECT_CTX(ctx, flags);
5080
5081
5082
5083
5084 local_irq_enable();
5085
5086 DPRINT(("before block sleeping\n"));
5087
5088
5089
5090
5091
5092 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5093
5094 DPRINT(("after block sleeping ret=%d\n", ret));
5095
5096
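	/*
	 * interrupts were re-enabled above, so the previously saved
	 * flags are stale: reacquire the lock into dummy_flags and keep
	 * restoring the original flags at the end of the function
	 */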
5102 PROTECT_CTX(ctx, dummy_flags);
5103
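	/*
	 * re-read the overflow mask: it may have changed while we were
	 * blocked waiting for the restart
	 */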
5110 ovfl_regs = ctx->ctx_ovfl_regs[0];
5111
5112 if (ctx->ctx_fl_going_zombie) {
5113do_zombie:
5114 DPRINT(("context is zombie, bailing out\n"));
5115 pfm_context_force_terminate(ctx, regs);
5116 goto nothing_to_do;
5117 }
5118
5119
5120
5121 if (ret < 0)
5122 goto nothing_to_do;
5123
5124skip_blocking:
5125 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5126 ctx->ctx_ovfl_regs[0] = 0UL;
5127
5128nothing_to_do:
5129
5130
5131
5132 UNPROTECT_CTX(ctx, flags);
5133}
5134
5135static int
5136pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5137{
5138 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5139 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5140 return 0;
5141 }
5142
5143 DPRINT(("waking up somebody\n"));
5144
5145 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5146
5147
5148
5149
5150
5151 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5152
5153 return 0;
5154}
5155
5156static int
5157pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5158{
5159 pfm_msg_t *msg = NULL;
5160
5161 if (ctx->ctx_fl_no_msg == 0) {
5162 msg = pfm_get_new_msg(ctx);
5163 if (msg == NULL) {
5164 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5165 return -1;
5166 }
5167
5168 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5169 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5170 msg->pfm_ovfl_msg.msg_active_set = 0;
5171 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5172 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5173 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5174 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5175 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5176 }
5177
5178 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5179 msg,
5180 ctx->ctx_fl_no_msg,
5181 ctx->ctx_fd,
5182 ovfl_pmds));
5183
5184 return pfm_notify_user(ctx, msg);
5185}
5186
5187static int
5188pfm_end_notify_user(pfm_context_t *ctx)
5189{
5190 pfm_msg_t *msg;
5191
5192 msg = pfm_get_new_msg(ctx);
5193 if (msg == NULL) {
5194 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5195 return -1;
5196 }
5197
5198 memset(msg, 0, sizeof(*msg));
5199
5200 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5201 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5202 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5203
5204 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5205 msg,
5206 ctx->ctx_fl_no_msg,
5207 ctx->ctx_fd));
5208
5209 return pfm_notify_user(ctx, msg);
5210}
5211
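/*
 * main overflow processing routine: invoked with the context locked
 * and the PMU frozen (PMC0 was read by the caller). Updates the
 * 64-bit software counters, runs the sampling-format handler if any,
 * and decides whether to notify, block or mask the monitored task.
 */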
5216static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5217 unsigned long pmc0, struct pt_regs *regs)
5218{
5219 pfm_ovfl_arg_t *ovfl_arg;
5220 unsigned long mask;
5221 unsigned long old_val, ovfl_val, new_val;
5222 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5223 unsigned long tstamp;
5224 pfm_ovfl_ctrl_t ovfl_ctrl;
5225 unsigned int i, has_smpl;
5226 int must_notify = 0;
5227
5228 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5229
5230
5231
5232
5233 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5234
5235 tstamp = ia64_get_itc();
5236 mask = pmc0 >> PMU_FIRST_COUNTER;
5237 ovfl_val = pmu_conf->ovfl_val;
5238 has_smpl = CTX_HAS_SMPL(ctx);
5239
5240 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5241 "used_pmds=0x%lx\n",
5242 pmc0,
5243 task ? task_pid_nr(task): -1,
5244 (regs ? regs->cr_iip : 0),
5245 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5246 ctx->ctx_used_pmds[0]));
5247
5248
5249
5250
5251
5252
5253 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5254
5255
5256 if ((mask & 0x1) == 0) continue;
5257
5258
5259
5260
5261
5262
5263
5264 old_val = new_val = ctx->ctx_pmds[i].val;
5265 new_val += 1 + ovfl_val;
5266 ctx->ctx_pmds[i].val = new_val;
5267
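		/*
		 * a 64-bit counter wrap-around shows up as the new
		 * software value being smaller than the old one
		 */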
5271 if (likely(old_val > new_val)) {
5272 ovfl_pmds |= 1UL << i;
5273 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5274 }
5275
5276 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5277 i,
5278 new_val,
5279 old_val,
5280 ia64_get_pmd(i) & ovfl_val,
5281 ovfl_pmds,
5282 ovfl_notify));
5283 }
5284
5285
5286
5287
5288 if (ovfl_pmds == 0UL) return;
5289
5290
5291
5292
5293 ovfl_ctrl.val = 0;
5294 reset_pmds = 0UL;
5295
5296
5297
5298
5299
5300 if (has_smpl) {
5301 unsigned long start_cycles, end_cycles;
5302 unsigned long pmd_mask;
5303 int j, k, ret = 0;
5304 int this_cpu = smp_processor_id();
5305
5306 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5307 ovfl_arg = &ctx->ctx_ovfl_arg;
5308
5309 prefetch(ctx->ctx_smpl_hdr);
5310
5311 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5312
5313 mask = 1UL << i;
5314
5315 if ((pmd_mask & 0x1) == 0) continue;
5316
5317 ovfl_arg->ovfl_pmd = (unsigned char )i;
5318 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5319 ovfl_arg->active_set = 0;
5320 ovfl_arg->ovfl_ctrl.val = 0;
5321 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5322
5323 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5324 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5325 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5326
5327
5328
5329
5330
5331 if (smpl_pmds) {
5332 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5333 if ((smpl_pmds & 0x1) == 0) continue;
5334 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5335 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5336 }
5337 }
5338
5339 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5340
5341 start_cycles = ia64_get_itc();
5342
5343
5344
5345
5346 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5347
5348 end_cycles = ia64_get_itc();
5349
5350
5351
5352
5353
5354 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5355 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5356 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5357
5358
5359
5360 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5361
5362 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5363 }
5364
5365
5366
5367 if (ret && pmd_mask) {
5368 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5369 pmd_mask<<PMU_FIRST_COUNTER));
5370 }
5371
5372
5373
5374 ovfl_pmds &= ~reset_pmds;
5375 } else {
5376
5377
5378
5379
5380 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5381 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5382 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5383 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5384
5385
5386
5387 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5388 }
5389
5390 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5391
5392
5393
5394
5395 if (reset_pmds) {
5396 unsigned long bm = reset_pmds;
5397 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5398 }
5399
5400 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5401
5402
5403
5404 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5405
5406
5407
5408
5409 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5410
5411 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5412
5413
5414
5415
5416 PFM_SET_WORK_PENDING(task, 1);
5417
5418
5419
5420
5421
5422 set_notify_resume(task);
5423 }
5424
5425
5426
5427
5428 must_notify = 1;
5429 }
5430
5431 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5432 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5433 PFM_GET_WORK_PENDING(task),
5434 ctx->ctx_fl_trap_reason,
5435 ovfl_pmds,
5436 ovfl_notify,
5437 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
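
	/*
	 * when monitoring must be stopped, mask it and flag the context
	 * as MASKED so that it can later be restarted
	 */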
5441 if (ovfl_ctrl.bits.mask_monitoring) {
5442 pfm_mask_monitoring(task);
5443 ctx->ctx_state = PFM_CTX_MASKED;
5444 ctx->ctx_fl_can_restart = 1;
5445 }
5446
5447
5448
5449
5450 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5451
5452 return;
5453
5454sanity_check:
5455 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5456 smp_processor_id(),
5457 task ? task_pid_nr(task) : -1,
5458 pmc0);
5459 return;
5460
5461stop_monitoring:
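	/*
	 * the context has become a zombie: monitoring must not continue on
	 * its behalf. Stop user-level monitoring both in the PSR and in the
	 * task's saved state, and set psr.sp to re-enable protection. The
	 * overflow is thereby converted into a spurious interrupt; the
	 * zombie context itself is reclaimed on the context switch path.
	 */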
5490 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5491 pfm_clear_psr_up();
5492 ia64_psr(regs)->up = 0;
5493 ia64_psr(regs)->sp = 1;
5494 return;
5495}
5496
5497static int
5498pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5499{
5500 struct task_struct *task;
5501 pfm_context_t *ctx;
5502 unsigned long flags;
5503 u64 pmc0;
5504 int this_cpu = smp_processor_id();
5505 int retval = 0;
5506
5507 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5508
5509
5510
5511
5512 pmc0 = ia64_get_pmc(0);
5513
5514 task = GET_PMU_OWNER();
5515 ctx = GET_PMU_CTX();
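
	/*
	 * only act when overflow bits are pending and the PMU has an
	 * owner; otherwise the interrupt is counted as spurious
	 */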
5521 if (PMC0_HAS_OVFL(pmc0) && task) {
5522
5523
5524
5525
5526
5527 if (!ctx) goto report_spurious1;
5528
5529 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5530 goto report_spurious2;
5531
5532 PROTECT_CTX_NOPRINT(ctx, flags);
5533
5534 pfm_overflow_handler(task, ctx, pmc0, regs);
5535
5536 UNPROTECT_CTX_NOPRINT(ctx, flags);
5537
5538 } else {
5539 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5540 retval = -1;
5541 }
5545 pfm_unfreeze_pmu();
5546
5547 return retval;
5548
5549report_spurious1:
5550 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5551 this_cpu, task_pid_nr(task));
5552 pfm_unfreeze_pmu();
5553 return -1;
5554report_spurious2:
5555 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5556 this_cpu,
5557 task_pid_nr(task));
5558 pfm_unfreeze_pmu();
5559 return -1;
5560}
5561
5562static irqreturn_t
5563pfm_interrupt_handler(int irq, void *arg)
5564{
5565 unsigned long start_cycles, total_cycles;
5566 unsigned long min, max;
5567 int this_cpu;
5568 int ret;
5569 struct pt_regs *regs = get_irq_regs();
5570
5571 this_cpu = get_cpu();
5572 if (likely(!pfm_alt_intr_handler)) {
5573 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5574 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5575
5576 start_cycles = ia64_get_itc();
5577
5578 ret = pfm_do_interrupt_handler(arg, regs);
5579
5580 total_cycles = ia64_get_itc();
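
		/*
		 * only account cycles for genuine, non-spurious interrupts
		 */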
5585 if (likely(ret == 0)) {
5586 total_cycles -= start_cycles;
5587
5588 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5589 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5590
5591 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5592 }
5593 }
5594 else {
5595 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
5596 }
5597
5598 put_cpu();
5599 return IRQ_HANDLED;
5600}
5606#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5607
5608static void *
5609pfm_proc_start(struct seq_file *m, loff_t *pos)
5610{
5611 if (*pos == 0) {
5612 return PFM_PROC_SHOW_HEADER;
5613 }
5614
5615 while (*pos <= nr_cpu_ids) {
5616 if (cpu_online(*pos - 1)) {
5617 return (void *)*pos;
5618 }
5619 ++*pos;
5620 }
5621 return NULL;
5622}
5623
5624static void *
5625pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5626{
5627 ++*pos;
5628 return pfm_proc_start(m, pos);
5629}
5630
5631static void
5632pfm_proc_stop(struct seq_file *m, void *v)
5633{
5634}
5635
5636static void
5637pfm_proc_show_header(struct seq_file *m)
5638{
5639 struct list_head * pos;
5640 pfm_buffer_fmt_t * entry;
5641 unsigned long flags;
5642
5643 seq_printf(m,
5644 "perfmon version : %u.%u\n"
5645 "model : %s\n"
5646 "fastctxsw : %s\n"
5647 "expert mode : %s\n"
5648 "ovfl_mask : 0x%lx\n"
5649 "PMU flags : 0x%x\n",
5650 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5651 pmu_conf->pmu_name,
5652 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5653 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5654 pmu_conf->ovfl_val,
5655 pmu_conf->flags);
5656
5657 LOCK_PFS(flags);
5658
5659 seq_printf(m,
5660 "proc_sessions : %u\n"
5661 "sys_sessions : %u\n"
5662 "sys_use_dbregs : %u\n"
5663 "ptrace_use_dbregs : %u\n",
5664 pfm_sessions.pfs_task_sessions,
5665 pfm_sessions.pfs_sys_sessions,
5666 pfm_sessions.pfs_sys_use_dbregs,
5667 pfm_sessions.pfs_ptrace_use_dbregs);
5668
5669 UNLOCK_PFS(flags);
5670
5671 spin_lock(&pfm_buffer_fmt_lock);
5672
5673 list_for_each(pos, &pfm_buffer_fmt_list) {
5674 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5675 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5676 entry->fmt_uuid[0],
5677 entry->fmt_uuid[1],
5678 entry->fmt_uuid[2],
5679 entry->fmt_uuid[3],
5680 entry->fmt_uuid[4],
5681 entry->fmt_uuid[5],
5682 entry->fmt_uuid[6],
5683 entry->fmt_uuid[7],
5684 entry->fmt_uuid[8],
5685 entry->fmt_uuid[9],
5686 entry->fmt_uuid[10],
5687 entry->fmt_uuid[11],
5688 entry->fmt_uuid[12],
5689 entry->fmt_uuid[13],
5690 entry->fmt_uuid[14],
5691 entry->fmt_uuid[15],
5692 entry->fmt_name);
5693 }
5694 spin_unlock(&pfm_buffer_fmt_lock);
5695
5696}
5697
5698static int
5699pfm_proc_show(struct seq_file *m, void *v)
5700{
5701 unsigned long psr;
5702 unsigned int i;
5703 int cpu;
5704
5705 if (v == PFM_PROC_SHOW_HEADER) {
5706 pfm_proc_show_header(m);
5707 return 0;
5708 }
5709
5710
5711
5712 cpu = (long)v - 1;
5713 seq_printf(m,
5714 "CPU%-2d overflow intrs : %lu\n"
5715 "CPU%-2d overflow cycles : %lu\n"
5716 "CPU%-2d overflow min : %lu\n"
5717 "CPU%-2d overflow max : %lu\n"
5718 "CPU%-2d smpl handler calls : %lu\n"
5719 "CPU%-2d smpl handler cycles : %lu\n"
5720 "CPU%-2d spurious intrs : %lu\n"
5721 "CPU%-2d replay intrs : %lu\n"
5722 "CPU%-2d syst_wide : %d\n"
5723 "CPU%-2d dcr_pp : %d\n"
5724 "CPU%-2d exclude idle : %d\n"
5725 "CPU%-2d owner : %d\n"
5726 "CPU%-2d context : %p\n"
5727 "CPU%-2d activations : %lu\n",
5728 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5729 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5730 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5731 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5732 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5733 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5734 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5735 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5736 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5737 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5738 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5739 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5740 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5741 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5742
5743 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5744
5745 psr = pfm_get_psr();
5746
5747 ia64_srlz_d();
5748
5749 seq_printf(m,
5750 "CPU%-2d psr : 0x%lx\n"
5751 "CPU%-2d pmc0 : 0x%lx\n",
5752 cpu, psr,
5753 cpu, ia64_get_pmc(0));
5754
5755 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5756 if (PMC_IS_COUNTING(i) == 0) continue;
5757 seq_printf(m,
5758 "CPU%-2d pmc%u : 0x%lx\n"
5759 "CPU%-2d pmd%u : 0x%lx\n",
5760 cpu, i, ia64_get_pmc(i),
5761 cpu, i, ia64_get_pmd(i));
5762 }
5763 }
5764 return 0;
5765}
5766
5767const struct seq_operations pfm_seq_ops = {
5768 .start = pfm_proc_start,
5769 .next = pfm_proc_next,
5770 .stop = pfm_proc_stop,
5771 .show = pfm_proc_show
5772};
5773
5774static int
5775pfm_proc_open(struct inode *inode, struct file *file)
5776{
5777 return seq_open(file, &pfm_seq_ops);
5778}
5787void
5788pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5789{
5790 struct pt_regs *regs;
5791 unsigned long dcr;
5792 unsigned long dcr_pp;
5793
5794 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
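
	/*
	 * pid 0 is guaranteed to be the idle task; any task with a
	 * non-zero pid is handled here
	 */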
5800 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5801 regs = task_pt_regs(task);
5802 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5803 return;
5804 }
5805
5806
5807
5808 if (dcr_pp) {
5809 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5810
5811
5812
5813 if (is_ctxswin) {
5814
5815 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5816 pfm_clear_psr_pp();
5817 ia64_srlz_i();
5818 return;
5819 }
5820
5821
5822
5823
5824
5825
5826
5827 ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
5828 pfm_set_psr_pp();
5829 ia64_srlz_i();
5830 }
5831}
5832
5833#ifdef CONFIG_SMP
5834
5835static void
5836pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5837{
5838 struct task_struct *task = ctx->ctx_task;
5839
5840 ia64_psr(regs)->up = 0;
5841 ia64_psr(regs)->sp = 1;
5842
5843 if (GET_PMU_OWNER() == task) {
5844 DPRINT(("cleared ownership for [%d]\n",
5845 task_pid_nr(ctx->ctx_task)));
5846 SET_PMU_OWNER(NULL, NULL);
5847 }
5848
5849
5850
5851
5852 PFM_SET_WORK_PENDING(task, 0);
5853
5854 task->thread.pfm_context = NULL;
5855 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5856
5857 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5858}
5864void
5865pfm_save_regs(struct task_struct *task)
5866{
5867 pfm_context_t *ctx;
5868 unsigned long flags;
5869 u64 psr;
5870
5871
5872 ctx = PFM_GET_CTX(task);
5873 if (ctx == NULL) return;
5874
5875
5876
5877
5878
5879
5880 flags = pfm_protect_ctx_ctxsw(ctx);
5881
5882 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5883 struct pt_regs *regs = task_pt_regs(task);
5884
5885 pfm_clear_psr_up();
5886
5887 pfm_force_cleanup(ctx, regs);
5888
5889 BUG_ON(ctx->ctx_smpl_hdr);
5890
5891 pfm_unprotect_ctx_ctxsw(ctx, flags);
5892
5893 pfm_context_free(ctx);
5894 return;
5895 }
5896
5897
5898
5899
5900 ia64_srlz_d();
5901 psr = pfm_get_psr();
5902
5903 BUG_ON(psr & (IA64_PSR_I));
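
	/*
	 * stop monitoring: this is the last instruction which may generate
	 * an overflow. psr.sp need not be set here: it is irrelevant in
	 * the kernel and is restored from ipsr on return to user level
	 */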
5912 pfm_clear_psr_up();
5913
5914
5915
5916
5917 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5918
5919
5920
5921
5922
5923
5924 SET_PMU_OWNER(NULL, NULL);
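
	/*
	 * save the PMDs unconditionally: there is no guarantee this task
	 * will be rescheduled on the same CPU
	 */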
5931 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
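
	/*
	 * save pmc0 (the srlz.d is done in pfm_save_pmds()); it is needed
	 * on the restore path to detect a pending overflow
	 */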
5938 ctx->th_pmcs[0] = ia64_get_pmc(0);
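
	/*
	 * unfreeze the PMU if it had pending overflows
	 */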
5943 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5944
5945
5946
5947
5948
5949 pfm_unprotect_ctx_ctxsw(ctx, flags);
5950}
5951
5952#else
5953void
5954pfm_save_regs(struct task_struct *task)
5955{
5956 pfm_context_t *ctx;
5957 u64 psr;
5958
5959 ctx = PFM_GET_CTX(task);
5960 if (ctx == NULL) return;
5961
5962
5963
5964
5965 psr = pfm_get_psr();
5966
5967 BUG_ON(psr & (IA64_PSR_I));
5968
5969
5970
5971
5972
5973
5974
5975
5976 pfm_clear_psr_up();
5977
5978
5979
5980
5981 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5982}
5983
5984static void
5985pfm_lazy_save_regs (struct task_struct *task)
5986{
5987 pfm_context_t *ctx;
5988 unsigned long flags;
5989
	BUG_ON(pfm_get_psr() & IA64_PSR_UP);
5993
5994 ctx = PFM_GET_CTX(task);
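
	/*
	 * mask PMU overflow interrupts so that pmc0 stays intact until it
	 * is saved; with no owner, an interrupt in between would be
	 * treated as spurious
	 */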
6005 PROTECT_CTX(ctx,flags);
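
	/*
	 * release ownership before saving the registers; from this point
	 * on, any PMU interrupt is treated as spurious
	 */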
6014 SET_PMU_OWNER(NULL, NULL);
6015
6016
6017
6018
6019 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6020
6021
6022
6023
6024
6025
6026 ctx->th_pmcs[0] = ia64_get_pmc(0);
6027
6028
6029
6030
6031 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6032
6033
6034
6035
6036
6037
6038 UNPROTECT_CTX(ctx,flags);
6039}
6040#endif
6041
6042#ifdef CONFIG_SMP
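/*
 * reload PMU state on context switch in (SMP); the scheduler has
 * already masked interrupts when we come here
 */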
6046void
6047pfm_load_regs (struct task_struct *task)
6048{
6049 pfm_context_t *ctx;
6050 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6051 unsigned long flags;
6052 u64 psr, psr_up;
6053 int need_irq_resend;
6054
6055 ctx = PFM_GET_CTX(task);
6056 if (unlikely(ctx == NULL)) return;
6057
6058 BUG_ON(GET_PMU_OWNER());
6059
6060
6061
6062
6063 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6064
6065
6066
6067
6068
6069
6070 flags = pfm_protect_ctx_ctxsw(ctx);
6071 psr = pfm_get_psr();
6072
6073 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6074
6075 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6076 BUG_ON(psr & IA64_PSR_I);
6077
6078 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6079 struct pt_regs *regs = task_pt_regs(task);
6080
6081 BUG_ON(ctx->ctx_smpl_hdr);
6082
6083 pfm_force_cleanup(ctx, regs);
6084
6085 pfm_unprotect_ctx_ctxsw(ctx, flags);
6086
6087
6088
6089
6090 pfm_context_free(ctx);
6091
6092 return;
6093 }
6094
6095
6096
6097
6098
6099 if (ctx->ctx_fl_using_dbreg) {
6100 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6101 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6102 }
6103
6104
6105
6106 psr_up = ctx->ctx_saved_psr_up;
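
	/*
	 * if we were the last user of the PMU on this CPU and the state
	 * was not touched in between (same activation number), only the
	 * registers modified while switched out need to be reloaded
	 */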
6112 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6113
6114
6115
6116
6117 pmc_mask = ctx->ctx_reload_pmcs[0];
6118 pmd_mask = ctx->ctx_reload_pmds[0];
6119
6120 } else {
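
		/*
		 * to avoid leaking information to user level when psr.sp=0,
		 * ALL implemented PMDs are reloaded, not just the ones in
		 * use, unless the fastctxsw option restricts the reload to
		 * the used set
		 */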
6127 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
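
		/*
		 * ALL accessible PMCs are reloaded: unused registers keep
		 * their default values so no stale configuration is picked
		 * up. PMC0 is never part of the mask; it is restored
		 * separately
		 */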
6136 pmc_mask = ctx->ctx_all_pmcs[0];
6137 }
6138
6139
6140
6141
6142
6143
6144
6145 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6146 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6147
6148
6149
6150
6151
6152 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
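
		/*
		 * reload pmc0 with the saved overflow information; on some
		 * PMUs this alone re-triggers the overflow interrupt
		 */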
6157 ia64_set_pmc(0, ctx->th_pmcs[0]);
6158 ia64_srlz_d();
6159 ctx->th_pmcs[0] = 0UL;
6160
6161
6162
6163
6164 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6165
6166 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6167 }
6168
6169
6170
6171
6172 ctx->ctx_reload_pmcs[0] = 0UL;
6173 ctx->ctx_reload_pmds[0] = 0UL;
6174
6175 SET_LAST_CPU(ctx, smp_processor_id());
6176
6177
6178
6179
6180 INC_ACTIVATION();
6181
6182
6183
6184 SET_ACTIVATION(ctx);
6185
6186
6187
6188
6189 SET_PMU_OWNER(task, ctx);
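
	/*
	 * restore psr.up: measurement is active again. No PMU interrupt
	 * can occur here because interrupts are still disabled
	 */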
6197 if (likely(psr_up)) pfm_set_psr_up();
6198
6199
6200
6201
6202 pfm_unprotect_ctx_ctxsw(ctx, flags);
6203}
6204#else
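/*
 * reload PMU state on context switch in (UP); the previous owner's
 * state, if any, is lazily pushed out first
 */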
6209void
6210pfm_load_regs (struct task_struct *task)
6211{
6212 pfm_context_t *ctx;
6213 struct task_struct *owner;
6214 unsigned long pmd_mask, pmc_mask;
6215 u64 psr, psr_up;
6216 int need_irq_resend;
6217
6218 owner = GET_PMU_OWNER();
6219 ctx = PFM_GET_CTX(task);
6220 psr = pfm_get_psr();
6221
6222 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6223 BUG_ON(psr & IA64_PSR_I);
6224
6225
6226
6227
6228
6229
6230
6231
6232
6233 if (ctx->ctx_fl_using_dbreg) {
6234 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6235 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6236 }
6237
6238
6239
6240
6241 psr_up = ctx->ctx_saved_psr_up;
6242 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
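
	/*
	 * short path: our state is still in the PMU, so only psr.up needs
	 * to be restored; neither PMCs nor PMDs are touched
	 */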
6252 if (likely(owner == task)) {
6253 if (likely(psr_up)) pfm_set_psr_up();
6254 return;
6255 }
6263 if (owner) pfm_lazy_save_regs(owner);
6264
6265
6266
6267
6268
6269
6270
6271 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6272
6273
6274
6275
6276
6277
6278
6279
6280 pmc_mask = ctx->ctx_all_pmcs[0];
6281
6282 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6283 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6284
6285
6286
6287
6288
6289 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6290
6291
6292
6293
6294 ia64_set_pmc(0, ctx->th_pmcs[0]);
6295 ia64_srlz_d();
6296
6297 ctx->th_pmcs[0] = 0UL;
6298
6299
6300
6301
6302 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6303
6304 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6305 }
6306
6307
6308
6309
6310 SET_PMU_OWNER(task, ctx);
6311
6312
6313
6314
6315
6316
6317
6318 if (likely(psr_up)) pfm_set_psr_up();
6319}
6320#endif
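
/*
 * flush the hardware PMD values back into the context; assumes
 * monitoring has already been stopped
 */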
6325static void
6326pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6327{
6328 u64 pmc0;
6329 unsigned long mask2, val, pmd_val, ovfl_val;
6330 int i, can_access_pmu = 0;
6331 int is_self;
6332
6333
6334
6335
6336
6337 is_self = ctx->ctx_task == task ? 1 : 0;
6338
6339
6340
6341
6342
6343
6344
6345
6346 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6347 if (can_access_pmu) {
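
		/*
		 * release ownership first: a late overflow interrupt then
		 * finds no owner and does nothing, so pmc0 is guaranteed
		 * to hold the final overflow state
		 */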
6356 SET_PMU_OWNER(NULL, NULL);
6357 DPRINT(("releasing ownership\n"));
6358
6359
6360
6361
6362
6363
6364 ia64_srlz_d();
6365 pmc0 = ia64_get_pmc(0);
6366
6367
6368
6369
6370 pfm_unfreeze_pmu();
6371 } else {
6372 pmc0 = ctx->th_pmcs[0];
6373
6374
6375
6376 ctx->th_pmcs[0] = 0;
6377 }
6378 ovfl_val = pmu_conf->ovfl_val;
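
	/*
	 * flush all the PMDs in use, folding pending overflows into the
	 * counting PMDs
	 */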
6385 mask2 = ctx->ctx_used_pmds[0];
6386
6387 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6388
6389 for (i = 0; mask2; i++, mask2>>=1) {
6390
6391
6392 if ((mask2 & 0x1) == 0) continue;
6393
6394
6395
6396
6397 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6398
6399 if (PMD_IS_COUNTING(i)) {
6400 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6401 task_pid_nr(task),
6402 i,
6403 ctx->ctx_pmds[i].val,
6404 val & ovfl_val));
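
			/*
			 * rebuild the full 64-bit value of the counter
			 */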
6409 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
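
			/*
			 * the value now lives in ctx_pmds[]; clear the copy
			 * saved at context switch so a later read returns the
			 * correct value
			 */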
6416 pmd_val = 0UL;
6417
6418
6419
6420
6421 if (pmc0 & (1UL << i)) {
6422 val += 1 + ovfl_val;
6423 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6424 }
6425 }
6426
6427 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6428
6429 if (is_self) ctx->th_pmds[i] = pmd_val;
6430
6431 ctx->ctx_pmds[i].val = val;
6432 }
6433}
6434
6435static struct irqaction perfmon_irqaction = {
6436 .handler = pfm_interrupt_handler,
6437 .flags = IRQF_DISABLED,
6438 .name = "perfmon"
6439};
6440
6441static void
6442pfm_alt_save_pmu_state(void *data)
6443{
6444 struct pt_regs *regs;
6445
6446 regs = task_pt_regs(current);
6447
6448 DPRINT(("called\n"));
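
	/*
	 * stop monitoring on this CPU, at both the task and system level
	 */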
6454 pfm_clear_psr_up();
6455 pfm_clear_psr_pp();
6456 ia64_psr(regs)->pp = 0;
6457
6458
6459
6460
6461
6462 pfm_freeze_pmu();
6463
6464 ia64_srlz_d();
6465}
6466
6467void
6468pfm_alt_restore_pmu_state(void *data)
6469{
6470 struct pt_regs *regs;
6471
6472 regs = task_pt_regs(current);
6473
6474 DPRINT(("called\n"));
6475
6476
6477
6478
6479
6480 pfm_clear_psr_up();
6481 pfm_clear_psr_pp();
6482 ia64_psr(regs)->pp = 0;
6483
6484
6485
6486
6487 pfm_unfreeze_pmu();
6488
6489 ia64_srlz_d();
6490}
6491
6492int
6493pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6494{
6495 int ret, i;
6496 int reserve_cpu;
6497
6498
6499 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6500
6501
6502 if (pfm_alt_intr_handler) return -EBUSY;
6503
6504
6505 if (!spin_trylock(&pfm_alt_install_check)) {
6506 return -EBUSY;
6507 }
6508
6509
6510 for_each_online_cpu(reserve_cpu) {
6511 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6512 if (ret) goto cleanup_reserve;
6513 }
6514
6515
6516 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
6517 if (ret) {
6518 DPRINT(("on_each_cpu() failed: %d\n", ret));
6519 goto cleanup_reserve;
6520 }
6521
6522
6523 pfm_alt_intr_handler = hdl;
6524
6525 spin_unlock(&pfm_alt_install_check);
6526
6527 return 0;
6528
6529cleanup_reserve:
6530 for_each_online_cpu(i) {
6531
6532 if (i >= reserve_cpu) break;
6533
6534 pfm_unreserve_session(NULL, 1, i);
6535 }
6536
6537 spin_unlock(&pfm_alt_install_check);
6538
6539 return ret;
6540}
6541EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6542
6543int
6544pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6545{
6546 int i;
6547 int ret;
6548
6549 if (hdl == NULL) return -EINVAL;
6550
6551
6552 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6553
6554
6555 if (!spin_trylock(&pfm_alt_install_check)) {
6556 return -EBUSY;
6557 }
6558
6559 pfm_alt_intr_handler = NULL;
6560
6561 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
6562 if (ret) {
6563 DPRINT(("on_each_cpu() failed: %d\n", ret));
6564 }
6565
6566 for_each_online_cpu(i) {
6567 pfm_unreserve_session(NULL, 1, i);
6568 }
6569
6570 spin_unlock(&pfm_alt_install_check);
6571
6572 return 0;
6573}
6574EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
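
/*
 * perfmon initialization, called from the initcall table
 */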
6579static int init_pfm_fs(void);
6580
6581static int __init
6582pfm_probe_pmu(void)
6583{
6584 pmu_config_t **p;
6585 int family;
6586
6587 family = local_cpu_data->family;
6588 p = pmu_confs;
6589
6590 while(*p) {
6591 if ((*p)->probe) {
6592 if ((*p)->probe() == 0) goto found;
6593 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6594 goto found;
6595 }
6596 p++;
6597 }
6598 return -1;
6599found:
6600 pmu_conf = *p;
6601 return 0;
6602}
6603
6604static const struct file_operations pfm_proc_fops = {
6605 .open = pfm_proc_open,
6606 .read = seq_read,
6607 .llseek = seq_lseek,
6608 .release = seq_release,
6609};
6610
6611int __init
6612pfm_init(void)
6613{
6614 unsigned int n, n_counters, i;
6615
6616 printk("perfmon: version %u.%u IRQ %u\n",
6617 PFM_VERSION_MAJ,
6618 PFM_VERSION_MIN,
6619 IA64_PERFMON_VECTOR);
6620
6621 if (pfm_probe_pmu()) {
6622 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6623 local_cpu_data->family);
6624 return -ENODEV;
6625 }
6631 n = 0;
6632 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6633 if (PMC_IS_IMPL(i) == 0) continue;
6634 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6635 n++;
6636 }
6637 pmu_conf->num_pmcs = n;
6638
6639 n = 0; n_counters = 0;
6640 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6641 if (PMD_IS_IMPL(i) == 0) continue;
6642 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6643 n++;
6644 if (PMD_IS_COUNTING(i)) n_counters++;
6645 }
6646 pmu_conf->num_pmds = n;
6647 pmu_conf->num_counters = n_counters;
6648
6649
6650
6651
6652 if (pmu_conf->use_rr_dbregs) {
6653 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6654 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6655 pmu_conf = NULL;
6656 return -1;
6657 }
6658 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
		printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
6660 pmu_conf = NULL;
6661 return -1;
6662 }
6663 }
6664
6665 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6666 pmu_conf->pmu_name,
6667 pmu_conf->num_pmcs,
6668 pmu_conf->num_pmds,
6669 pmu_conf->num_counters,
6670 ffz(pmu_conf->ovfl_val));
6671
6672
6673 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6674 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6675 pmu_conf = NULL;
6676 return -1;
6677 }
6678
6679
6680
6681
6682 perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
6683 if (perfmon_dir == NULL) {
6684 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6685 pmu_conf = NULL;
6686 return -1;
6687 }
6688
6689
6690
6691
6692 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6693
6694
6695
6696
6697 spin_lock_init(&pfm_sessions.pfs_lock);
6698 spin_lock_init(&pfm_buffer_fmt_lock);
6699
6700 init_pfm_fs();
6701
6702 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6703
6704 return 0;
6705}
6706
6707__initcall(pfm_init);
6708
6709
6710
6711
6712void
6713pfm_init_percpu (void)
6714{
6715 static int first_time=1;
6716
6717
6718
6719
6720 pfm_clear_psr_pp();
6721 pfm_clear_psr_up();
6722
6723
6724
6725
6726 pfm_unfreeze_pmu();
6727
6728 if (first_time) {
6729 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6730 first_time=0;
6731 }
6732
6733 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6734 ia64_srlz_d();
6735}
6740void
6741dump_pmu_state(const char *from)
6742{
6743 struct task_struct *task;
6744 struct pt_regs *regs;
6745 pfm_context_t *ctx;
6746 unsigned long psr, dcr, info, flags;
6747 int i, this_cpu;
6748
6749 local_irq_save(flags);
6750
6751 this_cpu = smp_processor_id();
6752 regs = task_pt_regs(current);
6753 info = PFM_CPUINFO_GET();
6754 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6755
6756 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6757 local_irq_restore(flags);
6758 return;
6759 }
6760
6761 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6762 this_cpu,
6763 from,
6764 task_pid_nr(current),
6765 regs->cr_iip,
6766 current->comm);
6767
6768 task = GET_PMU_OWNER();
6769 ctx = GET_PMU_CTX();
6770
6771 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6772
6773 psr = pfm_get_psr();
6774
6775 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6776 this_cpu,
6777 ia64_get_pmc(0),
6778 psr & IA64_PSR_PP ? 1 : 0,
6779 psr & IA64_PSR_UP ? 1 : 0,
6780 dcr & IA64_DCR_PP ? 1 : 0,
6781 info,
6782 ia64_psr(regs)->up,
6783 ia64_psr(regs)->pp);
6784
6785 ia64_psr(regs)->up = 0;
6786 ia64_psr(regs)->pp = 0;
6787
6788 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6789 if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx ? ctx->th_pmcs[i] : 0UL);
6791 }
6792
6793 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6794 if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx ? ctx->th_pmds[i] : 0UL);
6796 }
6797
6798 if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
6800 this_cpu,
6801 ctx->ctx_state,
6802 ctx->ctx_smpl_vaddr,
6803 ctx->ctx_smpl_hdr,
6804 ctx->ctx_msgq_head,
6805 ctx->ctx_msgq_tail,
6806 ctx->ctx_saved_psr_up);
6807 }
6808 local_irq_restore(flags);
6809}
6814void
6815pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6816{
6817 struct thread_struct *thread;
6818
6819 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6820
6821 thread = &task->thread;
6822
6823
6824
6825
6826 thread->pfm_context = NULL;
6827
6828 PFM_SET_WORK_PENDING(task, 0);
6829
6830
6831
6832
6833}
6834#else
6835asmlinkage long
6836sys_perfmonctl (int fd, int cmd, void *arg, int count)
6837{
6838 return -ENOSYS;
6839}
6840#endif
6841