#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON

/*
 * perfmon context states
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded, monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for ctxsw */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for ctxsw */

/*
 * depth of the overflow message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

/*
 * type of a PMU register (bitmask).
 * bit0   : register implemented
 * bit1   : end marker
 * bit4   : pmc has pmc.pm
 * bit5   : pmc controls a counter (has pmc.oi), pmd is used as counter
 * bit6-7 : register type
 */
#define PFM_REG_NOTIMPL		0x0	/* not implemented at all */
#define PFM_REG_IMPL		0x1	/* register implemented */
#define PFM_REG_END		0x2	/* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	/* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	/* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	/* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	/* PMD used as buffer */

#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned (up to 64 registers) */
#define PMC_IS_IMPL(i)	  (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5 /* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
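/*
 * Context locking: PROTECT_CTX()/UNPROTECT_CTX() bracket every access
 * to a pfm_context_t from process context. The irqsave variants are
 * needed because the PMU overflow interrupt handler takes the same
 * lock; the _NOIRQ flavor is for paths that already run with
 * interrupts disabled (e.g. context switch).
 */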
#define PROTECT_CTX(c, f) \
	do {  \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p  by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)

#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif
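/*
 * 64-bit software-maintained view of a counting PMD. The PMU keeps
 * only the low-order bits in hardware; the overflow bits accumulate
 * in 'val' (see pfm_read_soft_counter()/pfm_write_soft_counter()).
 */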
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;

/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will be blocked on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK	0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET	0x2	/* we need to reset PMDs */
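/*
 * perfmon context: encapsulates all the state of one monitoring session
 */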
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMDs used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force-reload PMDs on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force-reload PMCs on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMCs being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];		/* bitmask of used IBRs (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];		/* bitmask of used DBRs (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDs */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;

/*
 * magic number used to verify that the structure is really
 * a perfmon context
 */
#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

/*
 * shorthands for the context flag bitfields
 */
#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
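/*
 * global bookkeeping of active monitoring sessions; enforces the
 * system-wide vs. per-task exclusion rules (see pfm_reserve_session()).
 */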
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system wide session */
} pfm_session_t;

/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
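/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 */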
typedef struct {
	unsigned long  ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t *pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t *pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int   num_pmcs;	/* number of PMCs: computed at init time */
	unsigned int   num_pmds;	/* number of PMDs: computed at init time */
	unsigned long  impl_pmcs[4];	/* bitmask of implemented PMCs */
	unsigned long  impl_pmds[4];	/* bitmask of implemented PMDs */

	char	      *pmu_name;	/* PMU family name */
	unsigned int  pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int  flags;		/* pmu specific flags */
	unsigned int  num_ibrs;		/* number of IBRs: computed at init time */
	unsigned int  num_dbrs;		/* number of DBRs: computed at init time */
	unsigned int  num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int           (*probe)(void);	/* customized probe routine */
	unsigned int  use_rr_dbregs:1;	/* set if debug registers used for range restriction */
} pmu_config_t;

/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */

/*
 * debug register related type definitions
 */
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long  val;
	ibr_mask_reg_t ibr;
	dbr_mask_reg_t dbr;
} dbreg_t;

/*
 * perfmon command descriptions
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1 /* cannot be zero */

typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;

/*
 * perfmon internal variables
 */
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static ctl_table pfm_ctl_table[] = {
	{
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{}
};
static ctl_table pfm_sysctl_dir[] = {
	{
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};
static ctl_table pfm_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)		per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}
static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void*)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static const struct dentry_operations pfmfs_dentry_operations;

static struct dentry *
pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
			PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.mount    = pfmfs_mount,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[] = {
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}

static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writing to unimplemented part is ignored, so we do not need to
	 * mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}

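/*
 * Overflow messages are queued in a small ring buffer inside the
 * context. pfm_get_new_msg() grabs the next free slot (producer,
 * overflow path); pfm_get_next_msg() dequeues one for pfm_read()
 * (consumer). Both run under the context lock.
 */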
static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}

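/*
 * The sampling buffer lives in vmalloc space but is remapped into
 * user space with remap_pfn_range(); each page is marked reserved so
 * it is skipped by swap-out and left alone by the VM while mapped.
 */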
static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vzalloc(size);
	if (mem) {
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}

static pfm_context_t *
pfm_context_alloc(int ctx_flags)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		DPRINT(("alloc ctx @%p\n", ctx));

		/*
		 * init context protection lock
		 */
		spin_lock_init(&ctx->ctx_lock);

		/*
		 * context is unloaded
		 */
		ctx->ctx_state = PFM_CTX_UNLOADED;

		/*
		 * initialization of context's flags
		 */
		ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
		ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
		ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;

		/*
		 * init restart semaphore to locked
		 */
		init_completion(&ctx->ctx_restart_done);

		/*
		 * activation is used in SMP only
		 */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/*
		 * initialize notification message queue
		 */
		ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
		init_waitqueue_head(&ctx->ctx_msgq_wait);
		init_waitqueue_head(&ctx->ctx_zombieq);
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}

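/*
 * Called after a counter overflow with monitoring masked: snapshot
 * the used PMDs into the 64-bit software counters, then clear the
 * privilege-level fields (low 4 bits) of the used monitor PMCs so no
 * further counting can occur until a PFM_RESTART.
 */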
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means the PMU still owns
	 * the context. In SMP, the task must be the owner of
	 * the context, so the PMU state is always live here and
	 * we can directly read the PMDs.
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0.
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the user
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}

/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC. As we restore their values,
	 * we do not want the PSR to enable monitoring just yet; it is
	 * restored at the very end, when the monitors are properly set.
	 * In system-wide mode, psr.pp is driven via the DCR.
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMDs
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n",
					task_pid_nr(task), i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}

static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

/*
 * reload from thread state (used for ctxsw only)
 */
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces:
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}

/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}

static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}


static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);

extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * Force idle() into poll mode
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;

}

static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}
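/*
 * Removes the virtual mapping of the sampling buffer from the user
 * address space. Must not be called with the context lock held:
 * vm_munmap() takes mmap_sem and may sleep.
 */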
static int
pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
{
	struct task_struct *task = current;
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	r = vm_munmap((unsigned long)vaddr, size);

	if (r !=0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}

/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}

/*
 * pfmfs should _never_ be mounted by userland. There is no real gain
 * from having it user-mountable, and quite some security hassle, so we
 * don't need any operations on the root directory. However, we need a
 * non-trivial d_name - "pfm:" will do nicely.
 */
static struct vfsmount *pfmfs_mnt __read_mostly;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

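/*
 * Blocking read of one overflow notification message. The context
 * lock is dropped before schedule() and re-acquired afterwards, so
 * the queue must be re-checked on every iteration of the loop.
 */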
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);


	for(;;) {
		/*
		 * check wait queue
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if(PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if(filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if(signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}

static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
			  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
		return 0;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static long
pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

/*
 * interrupt cannot be masked when coming here
 */
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		task_pid_nr(current),
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this may
	 * go to sleep if memory is not readily available.
	 *
	 * We are protected from the context being deleted by the VFS layer
	 * because we are called from the "close" callback.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);


	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

#ifdef CONFIG_SMP
/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t   *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
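/*
 * called for each close(). Partially frees resources.
 * When the caller is self-monitoring, the context is unloaded.
 */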
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic for\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM functions reenable interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
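/*
 * called either on explicit close() or from exit_files().
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to
 * zero (fput()), i.e., by the last task with access to the file. At
 * this point, nobody else can open or access it, which makes the
 * zombie-state handling below race-free on the caller side.
 */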
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 *	- ok for explicit close
		 *	- not ok when coming from exit_files()
		 */
		schedule();


		PROTECT_CTX(ctx, flags);


		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. should there be subsequent PMU overflows, the sampling
	 * buffer format will not detect any data, hence no harm.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * if state is UNLOADED, the session has already been unreserved
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	/*
	 * disconnecting the file descriptor and the context must be
	 * done before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the callers side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}

static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}



static const struct file_operations pfm_file_ops = {
	.llseek		= no_llseek,
	.read		= pfm_read,
	.write		= pfm_write,
	.poll		= pfm_poll,
	.unlocked_ioctl = pfm_ioctl,
	.open		= pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync		= pfm_fasync,
	.release	= pfm_close,
	.flush		= pfm_flush
};

static int
pfmfs_delete_dentry(const struct dentry *dentry)
{
	return 1;
}

static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
			     dentry->d_inode->i_ino);
}

static const struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
	.d_dname = pfmfs_dname,
};
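/*
 * Builds an anonymous file on the internal pfmfs mount: a fresh inode,
 * an unhashed dentry (it never shows up in a directory listing), and a
 * read-only struct file carrying the context in private_data.
 */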
static struct file *
pfm_alloc_file(pfm_context_t *ctx)
{
	struct file *file;
	struct inode *inode;
	struct path path;
	struct qstr this = { .name = "" };

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current_fsuid();
	inode->i_gid  = current_fsgid();

	/*
	 * allocate a new dcache entry
	 */
	path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(pfmfs_mnt);

	d_add(path.dentry, inode);

	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
	if (!file) {
		path_put(&path);
		return ERR_PTR(-ENFILE);
	}

	file->f_flags = O_RDONLY;
	file->private_data = ctx;

	return file;
}

static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
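/*
 * Allocates the sampling buffer and remaps it into the user address
 * space of the task. The vma is built by hand rather than via mmap()
 * and inserted with insert_vm_struct(); the size is charged against
 * RLIMIT_MEMLOCK because the pages are reserved and never swapped.
 */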
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size, aligned to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 */
	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc() clears the buffer, so there is no leak
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm	     = mm;
	vma->vm_file	     = filp;
	vma->vm_flags	     = VM_READ| VM_MAYREAD |VM_RESERVED;
	vma->vm_page_prot    = PAGE_READONLY;

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr   = smpl_buf;
	ctx->ctx_smpl_size  = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer into it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
	if (IS_ERR_VALUE(vma->vm_start)) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	get_file(filp);

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}

/*
 * XXX: do something better here
 */
static int
pfm_bad_permissions(struct task_struct *task)
{
	const struct cred *tcred;
	uid_t uid = current_uid();
	gid_t gid = current_gid();
	int ret;

	rcu_read_lock();
	tcred = __task_cred(task);

	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		uid,
		gid,
		tcred->euid,
		tcred->suid,
		tcred->uid,
		tcred->egid,
		tcred->sgid));

	ret = ((uid != tcred->euid)
	       || (uid != tcred->suid)
	       || (uid != tcred->uid)
	       || (gid != tcred->egid)
	       || (gid != tcred->sgid)
	       || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);

	rcu_read_unlock();
	return ret;
}

static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	/* valid signal */

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}
	/* probably more to add here */

	return 0;
}

static int
pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	/* invoke and lock buffer format, if found */
	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
		return -EINVAL;
	}

	/*
	 * buffer argument MUST be contiguous to pfarg_context_t
	 */
	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	/* link buffer format and context */
	ctx->ctx_buf_fmt = fmt;
	ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */

	/*
	 * check if buffer format wants to use perfmon buffer allocation/mapping service
	 */
	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		/*
		 * buffer is always remapped into the caller's address space
		 */
		ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
		if (ret) goto error;

		/* keep track of user address of buffer */
		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}

static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMCs.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMDs are already set to 0UL because the context is kzalloc'ed
	 */

	/*
	 * On ctxsw in, we must restore ALL PMCs and ALL PMDs even when
	 * they are not actively used by the task. In UP, the incoming
	 * process may otherwise pick up leftover PMC/PMD state from the
	 * previous process. Stale PMCs can change what is measured;
	 * stale PMDs can leak information to user level when psr.sp=0.
	 * There is unfortunately no easy way to avoid this on either UP
	 * or SMP; it slows down pfm_load_regs().
	 */

	/*
	 * bitmask of all PMCs accessible to this context.
	 *
	 * PMC0 is treated differently (freeze/unfreeze).
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0], ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}

static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	/* get just enough to copy in user parameters */
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}
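/*
 * cannot attach if :
 *	- kernel task
 *	- task not owned by caller
 *	- task incompatible with context mode
 */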
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	/*
	 * no kernel task or task not owned by caller
	 */
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
		return -EPERM;
	}
	/*
	 * cannot block in self-monitoring mode
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	/*
	 * always ok for self
	 */
	if (task == current) return 0;

	if (!task_is_stopped_or_traced(task)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
		return -EBUSY;
	}
	/*
	 * make sure the task is off any CPU
	 */
	wait_task_inactive(task, 0);

	/* more to come... */

	return 0;
}

static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != task_pid_vnr(current)) {

		read_lock(&tasklist_lock);

		p = find_task_by_vpid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
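/*
 * Handles the context-creation command: allocates the context and the
 * pfmfs file representing it, then installs the fd only after all
 * fallible steps (including buffer-format setup) have succeeded.
 */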
2657static int
2658pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2659{
2660 pfarg_context_t *req = (pfarg_context_t *)arg;
2661 struct file *filp;
2662 struct path path;
2663 int ctx_flags;
2664 int fd;
2665 int ret;
2666
2667
2668 ret = pfarg_is_sane(current, req);
2669 if (ret < 0)
2670 return ret;
2671
2672 ctx_flags = req->ctx_flags;
2673
2674 ret = -ENOMEM;
2675
2676 fd = get_unused_fd();
2677 if (fd < 0)
2678 return fd;
2679
2680 ctx = pfm_context_alloc(ctx_flags);
2681 if (!ctx)
2682 goto error;
2683
2684 filp = pfm_alloc_file(ctx);
2685 if (IS_ERR(filp)) {
2686 ret = PTR_ERR(filp);
2687 goto error_file;
2688 }
2689
2690 req->ctx_fd = ctx->ctx_fd = fd;
2691
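	/*
	 * does the user want to sample? if so, initialize the requested
	 * buffer format now
	 */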
2695 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2696 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2697 if (ret)
2698 goto buffer_error;
2699 }
2700
2701 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2702 ctx,
2703 ctx_flags,
2704 ctx->ctx_fl_system,
2705 ctx->ctx_fl_block,
2706 ctx->ctx_fl_excl_idle,
2707 ctx->ctx_fl_no_msg,
2708 ctx->ctx_fd));
2709
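	/*
	 * initialize the soft PMU state for this context
	 */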
2713 pfm_reset_pmu_state(ctx);
2714
2715 fd_install(fd, filp);
2716
2717 return 0;
2718
2719buffer_error:
2720 path = filp->f_path;
2721 put_filp(filp);
2722 path_put(&path);
2723
2724 if (ctx->ctx_buf_fmt) {
2725 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2726 }
2727error_file:
2728 pfm_context_free(ctx);
2729
2730error:
2731 put_unused_fd(fd);
2732 return ret;
2733}
2734
2735static inline unsigned long
2736pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2737{
2738 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2739 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2740 extern unsigned long carta_random32 (unsigned long seed);
2741
2742 if (reg->flags & PFM_REGFL_RANDOM) {
2743 new_seed = carta_random32(old_seed);
2744 val -= (old_seed & mask);
		if ((mask >> 32) != 0)
			new_seed |= carta_random32(old_seed >> 32) << 32;
2748 reg->seed = new_seed;
2749 }
2750 reg->lval = val;
2751 return val;
2752}
2753
2754static void
2755pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2756{
2757 unsigned long mask = ovfl_regs[0];
2758 unsigned long reset_others = 0UL;
2759 unsigned long val;
2760 int i;
2761
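	/*
	 * the context is masked: only the software copies are updated here;
	 * the hardware registers pick up the new values when monitoring is
	 * unmasked
	 */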
2765 mask >>= PMU_FIRST_COUNTER;
2766 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2767
2768 if ((mask & 0x1UL) == 0UL) continue;
2769
2770 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2771 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2772
2773 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2774 }
2775
2776
2777
2778
2779 for(i = 0; reset_others; i++, reset_others >>= 1) {
2780
2781 if ((reset_others & 0x1) == 0) continue;
2782
2783 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2784
2785 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2786 is_long_reset ? "long" : "short", i, val));
2787 }
2788}
2789
2790static void
2791pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2792{
2793 unsigned long mask = ovfl_regs[0];
2794 unsigned long reset_others = 0UL;
2795 unsigned long val;
2796 int i;
2797
2798 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2799
2800 if (ctx->ctx_state == PFM_CTX_MASKED) {
2801 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2802 return;
2803 }
2804
2805
2806
2807
2808 mask >>= PMU_FIRST_COUNTER;
2809 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2810
2811 if ((mask & 0x1UL) == 0UL) continue;
2812
2813 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2814 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2815
2816 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2817
2818 pfm_write_soft_counter(ctx, i, val);
2819 }
2820
2821
2822
2823
2824 for(i = 0; reset_others; i++, reset_others >>= 1) {
2825
2826 if ((reset_others & 0x1) == 0) continue;
2827
2828 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2829
2830 if (PMD_IS_COUNTING(i)) {
2831 pfm_write_soft_counter(ctx, i, val);
2832 } else {
2833 ia64_set_pmd(i, val);
2834 }
2835 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2836 is_long_reset ? "long" : "short", i, val));
2837 }
2838 ia64_srlz_d();
2839}
2840
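/*
 * PFM_WRITE_PMCS: validate and install new PMC values, updating the
 * context's bookkeeping and, when possible, the hardware registers.
 */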
2841static int
2842pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2843{
2844 struct task_struct *task;
2845 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2846 unsigned long value, pmc_pm;
2847 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2848 unsigned int cnum, reg_flags, flags, pmc_type;
2849 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2850 int is_monitor, is_counting, state;
2851 int ret = -EINVAL;
2852 pfm_reg_check_t wr_func;
2853#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2854
2855 state = ctx->ctx_state;
2856 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2857 is_system = ctx->ctx_fl_system;
2858 task = ctx->ctx_task;
2859 impl_pmds = pmu_conf->impl_pmds[0];
2860
2861 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2862
2863 if (is_loaded) {
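		/*
		 * for a loaded system-wide session, the PMU can only be
		 * touched from the CPU the session is bound to
		 */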
2869 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2870 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2871 return -EBUSY;
2872 }
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
2874 }
2875 expert_mode = pfm_sysctl.expert_mode;
2876
2877 for (i = 0; i < count; i++, req++) {
2878
2879 cnum = req->reg_num;
2880 reg_flags = req->reg_flags;
2881 value = req->reg_value;
2882 smpl_pmds = req->reg_smpl_pmds[0];
2883 reset_pmds = req->reg_reset_pmds[0];
2884 flags = 0;
2885
2886
2887 if (cnum >= PMU_MAX_PMCS) {
2888 DPRINT(("pmc%u is invalid\n", cnum));
2889 goto error;
2890 }
2891
2892 pmc_type = pmu_conf->pmc_desc[cnum].type;
2893 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2894 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2895 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2896
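		/*
		 * reject unimplemented PMCs as well as control registers the
		 * kernel reserves for its own use
		 */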
2902 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2903 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2904 goto error;
2905 }
2906 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2912 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2913 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2914 cnum,
2915 pmc_pm,
2916 is_system));
2917 goto error;
2918 }
2919
2920 if (is_counting) {
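			/*
			 * enforce the overflow-interrupt (oi) bit so that
			 * counter overflows are always visible to the kernel
			 */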
2925 value |= 1 << PMU_PMC_OI;
2926
2927 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2928 flags |= PFM_REGFL_OVFL_NOTIFY;
2929 }
2930
2931 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2932
2933
2934 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2935 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2936 goto error;
2937 }
2938
2939
2940 if ((reset_pmds & impl_pmds) != reset_pmds) {
2941 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2942 goto error;
2943 }
2944 } else {
2945 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2946 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2947 goto error;
2948 }
2949
2950 }
2951
2952
2953
2954
2955 if (likely(expert_mode == 0 && wr_func)) {
2956 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2957 if (ret) goto error;
2958 ret = -EINVAL;
2959 }
2960
2961
2962
2963
2964 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2965
2973 if (is_counting) {
2974
2975
2976
2977 ctx->ctx_pmds[cnum].flags = flags;
2978
2979 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2980 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2981 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
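			/*
			 * mark all PMDs used for sampling or reset by this
			 * PMC as used, so their state is maintained for this
			 * context
			 */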
2994 CTX_USED_PMD(ctx, reset_pmds);
2995 CTX_USED_PMD(ctx, smpl_pmds);
2996
2997
2998
2999
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3001 }
3002
3003
3004
3005
3006
3007 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
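		/*
		 * track which monitoring PMCs are in use by this context
		 */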
3021 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3022
3023
3024
3025
3026 ctx->ctx_pmcs[cnum] = value;
3027
3028 if (is_loaded) {
3029
3030
3031
3032 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3033
3034
3035
3036
3037 if (can_access_pmu) {
3038 ia64_set_pmc(cnum, value);
3039 }
3040#ifdef CONFIG_SMP
3041 else {
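				/*
				 * per-task mode with the context active on
				 * another CPU: we cannot write the register
				 * remotely, so flag it for reload when the
				 * context is next activated
				 */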
3049 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3050 }
3051#endif
3052 }
3053
		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reload_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3055 cnum,
3056 value,
3057 is_loaded,
3058 can_access_pmu,
3059 flags,
3060 ctx->ctx_all_pmcs[0],
3061 ctx->ctx_used_pmds[0],
3062 ctx->ctx_pmds[cnum].eventid,
3063 smpl_pmds,
3064 reset_pmds,
3065 ctx->ctx_reload_pmcs[0],
3066 ctx->ctx_used_monitors[0],
3067 ctx->ctx_ovfl_regs[0]));
3068 }
3069
3070
3071
3072
3073 if (can_access_pmu) ia64_srlz_d();
3074
3075 return 0;
3076error:
3077 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3078 return ret;
3079}
3080
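/*
 * PFM_WRITE_PMDS: install new PMD values, maintaining the 64-bit software
 * virtualization for counting registers.
 */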
3081static int
3082pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3083{
3084 struct task_struct *task;
3085 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3086 unsigned long value, hw_value, ovfl_mask;
3087 unsigned int cnum;
3088 int i, can_access_pmu = 0, state;
3089 int is_counting, is_loaded, is_system, expert_mode;
3090 int ret = -EINVAL;
3091 pfm_reg_check_t wr_func;
3092
3093
3094 state = ctx->ctx_state;
3095 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3096 is_system = ctx->ctx_fl_system;
3097 ovfl_mask = pmu_conf->ovfl_val;
3098 task = ctx->ctx_task;
3099
3100 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3101
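	/*
	 * on a loaded context, direct PMU access requires either owning the
	 * PMU or being on the CPU of a system-wide session
	 */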
3106 if (likely(is_loaded)) {
3107
3108
3109
3110
3111
3112 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3113 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3114 return -EBUSY;
3115 }
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3117 }
3118 expert_mode = pfm_sysctl.expert_mode;
3119
3120 for (i = 0; i < count; i++, req++) {
3121
3122 cnum = req->reg_num;
3123 value = req->reg_value;
3124
3125 if (!PMD_IS_IMPL(cnum)) {
3126 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3127 goto abort_mission;
3128 }
3129 is_counting = PMD_IS_COUNTING(cnum);
3130 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3131
3132
3133
3134
3135 if (unlikely(expert_mode == 0 && wr_func)) {
3136 unsigned long v = value;
3137
3138 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3139 if (ret) goto abort_mission;
3140
3141 value = v;
3142 ret = -EINVAL;
3143 }
3144
3145
3146
3147
3148 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3149
3150
3151
3152
3153 hw_value = value;
3154
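		/*
		 * for counting PMDs the 64-bit value is split: the low
		 * ovfl_mask bits live in the hardware register, the upper
		 * bits in the software copy (ctx_pmds[].val)
		 */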
3158 if (is_counting) {
3159
3160
3161
3162 ctx->ctx_pmds[cnum].lval = value;
3163
3164
3165
3166
3167 if (is_loaded) {
3168 hw_value = value & ovfl_mask;
3169 value = value & ~ovfl_mask;
3170 }
3171 }
3172
3173
3174
3175 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3176 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3177
3178
3179
3180
3181 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3182 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3183
3184
3185
3186
3187 ctx->ctx_pmds[cnum].val = value;
3188
3195 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3200 CTX_USED_PMD(ctx, RDEP(cnum));
3201
3206 if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3208 }
3209
3210 if (is_loaded) {
3211
3212
3213
3214 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3215
3216
3217
3218
3219 if (can_access_pmu) {
3220 ia64_set_pmd(cnum, hw_value);
3221 } else {
3222#ifdef CONFIG_SMP
3228 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3229#endif
3230 }
3231 }
3232
3233 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3234 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3235 cnum,
3236 value,
3237 is_loaded,
3238 can_access_pmu,
3239 hw_value,
3240 ctx->ctx_pmds[cnum].val,
3241 ctx->ctx_pmds[cnum].short_reset,
3242 ctx->ctx_pmds[cnum].long_reset,
3243 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3244 ctx->ctx_pmds[cnum].seed,
3245 ctx->ctx_pmds[cnum].mask,
3246 ctx->ctx_used_pmds[0],
3247 ctx->ctx_pmds[cnum].reset_pmds[0],
3248 ctx->ctx_reload_pmds[0],
3249 ctx->ctx_all_pmds[0],
3250 ctx->ctx_ovfl_regs[0]));
3251 }
3252
3253
3254
3255
3256 if (can_access_pmu) ia64_srlz_d();
3257
3258 return 0;
3259
3260abort_mission:
3261
3262
3263
3264 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3265 return ret;
3266}
3267
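/*
 * PFM_READ_PMDS: read the requested PMDs, combining the hardware register
 * (or the saved thread copy) with the 64-bit software value maintained for
 * counting registers.
 */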
3277static int
3278pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3279{
3280 struct task_struct *task;
3281 unsigned long val = 0UL, lval, ovfl_mask, sval;
3282 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3283 unsigned int cnum, reg_flags = 0;
3284 int i, can_access_pmu = 0, state;
3285 int is_loaded, is_system, is_counting, expert_mode;
3286 int ret = -EINVAL;
3287 pfm_reg_check_t rd_func;
3288
3294 state = ctx->ctx_state;
3295 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3296 is_system = ctx->ctx_fl_system;
3297 ovfl_mask = pmu_conf->ovfl_val;
3298 task = ctx->ctx_task;
3299
3300 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3301
3302 if (likely(is_loaded)) {
3308 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3309 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3310 return -EBUSY;
3311 }
3312
3313
3314
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3316
3317 if (can_access_pmu) ia64_srlz_d();
3318 }
3319 expert_mode = pfm_sysctl.expert_mode;
3320
3321 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3322 is_loaded,
3323 can_access_pmu,
3324 state));
3325
3331 for (i = 0; i < count; i++, req++) {
3332
3333 cnum = req->reg_num;
3334 reg_flags = req->reg_flags;
3335
3336 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3337
3345 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3346
3347 sval = ctx->ctx_pmds[cnum].val;
3348 lval = ctx->ctx_pmds[cnum].lval;
3349 is_counting = PMD_IS_COUNTING(cnum);
3350
3356 if (can_access_pmu){
3357 val = ia64_get_pmd(cnum);
3358 } else {
3364 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3365 }
3366 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3367
3368 if (is_counting) {
3369
3370
3371
3372 val &= ovfl_mask;
3373 val += sval;
3374 }
3375
3376
3377
3378
3379 if (unlikely(expert_mode == 0 && rd_func)) {
3380 unsigned long v = val;
3381 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3382 if (ret) goto error;
3383 val = v;
3384 ret = -EINVAL;
3385 }
3386
3387 PFM_REG_RETFLAG_SET(reg_flags, 0);
3388
3389 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3390
3396 req->reg_value = val;
3397 req->reg_flags = reg_flags;
3398 req->reg_last_reset_val = lval;
3399 }
3400
3401 return 0;
3402
3403error:
3404 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3405 return ret;
3406}
3407
3408int
3409pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3410{
3411 pfm_context_t *ctx;
3412
3413 if (req == NULL) return -EINVAL;
3414
3415 ctx = GET_PMU_CTX();
3416
3417 if (ctx == NULL) return -EINVAL;
3418
3419
3420
3421
3422
3423 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3424
3425 return pfm_write_pmcs(ctx, req, nreq, regs);
3426}
3427EXPORT_SYMBOL(pfm_mod_write_pmcs);
3428
3429int
3430pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3431{
3432 pfm_context_t *ctx;
3433
3434 if (req == NULL) return -EINVAL;
3435
3436 ctx = GET_PMU_CTX();
3437
3438 if (ctx == NULL) return -EINVAL;
3439
3440
3441
3442
3443
3444 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3445
3446 return pfm_read_pmds(ctx, req, nreq, regs);
3447}
3448EXPORT_SYMBOL(pfm_mod_read_pmds);
3449
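/*
 * called by the ptrace side when a debugger wants the IA-64 debug
 * registers; fails when perfmon (this task's context or a system-wide
 * session) already uses them for range restrictions.
 */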
3454int
3455pfm_use_debug_registers(struct task_struct *task)
3456{
3457 pfm_context_t *ctx = task->thread.pfm_context;
3458 unsigned long flags;
3459 int ret = 0;
3460
3461 if (pmu_conf->use_rr_dbregs == 0) return 0;
3462
3463 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3464
3465
3466
3467
3468 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3469
3478 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3479
3480 LOCK_PFS(flags);
3481
3486 if (pfm_sessions.pfs_sys_use_dbregs> 0)
3487 ret = -1;
3488 else
3489 pfm_sessions.pfs_ptrace_use_dbregs++;
3490
3491 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3492 pfm_sessions.pfs_ptrace_use_dbregs,
3493 pfm_sessions.pfs_sys_use_dbregs,
3494 task_pid_nr(task), ret));
3495
3496 UNLOCK_PFS(flags);
3497
3498 return ret;
3499}
3500
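/*
 * release the debug registers claimed via pfm_use_debug_registers()
 */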
3509int
3510pfm_release_debug_registers(struct task_struct *task)
3511{
3512 unsigned long flags;
3513 int ret;
3514
3515 if (pmu_conf->use_rr_dbregs == 0) return 0;
3516
3517 LOCK_PFS(flags);
3518 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3519 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3520 ret = -1;
3521 } else {
3522 pfm_sessions.pfs_ptrace_use_dbregs--;
3523 ret = 0;
3524 }
3525 UNLOCK_PFS(flags);
3526
3527 return ret;
3528}
3529
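/*
 * PFM_RESTART: resume after an overflow notification, resetting the
 * overflowed PMDs and unmasking monitoring as directed by the sampling
 * format (if any).
 */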
3530static int
3531pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3532{
3533 struct task_struct *task;
3534 pfm_buffer_fmt_t *fmt;
3535 pfm_ovfl_ctrl_t rst_ctrl;
3536 int state, is_system;
3537 int ret = 0;
3538
3539 state = ctx->ctx_state;
3540 fmt = ctx->ctx_buf_fmt;
3541 is_system = ctx->ctx_fl_system;
3542 task = PFM_CTX_TASK(ctx);
3543
3544 switch(state) {
3545 case PFM_CTX_MASKED:
3546 break;
3547 case PFM_CTX_LOADED:
3548 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
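		/* fall through */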
3550 case PFM_CTX_UNLOADED:
3551 case PFM_CTX_ZOMBIE:
3552 DPRINT(("invalid state=%d\n", state));
3553 return -EBUSY;
3554 default:
3555 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3556 return -EINVAL;
3557 }
3558
3564 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3565 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3566 return -EBUSY;
3567 }
3568
3569
3570 if (unlikely(task == NULL)) {
3571 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3572 return -EINVAL;
3573 }
3574
3575 if (task == current || is_system) {
3576
3577 fmt = ctx->ctx_buf_fmt;
3578
3579 DPRINT(("restarting self %d ovfl=0x%lx\n",
3580 task_pid_nr(task),
3581 ctx->ctx_ovfl_regs[0]));
3582
3583 if (CTX_HAS_SMPL(ctx)) {
3584
3585 prefetch(ctx->ctx_smpl_hdr);
3586
3587 rst_ctrl.bits.mask_monitoring = 0;
3588 rst_ctrl.bits.reset_ovfl_pmds = 0;
3589
3590 if (state == PFM_CTX_LOADED)
3591 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3592 else
3593 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3594 } else {
3595 rst_ctrl.bits.mask_monitoring = 0;
3596 rst_ctrl.bits.reset_ovfl_pmds = 1;
3597 }
3598
3599 if (ret == 0) {
3600 if (rst_ctrl.bits.reset_ovfl_pmds)
3601 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3602
3603 if (rst_ctrl.bits.mask_monitoring == 0) {
3604 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3605
3606 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3607 } else {
3608 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3609
3610
3611 }
3612 }
3613
3614
3615
3616 ctx->ctx_ovfl_regs[0] = 0UL;
3617
3618
3619
3620
3621 ctx->ctx_state = PFM_CTX_LOADED;
3622
3623
3624
3625
3626 ctx->ctx_fl_can_restart = 0;
3627
3628 return 0;
3629 }
3630
3639 if (state == PFM_CTX_MASKED) {
3640 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3641
3642
3643
3644
3645 ctx->ctx_fl_can_restart = 0;
3646 }
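	/*
	 * a blocked monitored task is woken up through the restart
	 * completion; otherwise a reset is armed that the task itself
	 * performs in pfm_handle_work() on its way back to user level
	 */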
3664 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3665 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3666 complete(&ctx->ctx_restart_done);
3667 } else {
3668 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3669
3670 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3671
3672 PFM_SET_WORK_PENDING(task, 1);
3673
3674 set_notify_resume(task);
3675
3676
3677
3678
3679 }
3680 return 0;
3681}
3682
3683static int
3684pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3685{
3686 unsigned int m = *(unsigned int *)arg;
3687
3688 pfm_sysctl.debug = m == 0 ? 0 : 1;
3689
3690 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3691
3692 if (m == 0) {
3693 memset(pfm_stats, 0, sizeof(pfm_stats));
3694 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3695 }
3696 return 0;
3697}
3698
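/*
 * common handler for PFM_WRITE_IBRS/PFM_WRITE_DBRS: mode selects code
 * (PFM_CODE_RR, IBR) or data (PFM_DATA_RR, DBR) range registers.
 */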
3702static int
3703pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3704{
3705 struct thread_struct *thread = NULL;
3706 struct task_struct *task;
3707 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3708 unsigned long flags;
3709 dbreg_t dbreg;
3710 unsigned int rnum;
3711 int first_time;
3712 int ret = 0, state;
3713 int i, can_access_pmu = 0;
3714 int is_system, is_loaded;
3715
3716 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3717
3718 state = ctx->ctx_state;
3719 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3720 is_system = ctx->ctx_fl_system;
3721 task = ctx->ctx_task;
3722
3723 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3724
3725
3726
3727
3728
3729 if (is_loaded) {
3730 thread = &task->thread;
3731
3732
3733
3734
3735
3736 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3737 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3738 return -EBUSY;
3739 }
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3741 }
3742
3750 first_time = ctx->ctx_fl_using_dbreg == 0;
3751
3752
3753
3754
3755 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3756 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3757 return -EBUSY;
3758 }
3759
3767 if (is_loaded) {
3768 LOCK_PFS(flags);
3769
3770 if (first_time && is_system) {
3771 if (pfm_sessions.pfs_ptrace_use_dbregs)
3772 ret = -EBUSY;
3773 else
3774 pfm_sessions.pfs_sys_use_dbregs++;
3775 }
3776 UNLOCK_PFS(flags);
3777 }
3778
3779 if (ret != 0) return ret;
3780
3785 ctx->ctx_fl_using_dbreg = 1;
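	/*
	 * on first use, put all debug registers in a known (disabled) state
	 * before installing the new values
	 */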
3796 if (first_time && can_access_pmu) {
3797 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3798 for (i=0; i < pmu_conf->num_ibrs; i++) {
3799 ia64_set_ibr(i, 0UL);
3800 ia64_dv_serialize_instruction();
3801 }
3802 ia64_srlz_i();
3803 for (i=0; i < pmu_conf->num_dbrs; i++) {
3804 ia64_set_dbr(i, 0UL);
3805 ia64_dv_serialize_data();
3806 }
3807 ia64_srlz_d();
3808 }
3809
3810
3811
3812
3813 for (i = 0; i < count; i++, req++) {
3814
3815 rnum = req->dbreg_num;
3816 dbreg.val = req->dbreg_value;
3817
3818 ret = -EINVAL;
3819
3820 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3821 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3822 rnum, dbreg.val, mode, i, count));
3823
3824 goto abort_mission;
3825 }
3826
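		/*
		 * make sure we do not install enabled breakpoints: the odd
		 * register of each pair holds the control bits
		 */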
3830 if (rnum & 0x1) {
3831 if (mode == PFM_CODE_RR)
3832 dbreg.ibr.ibr_x = 0;
3833 else
3834 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3835 }
3836
3837 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3838
3849 if (mode == PFM_CODE_RR) {
3850 CTX_USED_IBR(ctx, rnum);
3851
3852 if (can_access_pmu) {
3853 ia64_set_ibr(rnum, dbreg.val);
3854 ia64_dv_serialize_instruction();
3855 }
3856
3857 ctx->ctx_ibrs[rnum] = dbreg.val;
3858
3859 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3860 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3861 } else {
3862 CTX_USED_DBR(ctx, rnum);
3863
3864 if (can_access_pmu) {
3865 ia64_set_dbr(rnum, dbreg.val);
3866 ia64_dv_serialize_data();
3867 }
3868 ctx->ctx_dbrs[rnum] = dbreg.val;
3869
3870 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3871 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3872 }
3873 }
3874
3875 return 0;
3876
3877abort_mission:
3878
3879
3880
3881 if (first_time) {
3882 LOCK_PFS(flags);
3883 if (ctx->ctx_fl_system) {
3884 pfm_sessions.pfs_sys_use_dbregs--;
3885 }
3886 UNLOCK_PFS(flags);
3887 ctx->ctx_fl_using_dbreg = 0;
3888 }
3889
3890
3891
3892 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3893
3894 return ret;
3895}
3896
3897static int
3898pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3899{
3900 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3901}
3902
3903static int
3904pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3905{
3906 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3907}
3908
3909int
3910pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3911{
3912 pfm_context_t *ctx;
3913
3914 if (req == NULL) return -EINVAL;
3915
3916 ctx = GET_PMU_CTX();
3917
3918 if (ctx == NULL) return -EINVAL;
3919
3920
3921
3922
3923
3924 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3925
3926 return pfm_write_ibrs(ctx, req, nreq, regs);
3927}
3928EXPORT_SYMBOL(pfm_mod_write_ibrs);
3929
3930int
3931pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3932{
3933 pfm_context_t *ctx;
3934
3935 if (req == NULL) return -EINVAL;
3936
3937 ctx = GET_PMU_CTX();
3938
3939 if (ctx == NULL) return -EINVAL;
3940
3941
3942
3943
3944
3945 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3946
3947 return pfm_write_dbrs(ctx, req, nreq, regs);
3948}
3949EXPORT_SYMBOL(pfm_mod_write_dbrs);
3950
3951
3952static int
3953pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3954{
3955 pfarg_features_t *req = (pfarg_features_t *)arg;
3956
3957 req->ft_version = PFM_VERSION;
3958 return 0;
3959}
3960
3961static int
3962pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3963{
3964 struct pt_regs *tregs;
3965 struct task_struct *task = PFM_CTX_TASK(ctx);
3966 int state, is_system;
3967
3968 state = ctx->ctx_state;
3969 is_system = ctx->ctx_fl_system;
3970
3971
3972
3973
3974 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3975
3980
3981 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3982 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3983 return -EBUSY;
3984 }
3985 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3986 task_pid_nr(PFM_CTX_TASK(ctx)),
3987 state,
3988 is_system));
3989
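	/*
	 * system-wide session: stop monitoring on this CPU by clearing
	 * dcr.pp and psr.pp; per-task sessions below clear psr.up instead
	 */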
3994 if (is_system) {
3995
4000 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4001 ia64_srlz_i();
4002
4003
4004
4005
4006 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4007
4008
4009
4010
4011 pfm_clear_psr_pp();
4012
4013
4014
4015
4016 ia64_psr(regs)->pp = 0;
4017
4018 return 0;
4019 }
4020
4021
4022
4023
4024 if (task == current) {
4025
4026 pfm_clear_psr_up();
4027
4028
4029
4030
4031 ia64_psr(regs)->up = 0;
4032 } else {
4033 tregs = task_pt_regs(task);
4034
4035
4036
4037
4038 ia64_psr(tregs)->up = 0;
4039
4040
4041
4042
4043 ctx->ctx_saved_psr_up = 0;
4044 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4045 }
4046 return 0;
4047}
4048
4049
4050static int
4051pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4052{
4053 struct pt_regs *tregs;
4054 int state, is_system;
4055
4056 state = ctx->ctx_state;
4057 is_system = ctx->ctx_fl_system;
4058
4059 if (state != PFM_CTX_LOADED) return -EINVAL;
4060
4066 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4067 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4068 return -EBUSY;
4069 }
4070
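	/*
	 * system-wide session: resume monitoring on this CPU by setting
	 * psr.pp and dcr.pp
	 */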
4076 if (is_system) {
4077
4078
4079
4080
4081 ia64_psr(regs)->pp = 1;
4082
4083
4084
4085
4086 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4087
4088
4089
4090
4091 pfm_set_psr_pp();
4092
4093
4094 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4095 ia64_srlz_i();
4096
4097 return 0;
4098 }
4099
4100
4101
4102
4103
4104 if (ctx->ctx_task == current) {
4105
4106
4107 pfm_set_psr_up();
4108
4109
4110
4111
4112 ia64_psr(regs)->up = 1;
4113
4114 } else {
4115 tregs = task_pt_regs(ctx->ctx_task);
4116
4117
4118
4119
4120
4121 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4122
4123
4124
4125
4126 ia64_psr(tregs)->up = 1;
4127 }
4128 return 0;
4129}
4130
4131static int
4132pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4133{
4134 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4135 unsigned int cnum;
4136 int i;
4137 int ret = -EINVAL;
4138
4139 for (i = 0; i < count; i++, req++) {
4140
4141 cnum = req->reg_num;
4142
4143 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4144
4145 req->reg_value = PMC_DFL_VAL(cnum);
4146
4147 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4148
4149 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4150 }
4151 return 0;
4152
4153abort_mission:
4154 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4155 return ret;
4156}
4157
4158static int
4159pfm_check_task_exist(pfm_context_t *ctx)
4160{
4161 struct task_struct *g, *t;
4162 int ret = -ESRCH;
4163
4164 read_lock(&tasklist_lock);
4165
4166 do_each_thread (g, t) {
4167 if (t->thread.pfm_context == ctx) {
4168 ret = 0;
4169 goto out;
4170 }
4171 } while_each_thread (g, t);
4172out:
4173 read_unlock(&tasklist_lock);
4174
4175 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4176
4177 return ret;
4178}
4179
4180static int
4181pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4182{
4183 struct task_struct *task;
4184 struct thread_struct *thread;
4185 struct pfm_context_t *old;
4186 unsigned long flags;
4187#ifndef CONFIG_SMP
4188 struct task_struct *owner_task = NULL;
4189#endif
4190 pfarg_load_t *req = (pfarg_load_t *)arg;
4191 unsigned long *pmcs_source, *pmds_source;
4192 int the_cpu;
4193 int ret = 0;
4194 int state, is_system, set_dbregs = 0;
4195
4196 state = ctx->ctx_state;
4197 is_system = ctx->ctx_fl_system;
4198
4199
4200
4201 if (state != PFM_CTX_UNLOADED) {
4202 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4203 req->load_pid,
4204 ctx->ctx_state));
4205 return -EBUSY;
4206 }
4207
4208 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4209
4210 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4211 DPRINT(("cannot use blocking mode on self\n"));
4212 return -EINVAL;
4213 }
4214
4215 ret = pfm_get_task(ctx, req->load_pid, &task);
4216 if (ret) {
4217 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4218 return ret;
4219 }
4220
4221 ret = -EINVAL;
4222
4223
4224
4225
4226 if (is_system && task != current) {
		DPRINT(("system-wide sessions are self-monitoring only, load_pid=%d\n",
			req->load_pid));
4229 goto error;
4230 }
4231
4232 thread = &task->thread;
4233
4234 ret = 0;
4235
4236
4237
4238
4239 if (ctx->ctx_fl_using_dbreg) {
4240 if (thread->flags & IA64_THREAD_DBG_VALID) {
4241 ret = -EBUSY;
			DPRINT(("load_pid [%d] task is being debugged, cannot load range restrictions\n", req->load_pid));
4243 goto error;
4244 }
4245 LOCK_PFS(flags);
4246
4247 if (is_system) {
4248 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4249 DPRINT(("cannot load [%d] dbregs in use\n",
4250 task_pid_nr(task)));
4251 ret = -EBUSY;
4252 } else {
4253 pfm_sessions.pfs_sys_use_dbregs++;
4254 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4255 set_dbregs = 1;
4256 }
4257 }
4258
4259 UNLOCK_PFS(flags);
4260
4261 if (ret) goto error;
4262 }
4263
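	/*
	 * system-wide sessions are self-monitoring and are expected to stay
	 * pinned on the CPU they reserved, so the session is bound to the
	 * current CPU here
	 */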
4279 the_cpu = ctx->ctx_cpu = smp_processor_id();
4280
4281 ret = -EBUSY;
4282
4283
4284
4285 ret = pfm_reserve_session(current, is_system, the_cpu);
4286 if (ret) goto error;
4287
4297 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4298 thread->pfm_context, ctx));
4299
4300 ret = -EBUSY;
4301 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4302 if (old != NULL) {
4303 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4304 goto error_unres;
4305 }
4306
4307 pfm_reset_msgq(ctx);
4308
4309 ctx->ctx_state = PFM_CTX_LOADED;
4310
4311
4312
4313
4314 ctx->ctx_task = task;
4315
4316 if (is_system) {
4317
4318
4319
4320 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4321 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4322
4323 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4324 } else {
4325 thread->flags |= IA64_THREAD_PM_VALID;
4326 }
4327
4328
4329
4330
4331 pfm_copy_pmds(task, ctx);
4332 pfm_copy_pmcs(task, ctx);
4333
4334 pmcs_source = ctx->th_pmcs;
4335 pmds_source = ctx->th_pmds;
4336
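	/*
	 * self-monitoring: install the PMU state right away; for another
	 * (stopped) task the state is picked up at its next context switch
	 */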
4340 if (task == current) {
4341
4342 if (is_system == 0) {
4343
4344
4345 ia64_psr(regs)->sp = 0;
4346 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4347
4348 SET_LAST_CPU(ctx, smp_processor_id());
4349 INC_ACTIVATION();
4350 SET_ACTIVATION(ctx);
4351#ifndef CONFIG_SMP
4352
4353
4354
4355 owner_task = GET_PMU_OWNER();
4356 if (owner_task) pfm_lazy_save_regs(owner_task);
4357#endif
4358 }
4359
4360
4361
4362
4363 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4364 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4365
4366 ctx->ctx_reload_pmcs[0] = 0UL;
4367 ctx->ctx_reload_pmds[0] = 0UL;
4368
4369
4370
4371
4372 if (ctx->ctx_fl_using_dbreg) {
4373 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4374 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4375 }
4376
4377
4378
4379 SET_PMU_OWNER(task, ctx);
4380
4381 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4382 } else {
4383
4384
4385
4386 regs = task_pt_regs(task);
4387
4388
4389 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4390 SET_LAST_CPU(ctx, -1);
4391
4392
4393 ctx->ctx_saved_psr_up = 0UL;
4394 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4395 }
4396
4397 ret = 0;
4398
4399error_unres:
4400 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4401error:
4402
4403
4404
4405 if (ret && set_dbregs) {
4406 LOCK_PFS(flags);
4407 pfm_sessions.pfs_sys_use_dbregs--;
4408 UNLOCK_PFS(flags);
4409 }
4410
4411
4412
4413 if (is_system == 0 && task != current) {
4414 pfm_put_task(task);
4415
4416 if (ret == 0) {
4417 ret = pfm_check_task_exist(ctx);
4418 if (ret) {
4419 ctx->ctx_state = PFM_CTX_UNLOADED;
4420 ctx->ctx_task = NULL;
4421 }
4422 }
4423 }
4424 return ret;
4425}
4426
4435static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4436
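/*
 * PFM_UNLOAD_CONTEXT: stop monitoring, flush the final PMD values into the
 * context, release the session and detach the context from its task or CPU.
 */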
4437static int
4438pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4439{
4440 struct task_struct *task = PFM_CTX_TASK(ctx);
4441 struct pt_regs *tregs;
4442 int prev_state, is_system;
4443 int ret;
4444
4445 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4446
4447 prev_state = ctx->ctx_state;
4448 is_system = ctx->ctx_fl_system;
4449
4450
4451
4452
4453 if (prev_state == PFM_CTX_UNLOADED) {
4454 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4455 return 0;
4456 }
4457
4458
4459
4460
4461 ret = pfm_stop(ctx, NULL, 0, regs);
4462 if (ret) return ret;
4463
4464 ctx->ctx_state = PFM_CTX_UNLOADED;
4471 if (is_system) {
4478 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4479 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4480
4485 pfm_flush_pmds(current, ctx);
4486
4491 if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);
4493
4494
4495
4496
4497 task->thread.pfm_context = NULL;
4498
4499
4500
4501 ctx->ctx_task = NULL;
4502
4503
4504
4505
4506 return 0;
4507 }
4508
4509
4510
4511
4512 tregs = task == current ? regs : task_pt_regs(task);
4513
4514 if (task == current) {
4515
4516
4517
4518 ia64_psr(regs)->sp = 1;
4519
4520 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4521 }
4522
4523
4524
4525
4526 pfm_flush_pmds(task, ctx);
4527
4534 if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);
4536
4537
4538
4539
4540 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4541 SET_LAST_CPU(ctx, -1);
4542
4543
4544
4545
4546 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4547
4548
4549
4550
4551 task->thread.pfm_context = NULL;
4552 ctx->ctx_task = NULL;
4553
4554 PFM_SET_WORK_PENDING(task, 0);
4555
4556 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4557 ctx->ctx_fl_can_restart = 0;
4558 ctx->ctx_fl_going_zombie = 0;
4559
4560 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4561
4562 return 0;
4563}
4564
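/*
 * called on task exit when the task still has a perfmon context attached:
 * unload it and, for zombie contexts, free it.
 */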
4570void
4571pfm_exit_thread(struct task_struct *task)
4572{
4573 pfm_context_t *ctx;
4574 unsigned long flags;
4575 struct pt_regs *regs = task_pt_regs(task);
4576 int ret, state;
4577 int free_ok = 0;
4578
4579 ctx = PFM_GET_CTX(task);
4580
4581 PROTECT_CTX(ctx, flags);
4582
4583 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4584
4585 state = ctx->ctx_state;
4586 switch(state) {
4587 case PFM_CTX_UNLOADED:
4588
4589
4590
4591
4592 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4593 break;
4594 case PFM_CTX_LOADED:
4595 case PFM_CTX_MASKED:
4596 ret = pfm_context_unload(ctx, NULL, 0, regs);
4597 if (ret) {
4598 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4599 }
4600 DPRINT(("ctx unloaded for current state was %d\n", state));
4601
4602 pfm_end_notify_user(ctx);
4603 break;
4604 case PFM_CTX_ZOMBIE:
4605 ret = pfm_context_unload(ctx, NULL, 0, regs);
4606 if (ret) {
4607 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4608 }
4609 free_ok = 1;
4610 break;
4611 default:
4612 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4613 break;
4614 }
4615 UNPROTECT_CTX(ctx, flags);
4616
4617 { u64 psr = pfm_get_psr();
4618 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4619 BUG_ON(GET_PMU_OWNER());
4620 BUG_ON(ia64_psr(regs)->up);
4621 BUG_ON(ia64_psr(regs)->pp);
4622 }
4623
4624
4625
4626
4627
4628 if (free_ok) pfm_context_free(ctx);
4629}
4630
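/*
 * perfmon command dispatch table: each entry must remain at the index of
 * its PFM_* command code (see sys_perfmonctl()).
 */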
4634#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4635#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4636#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4637#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4638#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4639
4640static pfm_cmd_desc_t pfm_cmd_tab[]={
4641PFM_CMD_NONE,
4642PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4643PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4644PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4645PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4646PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4647PFM_CMD_NONE,
4648PFM_CMD_NONE,
4649PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4650PFM_CMD_NONE,
4651PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4652PFM_CMD_NONE,
4653PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4654PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4655PFM_CMD_NONE,
4656PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4657PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4658PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4659PFM_CMD_NONE,
4660PFM_CMD_NONE,
4661PFM_CMD_NONE,
4662PFM_CMD_NONE,
4663PFM_CMD_NONE,
4664PFM_CMD_NONE,
4665PFM_CMD_NONE,
4666PFM_CMD_NONE,
4667PFM_CMD_NONE,
4668PFM_CMD_NONE,
4669PFM_CMD_NONE,
4670PFM_CMD_NONE,
4671PFM_CMD_NONE,
4672PFM_CMD_NONE,
4673PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4674PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4675};
4676#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4677
4678static int
4679pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4680{
4681 struct task_struct *task;
4682 int state, old_state;
4683
4684recheck:
4685 state = ctx->ctx_state;
4686 task = ctx->ctx_task;
4687
4688 if (task == NULL) {
4689 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4690 return 0;
4691 }
4692
4693 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4694 ctx->ctx_fd,
4695 state,
4696 task_pid_nr(task),
4697 task->state, PFM_CMD_STOPPED(cmd)));
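	/*
	 * self-monitoring and system-wide sessions never require the
	 * monitored task to be stopped
	 */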
4706 if (task == current || ctx->ctx_fl_system) return 0;
4707
4708
4709
4710
4711 switch(state) {
4712 case PFM_CTX_UNLOADED:
4713
4714
4715
4716 return 0;
4717 case PFM_CTX_ZOMBIE:
4718
4719
4720
4721 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4722 return -EINVAL;
4723 case PFM_CTX_MASKED:
4724
4725
4726
4727
4728 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4729 }
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740
4741 if (PFM_CMD_STOPPED(cmd)) {
4742 if (!task_is_stopped_or_traced(task)) {
4743 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4744 return -EBUSY;
4745 }
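	/*
	 * the task is stopped but may not be off the CPU yet. The context
	 * lock cannot be held across wait_task_inactive(), so drop it,
	 * wait, and re-validate the context state afterwards.
	 */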
4760 old_state = state;
4761
4762 UNPROTECT_CTX(ctx, flags);
4763
4764 wait_task_inactive(task, 0);
4765
4766 PROTECT_CTX(ctx, flags);
4767
4768
4769
4770
4771 if (ctx->ctx_state != old_state) {
4772 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4773 goto recheck;
4774 }
4775 }
4776 return 0;
4777}
4778
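/*
 * system-call entry point: copies the argument vector in, dispatches to the
 * handler from pfm_cmd_tab[] and copies results back out.
 */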
4782asmlinkage long
4783sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4784{
4785 struct file *file = NULL;
4786 pfm_context_t *ctx = NULL;
4787 unsigned long flags = 0UL;
4788 void *args_k = NULL;
4789 long ret;
4790 size_t base_sz, sz, xtra_sz = 0;
4791 int narg, completed_args = 0, call_made = 0, cmd_flags;
4792 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4793 int (*getsize)(void *arg, size_t *sz);
4794#define PFM_MAX_ARGSIZE 4096
4795
4796
4797
4798
4799 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4800
4801 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4802 DPRINT(("invalid cmd=%d\n", cmd));
4803 return -EINVAL;
4804 }
4805
4806 func = pfm_cmd_tab[cmd].cmd_func;
4807 narg = pfm_cmd_tab[cmd].cmd_narg;
4808 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4809 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4810 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4811
4812 if (unlikely(func == NULL)) {
4813 DPRINT(("invalid cmd=%d\n", cmd));
4814 return -EINVAL;
4815 }
4816
4817 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4818 PFM_CMD_NAME(cmd),
4819 cmd,
4820 narg,
4821 base_sz,
4822 count));
4823
4824
4825
4826
4827 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4828 return -EINVAL;
4829
4830restart_args:
4831 sz = xtra_sz + base_sz*count;
4832
4833
4834
4835 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4836 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4837 return -E2BIG;
4838 }
4839
4840
4841
4842
4843 if (likely(count && args_k == NULL)) {
4844 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4845 if (args_k == NULL) return -ENOMEM;
4846 }
4847
4848 ret = -EFAULT;
4849
4850
4851
4852
4853
4854
4855 if (sz && copy_from_user(args_k, arg, sz)) {
4856 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4857 goto error_args;
4858 }
4859
4860
4861
4862
4863 if (completed_args == 0 && getsize) {
4864
4865
4866
4867 ret = (*getsize)(args_k, &xtra_sz);
4868 if (ret) goto error_args;
4869
4870 completed_args = 1;
4871
4872 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4873
4874
4875 if (likely(xtra_sz)) goto restart_args;
4876 }
4877
4878 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4879
4880 ret = -EBADF;
4881
4882 file = fget(fd);
4883 if (unlikely(file == NULL)) {
4884 DPRINT(("invalid fd %d\n", fd));
4885 goto error_args;
4886 }
4887 if (unlikely(PFM_IS_FILE(file) == 0)) {
4888 DPRINT(("fd %d not related to perfmon\n", fd));
4889 goto error_args;
4890 }
4891
4892 ctx = file->private_data;
4893 if (unlikely(ctx == NULL)) {
4894 DPRINT(("no context for fd %d\n", fd));
4895 goto error_args;
4896 }
4897 prefetch(&ctx->ctx_state);
4898
4899 PROTECT_CTX(ctx, flags);
4900
4901
4902
4903
4904 ret = pfm_check_task_state(ctx, cmd, flags);
4905 if (unlikely(ret)) goto abort_locked;
4906
4907skip_fd:
4908 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4909
4910 call_made = 1;
4911
4912abort_locked:
4913 if (likely(ctx)) {
4914 DPRINT(("context unlocked\n"));
4915 UNPROTECT_CTX(ctx, flags);
4916 }
4917
4918
4919 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4920
4921error_args:
4922 if (file)
4923 fput(file);
4924
4925 kfree(args_k);
4926
4927 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4928
4929 return ret;
4930}
4931
4932static void
4933pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4934{
4935 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4936 pfm_ovfl_ctrl_t rst_ctrl;
4937 int state;
4938 int ret = 0;
4939
4940 state = ctx->ctx_state;
4941
4942
4943
4944
4945 if (CTX_HAS_SMPL(ctx)) {
4946
4947 rst_ctrl.bits.mask_monitoring = 0;
4948 rst_ctrl.bits.reset_ovfl_pmds = 0;
4949
4950 if (state == PFM_CTX_LOADED)
4951 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4952 else
4953 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4954 } else {
4955 rst_ctrl.bits.mask_monitoring = 0;
4956 rst_ctrl.bits.reset_ovfl_pmds = 1;
4957 }
4958
4959 if (ret == 0) {
4960 if (rst_ctrl.bits.reset_ovfl_pmds) {
4961 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4962 }
4963 if (rst_ctrl.bits.mask_monitoring == 0) {
4964 DPRINT(("resuming monitoring\n"));
4965 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4966 } else {
4967 DPRINT(("stopping monitoring\n"));
4968
4969 }
4970 ctx->ctx_state = PFM_CTX_LOADED;
4971 }
4972}
4973
4974
4975
4976
4977
4978static void
4979pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4980{
4981 int ret;
4982
4983 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
4984
4985 ret = pfm_context_unload(ctx, NULL, 0, regs);
4986 if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
4988 }
4989
4990
4991
4992
4993 wake_up_interruptible(&ctx->ctx_zombieq);
4994
4995
4996
4997
4998
4999
5000}
5001
5002static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5003
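/*
 * deferred work executed on the way back to user level: blocks the task
 * after a notified overflow, handles zombie-context termination and resets
 * overflowed PMDs when requested.
 */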
5013void
5014pfm_handle_work(void)
5015{
5016 pfm_context_t *ctx;
5017 struct pt_regs *regs;
5018 unsigned long flags, dummy_flags;
5019 unsigned long ovfl_regs;
5020 unsigned int reason;
5021 int ret;
5022
5023 ctx = PFM_GET_CTX(current);
5024 if (ctx == NULL) {
5025 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
5026 task_pid_nr(current));
5027 return;
5028 }
5029
5030 PROTECT_CTX(ctx, flags);
5031
5032 PFM_SET_WORK_PENDING(current, 0);
5033
5034 regs = task_pt_regs(current);
5035
5036
5037
5038
5039 reason = ctx->ctx_fl_trap_reason;
5040 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5041 ovfl_regs = ctx->ctx_ovfl_regs[0];
5042
5043 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5044
5045
5046
5047
5048 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
5049 goto do_zombie;
5050
5051
5052 if (reason == PFM_TRAP_REASON_RESET)
5053 goto skip_blocking;
5054
5055
5056
5057
5058
5059 UNPROTECT_CTX(ctx, flags);
5060
5061
5062
5063
5064 local_irq_enable();
5065
5066 DPRINT(("before block sleeping\n"));
5067
5068
5069
5070
5071
5072 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5073
5074 DPRINT(("after block sleeping ret=%d\n", ret));
5075
5082 PROTECT_CTX(ctx, dummy_flags);
5083
5090 ovfl_regs = ctx->ctx_ovfl_regs[0];
5091
5092 if (ctx->ctx_fl_going_zombie) {
5093do_zombie:
5094 DPRINT(("context is zombie, bailing out\n"));
5095 pfm_context_force_terminate(ctx, regs);
5096 goto nothing_to_do;
5097 }
5098
5099
5100
5101 if (ret < 0)
5102 goto nothing_to_do;
5103
5104skip_blocking:
5105 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5106 ctx->ctx_ovfl_regs[0] = 0UL;
5107
5108nothing_to_do:
5109
5110
5111
5112 UNPROTECT_CTX(ctx, flags);
5113}
5114
5115static int
5116pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5117{
5118 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5119 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5120 return 0;
5121 }
5122
5123 DPRINT(("waking up somebody\n"));
5124
5125 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5126
5127
5128
5129
5130
5131 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5132
5133 return 0;
5134}
5135
5136static int
5137pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5138{
5139 pfm_msg_t *msg = NULL;
5140
5141 if (ctx->ctx_fl_no_msg == 0) {
5142 msg = pfm_get_new_msg(ctx);
5143 if (msg == NULL) {
5144 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5145 return -1;
5146 }
5147
5148 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5149 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5150 msg->pfm_ovfl_msg.msg_active_set = 0;
5151 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5152 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5153 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5154 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5155 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5156 }
5157
5158 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5159 msg,
5160 ctx->ctx_fl_no_msg,
5161 ctx->ctx_fd,
5162 ovfl_pmds));
5163
5164 return pfm_notify_user(ctx, msg);
5165}
5166
5167static int
5168pfm_end_notify_user(pfm_context_t *ctx)
5169{
5170 pfm_msg_t *msg;
5171
5172 msg = pfm_get_new_msg(ctx);
5173 if (msg == NULL) {
5174 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5175 return -1;
5176 }
5177
5178 memset(msg, 0, sizeof(*msg));
5179
5180 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5181 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5182 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5183
5184 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5185 msg,
5186 ctx->ctx_fl_no_msg,
5187 ctx->ctx_fd));
5188
5189 return pfm_notify_user(ctx, msg);
5190}
5191
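/*
 * main overflow processing routine: updates the 64-bit virtualized
 * counters, invokes the sampling-format handler if one is attached, and
 * decides whether to reset PMDs, mask monitoring and/or notify the user.
 */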
5196static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5197 unsigned long pmc0, struct pt_regs *regs)
5198{
5199 pfm_ovfl_arg_t *ovfl_arg;
5200 unsigned long mask;
5201 unsigned long old_val, ovfl_val, new_val;
5202 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5203 unsigned long tstamp;
5204 pfm_ovfl_ctrl_t ovfl_ctrl;
5205 unsigned int i, has_smpl;
5206 int must_notify = 0;
5207
5208 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5209
5210
5211
5212
5213 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5214
5215 tstamp = ia64_get_itc();
5216 mask = pmc0 >> PMU_FIRST_COUNTER;
5217 ovfl_val = pmu_conf->ovfl_val;
5218 has_smpl = CTX_HAS_SMPL(ctx);
5219
5220 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5221 "used_pmds=0x%lx\n",
5222 pmc0,
5223 task ? task_pid_nr(task): -1,
5224 (regs ? regs->cr_iip : 0),
5225 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5226 ctx->ctx_used_pmds[0]));
5227
5233 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5234
5235
5236 if ((mask & 0x1) == 0) continue;
5237
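		/*
		 * the hardware counter wrapped: add the wrap quantum
		 * (1 + ovfl_val) to the 64-bit software value
		 */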
5244 old_val = new_val = ctx->ctx_pmds[i].val;
5245 new_val += 1 + ovfl_val;
5246 ctx->ctx_pmds[i].val = new_val;
5247
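		/*
		 * a wrap of the 64-bit software value indicates a true
		 * overflow of the virtualized counter
		 */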
5251 if (likely(old_val > new_val)) {
5252 ovfl_pmds |= 1UL << i;
5253 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5254 }
5255
5256 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5257 i,
5258 new_val,
5259 old_val,
5260 ia64_get_pmd(i) & ovfl_val,
5261 ovfl_pmds,
5262 ovfl_notify));
5263 }
5264
5265
5266
5267
5268 if (ovfl_pmds == 0UL) return;
5269
5270
5271
5272
5273 ovfl_ctrl.val = 0;
5274 reset_pmds = 0UL;
5275
5276
5277
5278
5279
5280 if (has_smpl) {
5281 unsigned long start_cycles, end_cycles;
5282 unsigned long pmd_mask;
5283 int j, k, ret = 0;
5284 int this_cpu = smp_processor_id();
5285
5286 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5287 ovfl_arg = &ctx->ctx_ovfl_arg;
5288
5289 prefetch(ctx->ctx_smpl_hdr);
5290
5291 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5292
5293 mask = 1UL << i;
5294
5295 if ((pmd_mask & 0x1) == 0) continue;
5296
5297 ovfl_arg->ovfl_pmd = (unsigned char )i;
5298 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5299 ovfl_arg->active_set = 0;
5300 ovfl_arg->ovfl_ctrl.val = 0;
5301 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5302
5303 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5304 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5305 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5306
5307
5308
5309
5310
5311 if (smpl_pmds) {
5312 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5313 if ((smpl_pmds & 0x1) == 0) continue;
5314 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5315 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5316 }
5317 }
5318
5319 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5320
5321 start_cycles = ia64_get_itc();
5322
5323
5324
5325
5326 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5327
5328 end_cycles = ia64_get_itc();
5329
5330
5331
5332
5333
5334 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5335 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5336 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5337
5338
5339
5340 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5341
5342 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5343 }
5344
5345
5346
5347 if (ret && pmd_mask) {
5348 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5349 pmd_mask<<PMU_FIRST_COUNTER));
5350 }
5351
5352
5353
5354 ovfl_pmds &= ~reset_pmds;
5355 } else {
5356
5357
5358
5359
5360 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5361 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5362 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5363 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5364
5365
5366
5367 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5368 }
5369
5370 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5371
5372
5373
5374
5375 if (reset_pmds) {
5376 unsigned long bm = reset_pmds;
5377 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5378 }
5379
5380 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5381
5382
5383
5384 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5385
5386
5387
5388
5389 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5390
5391 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5392
5393
5394
5395
5396 PFM_SET_WORK_PENDING(task, 1);
5397
5398
5399
5400
5401
5402 set_notify_resume(task);
5403 }
5404
5405
5406
5407
5408 must_notify = 1;
5409 }
5410
5411 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5412 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5413 PFM_GET_WORK_PENDING(task),
5414 ctx->ctx_fl_trap_reason,
5415 ovfl_pmds,
5416 ovfl_notify,
5417 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5418
5419
5420
5421 if (ovfl_ctrl.bits.mask_monitoring) {
5422 pfm_mask_monitoring(task);
5423 ctx->ctx_state = PFM_CTX_MASKED;
5424 ctx->ctx_fl_can_restart = 1;
5425 }
5426
5427
5428
5429
5430 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5431
5432 return;
5433
5434sanity_check:
5435 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5436 smp_processor_id(),
5437 task ? task_pid_nr(task) : -1,
5438 pmc0);
5439 return;
5440
5441stop_monitoring:
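	/*
	 * the context is zombie: stop monitoring for this task by clearing
	 * psr.up and re-enabling secure mode (psr.sp); the interrupt is then
	 * treated as spurious
	 */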
5470 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5471 pfm_clear_psr_up();
5472 ia64_psr(regs)->up = 0;
5473 ia64_psr(regs)->sp = 1;
5474 return;
5475}
5476
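/*
 * Illustration only, compiled out: the rough shape of a custom sampling
 * format handler as invoked through ctx_buf_fmt->fmt_handler above. The
 * record layout (example_rec_t) and the handler name are hypothetical;
 * a real format registers itself with pfm_register_buffer_fmt() and
 * defines its own buffer header and record types.
 */
#if 0
typedef struct {
	unsigned long nrecords;		/* records written so far */
	unsigned long last_pmd;		/* last overflowed PMD value */
} example_rec_t;

static int
example_fmt_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
		    struct pt_regs *regs, unsigned long stamp)
{
	example_rec_t *rec = buf;

	rec->nrecords++;
	rec->last_pmd = arg->pmd_value;

	/*
	 * tell the overflow handler to reset the overflowed PMDs and keep
	 * monitoring, without notifying or blocking the task
	 */
	arg->ovfl_ctrl.val = 0;
	arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
	return 0;
}
#endif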
static int
pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx = GET_PMU_CTX();

	/*
	 * if some overflow bits are pending and there is a PMU owner,
	 * the interrupt is for us (whenever any pmc0 overflow bit is
	 * set, pmc0.fr is set as well)
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * an owner without a context is a bug
		 */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}

	/*
	 * keep the PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
}

static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;
	struct pt_regs *regs = get_irq_regs();

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	} else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu();
	return IRQ_HANDLED;
}

/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER	((void *)(long)nr_cpu_ids+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= nr_cpu_ids) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}

static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version : %u.%u\n"
		"model : %s\n"
		"fastctxsw : %s\n"
		"expert mode : %s\n"
		"ovfl_mask : 0x%lx\n"
		"PMU flags : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions : %u\n"
		"sys_sessions : %u\n"
		"sys_use_dbregs : %u\n"
		"ptrace_use_dbregs : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);

}

static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show information for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs : %lu\n"
		"CPU%-2d overflow cycles : %lu\n"
		"CPU%-2d overflow min : %lu\n"
		"CPU%-2d overflow max : %lu\n"
		"CPU%-2d smpl handler calls : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs : %lu\n"
		"CPU%-2d replay intrs : %lu\n"
		"CPU%-2d syst_wide : %d\n"
		"CPU%-2d dcr_pp : %d\n"
		"CPU%-2d exclude idle : %d\n"
		"CPU%-2d owner : %d\n"
		"CPU%-2d context : %p\n"
		"CPU%-2d activations : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
		/*
		 * with only one CPU online it is safe to peek at the
		 * live PMU registers from here
		 */
		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr : 0x%lx\n"
			"CPU%-2d pmc0 : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u : 0x%lx\n"
				"CPU%-2d pmd%u : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}

const struct seq_operations pfm_seq_ops = {
	.start = pfm_proc_start,
	.next = pfm_proc_next,
	.stop = pfm_proc_stop,
	.show = pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}

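/*
 * User-level illustration, compiled out: /proc/perfmon is an ordinary
 * sequential text file, so the header and per-CPU statistics emitted by
 * pfm_proc_show() can be read with plain stdio from user space.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	char line[256];
	FILE *f = fopen("/proc/perfmon", "r");

	if (f == NULL) return 1;
	while (fgets(line, sizeof(line), f)) fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif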
/*
 * called on every context switch of a CPU running in system-wide mode.
 * We cannot assume monitoring is active or inactive from the mode alone:
 * we must rely on the value passed in info (the per-CPU pfm_syst_info word).
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task
	 * with pid 0 on every CPU, so we can rely on the pid to identify
	 * the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}

	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);

		/*
		 * context switching in: stop monitoring for the idle task
		 */
		if (is_ctxswin) {
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}

		/*
		 * context switching out of the idle task:
		 * restore system-wide monitoring for the next task
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}

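/*
 * Summary of the cases handled by pfm_syst_wide_update_task() when
 * PFM_CPUINFO_EXCL_IDLE is set and the incoming/outgoing task is the
 * idle task (pid 0):
 *   switch-in : dcr.pp and psr.pp cleared -> counting pauses while idle
 *   switch-out: dcr.pp and psr.pp set     -> counting resumes for next task
 * Every other task merely gets its saved psr.pp bit adjusted.
 */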
#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n",
			task_pid_nr(ctx->ctx_task)));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context = NULL;
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}

/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler, so we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);
		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * this is the last instruction which may generate an overflow
	 *
	 * we do not need to set psr.sp because it is irrelevant in kernel;
	 * it will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PMU interrupts are masked, so nothing can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMDs: we have no guarantee
	 * we will be scheduled on this same CPU again.
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 (ia64_srlz_d() done in pfm_save_pmds());
	 * we will need it on the restore path to check for
	 * pending overflows.
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}

#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * this is the last instruction which may generate an overflow
	 *
	 * we do not need to set psr.sp because it is irrelevant in kernel;
	 * it will be restored from ipsr when going back to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}

static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);

	/*
	 * we need to mask PMU overflow interrupts here to make sure
	 * that pmc0 is preserved until we save it. Overflow interrupts
	 * are treated as spurious once there is no owner.
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers: after this call
	 * any PMU interrupt is treated as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0 (ia64_srlz_d() done in pfm_save_pmds());
	 * it is needed to check for pending overflows on the restore path.
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * we can now unmask PMU interrupts: they will be treated as
	 * purely spurious and we will not lose any information.
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * in 2.6, interrupts are masked when we come here and the runqueue lock is held
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	/*
	 * possible on unload
	 */
	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler, so we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we do
		 * not use).
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded: unused
		 * registers keep their default (from pfm_reset_pmu_state())
		 * values to avoid picking up stale configuration.
		 *
		 * PMC0 is never in the mask; it is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}

	/*
	 * when the context is MASKED, we restore PMCs with plm=0 and PMDs
	 * with stale information, but that is ok: nothing will be captured.
	 */
	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflows at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information;
		 * on McKinley PMUs this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();
		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * bump the activation counter for this PMU
	 */
	INC_ACTIVATION();

	/*
	 * record the current activation in the context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit: measurement is active again.
	 * no PMU interrupt can happen at this point because
	 * interrupts are still disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to the context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
/*
 * reload PMU state for UP kernels: we come here with
 * interrupts disabled
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx = PFM_GET_CTX(task);
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * because the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path: our state is still on the PMU, we only
	 * need to restore psr.up
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU: push its state out
	 * first, then we can install our own.
	 *
	 * upon return there is no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we do
	 * not use).
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded: unused
	 * registers keep their default (from pfm_reset_pmu_state())
	 * values to avoid picking up stale configuration.
	 *
	 * PMC0 is never in the mask; it is always restored separately.
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflows at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information;
		 * on McKinley PMUs this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();

		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit: measurement is active again.
	 * no PMU interrupt can happen at this point because
	 * interrupts are still disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */

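/*
 * Note on the reload policy above: pfm_sysctl.fastctxsw selects between
 * reloading all implemented PMDs (the safe default) and only the PMDs in
 * use. The knob is exposed through the perfmon sysctl table; assuming the
 * usual mount points, something like
 *	echo 1 > /proc/sys/kernel/perfmon/fastctxsw
 * enables the faster, restricted reload.
 */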
/*
 * this function assumes monitoring is stopped
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system-wide measurements)?
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * we can access the PMU if the task owns the PMU state on the
	 * current CPU, or if we are running on the CPU bound to the
	 * context in system-wide mode (which is not necessarily the task
	 * the context is attached to in that mode).
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * Mark the PMU as not owned. This makes the interrupt
		 * handler do nothing if an overflow interrupt is in flight
		 * and guarantees that pmc0 contains the final state,
		 * giving us full control over overflow processing from
		 * here on.
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read the current overflow status:
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0);

		/*
		 * reset the freeze bit; this destroys the overflow
		 * status information
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = ctx->th_pmcs[0];

		/*
		 * clear whatever overflow status bits there were
		 */
		ctx->th_pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;

	/*
	 * we save all the used pmds and take care of overflows
	 * for counting PMDs
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip unused pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can_access_pmu is always true in system-wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task_pid_nr(task),
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * rebuild the full 64-bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * everything now lives in ctx_pmds[]: clear the
			 * saved copy from pfm_save_regs() so that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflows inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));

		if (is_self) ctx->th_pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}

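/*
 * Worked example for the 64-bit reconstruction in pfm_flush_pmds(),
 * assuming a 47-bit hardware counter, i.e. ovfl_val = 2^47 - 1 =
 * 0x7fffffffffff: if the software copy ctx_pmds[i].val holds
 * 0x800000000000 (one earlier overflow already folded in), the hardware
 * PMD reads 0x1234 and pmc0 flags a pending overflow for counter i, then
 *
 *	val = 0x800000000000 + 0x1234 + (1 + 0x7fffffffffff)
 *	    = 0x1000000001234
 *
 * so the pending overflow contributes another 2^47 to the 64-bit total.
 */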
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags = IRQF_DISABLED,
	.name = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but
	 * better be safe than sorry
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * freeze the PMU so that no further overflow interrupts are
	 * generated while the alternate handler takes over
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}

void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * put the PMU back into a known, stopped state
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with the PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}

int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve a system-wide session on every online CPU */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system-wide PMU state on each CPU */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);

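/*
 * Illustration only, compiled out: how a client (e.g. a profiling driver)
 * would take over the PMU interrupt using the two exported entry points
 * above. The handler body and all example_* names are hypothetical.
 */
#if 0
static void
example_alt_handler(int irq, void *arg, struct pt_regs *regs)
{
	/* consume the overflow, e.g. hand the sample to a profiler */
}

static pfm_intr_handler_desc_t example_desc = {
	.handler = example_alt_handler,
};

static int __init
example_init(void)
{
	return pfm_install_alt_pmu_interrupt(&example_desc);
}

static void __exit
example_exit(void)
{
	pfm_remove_alt_pmu_interrupt(&example_desc);
}
#endif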
/*
 * perfmon initialization, called from the initcall table via pfm_init()
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p = pmu_confs;

	while (*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

static const struct file_operations pfm_proc_fops = {
	.open = pfm_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk(KERN_INFO "perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
			local_cpu_data->family);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMD/PMC from the
	 * description tables
	 */
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds = n;
	pmu_conf->num_counters = n_counters;

	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}

	printk(KERN_INFO "perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
	       pmu_conf->pmu_name,
	       pmu_conf->num_pmcs,
	       pmu_conf->num_pmds,
	       pmu_conf->num_counters,
	       ffz(pmu_conf->ovfl_val));

	/* sanity check: the register description tables must fit */
	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for (i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}

__initcall(pfm_init);

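/*
 * Example of the bitmap arithmetic used in pfm_init(): for register
 * index i = 70, the implemented bit lands in impl_pmcs[70 >> 6] =
 * impl_pmcs[1], at bit position 70 & 63 = 6, i.e.
 *	impl_pmcs[1] |= 1UL << 6;
 */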
/*
 * this function is called before pfm_init()
 */
void
pfm_init_percpu (void)
{
	static int first_time = 1;

	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI)
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time = 0;
	}

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}

/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs = task_pt_regs(current);
	info = PFM_CPUINFO_GET();
	dcr = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		task_pid_nr(current),
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	/*
	 * ctx may be NULL here (no owner): guard the per-thread values
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n",
			this_cpu, i, ia64_get_pmc(i), i, ctx ? ctx->th_pmcs[i] : 0UL);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n",
			this_cpu, i, ia64_get_pmd(i), i, ctx ? ctx->th_pmds[i] : 0UL);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_msgq_head,
			ctx->ctx_msgq_tail,
			ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}

/*
 * called from process.c:copy_thread(); task is the new child
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));

	thread = &task->thread;

	/*
	 * cut links inherited from the parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_thread()
	 */
}
#else /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */