1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/interrupt.h>
26#include <linux/proc_fs.h>
27#include <linux/seq_file.h>
28#include <linux/init.h>
29#include <linux/vmalloc.h>
30#include <linux/mm.h>
31#include <linux/sysctl.h>
32#include <linux/list.h>
33#include <linux/file.h>
34#include <linux/poll.h>
35#include <linux/vfs.h>
36#include <linux/smp.h>
37#include <linux/pagemap.h>
38#include <linux/mount.h>
39#include <linux/bitops.h>
40#include <linux/capability.h>
41#include <linux/rcupdate.h>
42#include <linux/completion.h>
43#include <linux/tracehook.h>
44#include <linux/slab.h>
45#include <linux/cpu.h>
46
47#include <asm/errno.h>
48#include <asm/intrinsics.h>
49#include <asm/page.h>
50#include <asm/perfmon.h>
51#include <asm/processor.h>
52#include <asm/signal.h>
53#include <asm/uaccess.h>
54#include <asm/delay.h>
55
56#ifdef CONFIG_PERFMON
57
58
59
60#define PFM_CTX_UNLOADED 1
61#define PFM_CTX_LOADED 2
62#define PFM_CTX_MASKED 3
63#define PFM_CTX_ZOMBIE 4
64
65#define PFM_INVALID_ACTIVATION (~0UL)
66
67#define PFM_NUM_PMC_REGS 64
68#define PFM_NUM_PMD_REGS 64
69
70
71
72
73#define PFM_MAX_MSGS 32
74#define PFM_CTXQ_EMPTY(g) ((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
75
76
77
78
79
80
81
82
83
84
85
86
87#define PFM_REG_NOTIMPL 0x0
88#define PFM_REG_IMPL 0x1
89#define PFM_REG_END 0x2
90#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL)
91#define PFM_REG_COUNTING (0x2<<4|PFM_REG_MONITOR)
92#define PFM_REG_CONTROL (0x4<<4|PFM_REG_IMPL)
93#define PFM_REG_CONFIG (0x8<<4|PFM_REG_IMPL)
94#define PFM_REG_BUFFER (0xc<<4|PFM_REG_IMPL)
95
96#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
97#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
98
99#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
100
101
/*
 * A register index is implemented when it is in range and its descriptor
 * carries the IMPL bit.  Arguments are parenthesized so expressions such
 * as PMC_IS_IMPL(x + 1) expand with the intended precedence.
 */
#define PMC_IS_IMPL(i)	  (((i) < PMU_MAX_PMCS) && (pmu_conf->pmc_desc[(i)].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (((i) < PMU_MAX_PMDS) && (pmu_conf->pmd_desc[(i)].type & PFM_REG_IMPL))
104
105
106#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
107#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
108#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
109#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
110
111#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
112#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
113#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
114#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
115
116#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
117#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
118
119#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
120#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
121#define PFM_CTX_TASK(h) (h)->ctx_task
122
123#define PMU_PMC_OI 5
124
125
126#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
127#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
128
129#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
130
131#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
132#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
133#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
134#define PFM_CODE_RR 0
135#define PFM_DATA_RR 1
136
137#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
138#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
139#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
140
141#define RDEP(x) (1UL<<(x))
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161#define PROTECT_CTX(c, f) \
162 do { \
163 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
164 spin_lock_irqsave(&(c)->ctx_lock, f); \
165 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
166 } while(0)
167
168#define UNPROTECT_CTX(c, f) \
169 do { \
170 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
171 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
172 } while(0)
173
174#define PROTECT_CTX_NOPRINT(c, f) \
175 do { \
176 spin_lock_irqsave(&(c)->ctx_lock, f); \
177 } while(0)
178
179
180#define UNPROTECT_CTX_NOPRINT(c, f) \
181 do { \
182 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
183 } while(0)
184
185
186#define PROTECT_CTX_NOIRQ(c) \
187 do { \
188 spin_lock(&(c)->ctx_lock); \
189 } while(0)
190
191#define UNPROTECT_CTX_NOIRQ(c) \
192 do { \
193 spin_unlock(&(c)->ctx_lock); \
194 } while(0)
195
196
197#ifdef CONFIG_SMP
198
199#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
200#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
201#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
202
203#else
204#define SET_ACTIVATION(t) do {} while(0)
205#define GET_ACTIVATION(t) do {} while(0)
206#define INC_ACTIVATION(t) do {} while(0)
207#endif
208
209#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
210#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
211#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
212
213#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
214#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
215
216#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
217
218
219
220
221#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
222
223#define PFMFS_MAGIC 0xa0b4d889
224
225
226
227
228#define PFM_DEBUGGING 1
229#ifdef PFM_DEBUGGING
230#define DPRINT(a) \
231 do { \
232 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
233 } while (0)
234
235#define DPRINT_ovfl(a) \
236 do { \
237 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
238 } while (0)
239#endif
240
241
242
243
244
245
/*
 * Software state for one counting PMD register.  The 64-bit virtual
 * counter is split between hardware (low bits, up to the overflow
 * threshold) and the 'val' field here (high bits).
 */
typedef struct {
	unsigned long	val;		/* software copy / high bits of the virtual 64-bit counter */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value applied on explicit (long) restart */
	unsigned long	short_reset;	/* reset value applied on overflow (short) restart */
	unsigned long	reset_pmds[4];	/* bitmask of other PMDs to reset when this one overflows */
	unsigned long	smpl_pmds[4];	/* bitmask of PMDs recorded with this one on overflow */
	unsigned long	seed;		/* seed for random-reset generation */
	unsigned long	mask;		/* mask applied to the random value */
	unsigned int	flags;		/* per-register flags, e.g. PFM_REGFL_OVFL_NOTIFY */
	unsigned long	eventid;	/* user-supplied event identifier, echoed in overflow messages */
} pfm_counter_t;
258
259
260
261
/*
 * Per-context boolean state, packed as a bitfield.
 */
typedef struct {
	unsigned int block:1;		/* task blocks in kernel on counter overflow notification */
	unsigned int system:1;		/* system-wide (per-CPU) session rather than per-task */
	unsigned int using_dbreg:1;	/* context uses the debug registers (ibr/dbr) */
	unsigned int is_sampling:1;	/* a sampling buffer format is attached */
	unsigned int excl_idle:1;	/* exclude the idle task (system-wide only) */
	unsigned int going_zombie:1;	/* context is being torn down while still attached */
	unsigned int trap_reason:2;	/* why pfm work is pending: PFM_TRAP_REASON_* */
	unsigned int no_msg:1;		/* suppress overflow notification messages */
	unsigned int can_restart:1;	/* a PFM_RESTART is currently legal on this context */
	unsigned int reserved:22;	/* pad to 32 bits */
} pfm_context_flags_t;
274
275#define PFM_TRAP_REASON_NONE 0x0
276#define PFM_TRAP_REASON_BLOCK 0x1
277#define PFM_TRAP_REASON_RESET 0x2
278
279
280
281
282
283
/*
 * One perfmon monitoring context, created per open perfmon file
 * descriptor.  Protected by ctx_lock; may be bound to a task
 * (per-task mode) or to a CPU (system-wide mode).
 */
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* protects all mutable fields below */

	pfm_context_flags_t	ctx_flags;		/* bitfield state, see pfm_context_flags_t */
	unsigned int		ctx_state;		/* PFM_CTX_UNLOADED/LOADED/MASKED/ZOMBIE */

	struct task_struct 	*ctx_task;		/* task the context is attached to, NULL if unloaded */

	unsigned long		ctx_ovfl_regs[4];	/* PMDs that overflowed and triggered notification */

	struct completion	ctx_restart_done;	/* blocked-on-overflow task waits here for PFM_RESTART */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMDs in use */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of PMDs to reload on ctxsw (per-task) */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of PMCs to reload on ctxsw (per-task) */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMCs in use */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved PMC values for masking/restoring */

	unsigned int		ctx_used_ibrs[1];	/* bitmask of used instruction debug registers */
	unsigned int		ctx_used_dbrs[1];	/* bitmask of used data debug registers */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* saved data debug register values */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* saved instruction debug register values */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software PMD state (64-bit virtualization) */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save area, loaded on ctxsw */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save area, loaded on ctxsw */

	unsigned long		ctx_saved_psr_up;	/* only psr.up is saved/restored per context */

	unsigned long		ctx_last_activation;	/* activation number at last PMU load (SMP lazy restore) */
	unsigned int		ctx_last_cpu;		/* CPU the PMU state was last loaded on */
	unsigned int		ctx_cpu;		/* CPU bound to the context (system-wide mode) */

	int			ctx_fd;			/* file descriptor exposing this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* scratch argument passed to the sampling format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* sampling buffer format in use, if any */
	void			*ctx_smpl_hdr;		/* kernel address of sampling buffer header */
	unsigned long		ctx_smpl_size;		/* sampling buffer size in bytes */
	void			*ctx_smpl_vaddr;	/* user virtual address of the mapped sampling buffer */

	wait_queue_head_t 	ctx_msgq_wait;		/* readers blocked waiting for messages */
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];	/* circular notification message queue */
	int			ctx_msgq_head;		/* index of oldest message */
	int			ctx_msgq_tail;		/* index of next free slot */
	struct fasync_struct	*ctx_async_queue;	/* SIGIO delivery list */

	wait_queue_head_t 	ctx_zombieq;		/* waiters for zombie-context teardown */
} pfm_context_t;
338
339
340
341
342
343#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
344
345#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
346
347#ifdef CONFIG_SMP
348#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
349#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
350#else
351#define SET_LAST_CPU(ctx, v) do {} while(0)
352#define GET_LAST_CPU(ctx) do {} while(0)
353#endif
354
355
356#define ctx_fl_block ctx_flags.block
357#define ctx_fl_system ctx_flags.system
358#define ctx_fl_using_dbreg ctx_flags.using_dbreg
359#define ctx_fl_is_sampling ctx_flags.is_sampling
360#define ctx_fl_excl_idle ctx_flags.excl_idle
361#define ctx_fl_going_zombie ctx_flags.going_zombie
362#define ctx_fl_trap_reason ctx_flags.trap_reason
363#define ctx_fl_no_msg ctx_flags.no_msg
364#define ctx_fl_can_restart ctx_flags.can_restart
365
/*
 * Mark/clear the per-thread "perfmon work pending" flag checked on return
 * to user mode.  No trailing semicolon after while(0): callers supply it,
 * and the stray one would break use inside if/else constructs.
 */
#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
367#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
368
369
370
371
372
/*
 * Global session accounting: enforces mutual exclusion between
 * system-wide and per-task sessions, and between perfmon and ptrace
 * use of the debug registers.
 */
typedef struct {
	spinlock_t		pfs_lock;		   /* protects all counters below */

	unsigned int		pfs_task_sessions;	   /* number of per-task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per-CPU (system-wide) sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* system-wide sessions using debug registers */
	unsigned int		pfs_ptrace_use_dbregs;	   /* tasks whose debug registers are owned by ptrace */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* owner task of each per-CPU session */
} pfm_session_t;
382
383
384
385
386
387
/*
 * Per-register description supplied by the CPU-model-specific tables
 * (perfmon_itanium.h etc.).  read_check/write_check may sanitize or
 * veto a value; *val can be modified in place by the checker.
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;		/* PFM_REG_* classification bits */
	int			pm_pos;		/* bit position of the pm (privileged monitor) bit */
	unsigned long		default_value;	/* power-on/reset default value */
	unsigned long		reserved_mask;	/* bits that must not be modified by users */
	pfm_reg_check_t		read_check;	/* optional hook run on user reads */
	pfm_reg_check_t		write_check;	/* optional hook run on user writes */
	unsigned long		dep_pmd[4];	/* bitmask of PMDs depending on this register */
	unsigned long		dep_pmc[4];	/* bitmask of PMCs depending on this register */
} pfm_reg_desc_t;
399
400
401#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
402
403
404
405
406
407
408
409
410
411
412
413
414
/*
 * Description of one PMU model; filled in by the per-CPU-family
 * headers and selected at init time via the probe() callback.
 */
typedef struct {
	unsigned long  ovfl_val;	/* mask of counter bits implemented in hardware (overflow threshold) */

	pfm_reg_desc_t *pmc_desc;	/* PMC register description table */
	pfm_reg_desc_t *pmd_desc;	/* PMD register description table */

	unsigned int   num_pmcs;	/* number of implemented PMCs */
	unsigned int   num_pmds;	/* number of implemented PMDs */
	unsigned long  impl_pmcs[4];	/* bitmask of implemented PMCs */
	unsigned long  impl_pmds[4];	/* bitmask of implemented PMDs */

	char	      *pmu_name;	/* human-readable PMU model name */
	unsigned int  pmu_family;	/* CPUID family this config applies to */
	unsigned int  flags;		/* PFM_PMU_* flags, e.g. PFM_PMU_IRQ_RESEND */
	unsigned int  num_ibrs;		/* number of instruction debug registers */
	unsigned int  num_dbrs;		/* number of data debug registers */
	unsigned int  num_counters;	/* number of PMD/PMC counter pairs */
	int           (*probe)(void);	/* returns 0 if this config matches the running CPU */
	unsigned int  use_rr_dbregs:1;	/* set if debug registers are used for range restriction */
} pmu_config_t;
435
436
437
438#define PFM_PMU_IRQ_RESEND 1
439
440
441
442
/*
 * Bit layouts of the IA-64 instruction (ibr) and data (dbr) debug
 * mask registers, plus a union for raw access.
 */
typedef struct {
	unsigned long ibr_mask:56;	/* address mask */
	unsigned long ibr_plm:4;	/* privilege level mask */
	unsigned long ibr_ig:3;		/* ignored bits */
	unsigned long ibr_x:1;		/* execute-match enable */
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;	/* address mask */
	unsigned long dbr_plm:4;	/* privilege level mask */
	unsigned long dbr_ig:2;		/* ignored bits */
	unsigned long dbr_w:1;		/* write-match enable */
	unsigned long dbr_r:1;		/* read-match enable */
} dbr_mask_reg_t;

typedef union {
	unsigned long  val;		/* raw 64-bit value */
	ibr_mask_reg_t ibr;		/* decoded as an instruction debug register */
	dbr_mask_reg_t dbr;		/* decoded as a data debug register */
} dbreg_t;
463
464
465
466
467
/*
 * One entry of the perfctl command dispatch table.
 */
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); /* handler */
	char		*cmd_name;	/* command name, for debug output */
	int		cmd_flags;	/* PFM_CMD_* dispatch flags */
	unsigned int	cmd_narg;	/* expected argument count (or PFM_CMD_ARG_MANY) */
	size_t		cmd_argsize;	/* size of one argument element */
	int		(*cmd_getsize)(void *arg, size_t *sz);	/* optional variable-size computation */
} pfm_cmd_desc_t;
476
477#define PFM_CMD_FD 0x01
478#define PFM_CMD_ARG_READ 0x02
479#define PFM_CMD_ARG_RW 0x04
480#define PFM_CMD_STOP 0x08
481
482
483#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
484#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
485#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
486#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
487#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
488
489#define PFM_CMD_ARG_MANY -1
490
/*
 * Per-CPU interrupt/sampling statistics, exported via /proc/perfmon.
 * Padded to a cache line to avoid false sharing across CPUs.
 */
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* overflow interrupts with no active context */
	unsigned long pfm_replay_ovfl_intr_count;	/* overflow interrupts replayed via IRQ resend */
	unsigned long pfm_ovfl_intr_count; 		/* overflow interrupts handled */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent in the overflow handler */
	unsigned long pfm_ovfl_intr_cycles_min;		/* shortest handler run, in cycles */
	unsigned long pfm_ovfl_intr_cycles_max;		/* longest handler run, in cycles */
	unsigned long pfm_smpl_handler_calls;		/* invocations of the sampling format handler */
	unsigned long pfm_smpl_handler_cycles;		/* cycles spent in the sampling format handler */
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned; /* isolate each CPU's stats on its own line */
} pfm_stats_t;
502
503
504
505
506static pfm_stats_t pfm_stats[NR_CPUS];
507static pfm_session_t pfm_sessions;
508
509static DEFINE_SPINLOCK(pfm_alt_install_check);
510static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
511
512static struct proc_dir_entry *perfmon_dir;
513static pfm_uuid_t pfm_null_uuid = {0,};
514
515static spinlock_t pfm_buffer_fmt_lock;
516static LIST_HEAD(pfm_buffer_fmt_list);
517
518static pmu_config_t *pmu_conf;
519
520
521pfm_sysctl_t pfm_sysctl;
522EXPORT_SYMBOL(pfm_sysctl);
523
/*
 * sysctl tree: /proc/sys/kernel/perfmon/{debug,debug_ovfl,fastctxsw,expert_mode}
 * NOTE(review): debug and debug_ovfl are mode 0666 (world-writable), which
 * lets any user enable verbose printk logging — confirm this is intended.
 */
static ctl_table pfm_ctl_table[]={
	{
		/* enable DPRINT() debug output */
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		/* enable DPRINT_ovfl() overflow-path debug output */
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		/* skip lazy save/restore optimizations on context switch */
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{
		/* relax value checks on PMC/PMD writes */
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{}
};
/* kernel/perfmon directory node */
static ctl_table pfm_sysctl_dir[] = {
	{
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};
/* root "kernel" node anchoring the subtree */
static ctl_table pfm_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
571static struct ctl_table_header *pfm_sysctl_header;
572
573static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
574
575#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
576#define pfm_get_cpu_data(a,b) per_cpu(a, b)
577
578static inline void
579pfm_put_task(struct task_struct *task)
580{
581 if (task != current) put_task_struct(task);
582}
583
/*
 * Mark the page backing a vmalloc'ed address as reserved so it can be
 * safely remapped into user space.
 */
static inline void
pfm_reserve_page(unsigned long a)
{
	struct page *page = vmalloc_to_page((void *)a);

	SetPageReserved(page);
}
/* Undo pfm_reserve_page(): clear the reserved bit on the backing page. */
static inline void
pfm_unreserve_page(unsigned long a)
{
	struct page *page = vmalloc_to_page((void *)a);

	ClearPageReserved(page);
}
594
595static inline unsigned long
596pfm_protect_ctx_ctxsw(pfm_context_t *x)
597{
598 spin_lock(&(x)->ctx_lock);
599 return 0UL;
600}
601
602static inline void
603pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
604{
605 spin_unlock(&(x)->ctx_lock);
606}
607
608
609static const struct dentry_operations pfmfs_dentry_operations;
610
/*
 * Mount callback for the internal "pfmfs" pseudo filesystem that backs
 * perfmon context file descriptors (never user-mountable).
 */
static struct dentry *
pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
			PFMFS_MAGIC);
}
617
618static struct file_system_type pfm_fs_type = {
619 .name = "pfmfs",
620 .mount = pfmfs_mount,
621 .kill_sb = kill_anon_super,
622};
623MODULE_ALIAS_FS("pfmfs");
624
625DEFINE_PER_CPU(unsigned long, pfm_syst_info);
626DEFINE_PER_CPU(struct task_struct *, pmu_owner);
627DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
628DEFINE_PER_CPU(unsigned long, pmu_activation_number);
629EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
630
631
632
633static const struct file_operations pfm_file_ops;
634
635
636
637
638#ifndef CONFIG_SMP
639static void pfm_lazy_save_regs (struct task_struct *ta);
640#endif
641
642void dump_pmu_state(const char *);
643static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
644
645#include "perfmon_itanium.h"
646#include "perfmon_mckinley.h"
647#include "perfmon_montecito.h"
648#include "perfmon_generic.h"
649
650static pmu_config_t *pmu_confs[]={
651 &pmu_conf_mont,
652 &pmu_conf_mck,
653 &pmu_conf_ita,
654 &pmu_conf_gen,
655 NULL
656};
657
658
659static int pfm_end_notify_user(pfm_context_t *ctx);
660
/* Clear psr.pp (privileged monitoring) and serialize. */
static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}
667
/* Set psr.pp (privileged monitoring) and serialize. */
static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}
674
/* Clear psr.up (user-level monitoring) and serialize. */
static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}
681
/* Set psr.up (user-level monitoring) and serialize. */
static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}
688
689static inline unsigned long
690pfm_get_psr(void)
691{
692 unsigned long tmp;
693 tmp = ia64_getreg(_IA64_REG_PSR);
694 ia64_srlz_i();
695 return tmp;
696}
697
/* Write the lower half of psr and serialize. */
static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}
704
/* Freeze the PMU by setting the freeze bit (bit 0) of pmc0. */
static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}
711
/* Unfreeze the PMU by clearing pmc0. */
static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}
718
/*
 * Reload the instruction debug registers from ibrs[0..nibrs-1].
 * Each write is followed by the required data-value serialization;
 * a single srlz.i closes the whole sequence.
 */
static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	unsigned int idx;

	for (idx = 0; idx < nibrs; idx++) {
		ia64_set_ibr(idx, ibrs[idx]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}
730
/*
 * Reload the data debug registers from dbrs[0..ndbrs-1], serializing
 * after each write and once more at the end.
 */
static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	unsigned int idx;

	for (idx = 0; idx < ndbrs; idx++) {
		ia64_set_dbr(idx, dbrs[idx]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}
742
743
744
745
746static inline unsigned long
747pfm_read_soft_counter(pfm_context_t *ctx, int i)
748{
749 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
750}
751
752
753
754
755static inline void
756pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
757{
758 unsigned long ovfl_val = pmu_conf->ovfl_val;
759
760 ctx->ctx_pmds[i].val = val & ~ovfl_val;
761
762
763
764
765 ia64_set_pmd(i, val & ovfl_val);
766}
767
768static pfm_msg_t *
769pfm_get_new_msg(pfm_context_t *ctx)
770{
771 int idx, next;
772
773 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
774
775 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
776 if (next == ctx->ctx_msgq_head) return NULL;
777
778 idx = ctx->ctx_msgq_tail;
779 ctx->ctx_msgq_tail = next;
780
781 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
782
783 return ctx->ctx_msgq+idx;
784}
785
786static pfm_msg_t *
787pfm_get_next_msg(pfm_context_t *ctx)
788{
789 pfm_msg_t *msg;
790
791 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
792
793 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
794
795
796
797
798 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
799
800
801
802
803 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
804
805 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
806
807 return msg;
808}
809
810static void
811pfm_reset_msgq(pfm_context_t *ctx)
812{
813 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
814 DPRINT(("ctx=%p msgq reset\n", ctx));
815}
816
817static void *
818pfm_rvmalloc(unsigned long size)
819{
820 void *mem;
821 unsigned long addr;
822
823 size = PAGE_ALIGN(size);
824 mem = vzalloc(size);
825 if (mem) {
826
827 addr = (unsigned long)mem;
828 while (size > 0) {
829 pfm_reserve_page(addr);
830 addr+=PAGE_SIZE;
831 size-=PAGE_SIZE;
832 }
833 }
834 return mem;
835}
836
837static void
838pfm_rvfree(void *mem, unsigned long size)
839{
840 unsigned long addr;
841
842 if (mem) {
843 DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
844 addr = (unsigned long) mem;
845 while ((long) size > 0) {
846 pfm_unreserve_page(addr);
847 addr+=PAGE_SIZE;
848 size-=PAGE_SIZE;
849 }
850 vfree(mem);
851 }
852 return;
853}
854
855static pfm_context_t *
856pfm_context_alloc(int ctx_flags)
857{
858 pfm_context_t *ctx;
859
860
861
862
863
864 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
865 if (ctx) {
866 DPRINT(("alloc ctx @%p\n", ctx));
867
868
869
870
871 spin_lock_init(&ctx->ctx_lock);
872
873
874
875
876 ctx->ctx_state = PFM_CTX_UNLOADED;
877
878
879
880
881 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
882 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
883 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
884
885
886
887
888
889
890
891
892 init_completion(&ctx->ctx_restart_done);
893
894
895
896
897 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
898 SET_LAST_CPU(ctx, -1);
899
900
901
902
903 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
904 init_waitqueue_head(&ctx->ctx_msgq_wait);
905 init_waitqueue_head(&ctx->ctx_zombieq);
906
907 }
908 return ctx;
909}
910
911static void
912pfm_context_free(pfm_context_t *ctx)
913{
914 if (ctx) {
915 DPRINT(("free ctx @%p\n", ctx));
916 kfree(ctx);
917 }
918}
919
/*
 * Stop monitoring for the task's context without unloading it: snapshot
 * all used PMDs into software, then clear the plm fields of the monitor
 * PMCs so no event can count.  Called with the context locked; kept
 * byte-identical because the order of PMU reads/writes matters.
 */
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));

	ovfl_mask = pmu_conf->ovfl_val;

	/*
	 * First pass: fold current hardware PMD values into the software
	 * copies so nothing is lost while monitoring is masked.
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non-used PMDs */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/* counters: accumulate only the hardware (low) bits */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			/* non-counters: take the raw value */
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}

	/*
	 * Second pass: silence every monitor PMC by clearing its plm
	 * field (low 4 bits) in both hardware and the thread save area,
	 * so a context switch cannot re-enable monitoring.
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}

	/* make sure all PMC writes have taken effect */
	ia64_srlz_d();
}
988
989
990
991
992
993
/*
 * Undo pfm_mask_monitoring(): reload PMDs and PMCs from the context and
 * re-enable monitoring.  Must run on the task itself with the context in
 * MASKED state.  Kept byte-identical: monitoring is deliberately stopped
 * (psr.pp/up cleared) before touching the registers and only re-enabled
 * at the very end via pfm_set_psr_l().
 */
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	/* may only be invoked on the owning task */
	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();

	/*
	 * Stop monitoring while registers are being rewritten.  For a
	 * system-wide session with privileged monitoring active, also
	 * drop dcr.pp; otherwise clearing psr.up is enough.
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr.pp and psr.pp together */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}

	/* reload every used PMD from its software copy */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {

		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * counters: hardware gets only the low bits; the
			 * high bits stay in the software copy
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}

	/* restore the monitor PMCs (plm fields were cleared when masking) */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n",
					task_pid_nr(task), i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	/* reinstall the debug registers if the context uses them */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/* re-enable dcr.pp for privileged system-wide monitoring */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	/* finally restore psr, which re-enables psr.pp/up as saved */
	pfm_set_psr_l(psr);
}
1089
/*
 * Snapshot the hardware PMDs selected by the bitmask into pmds[].
 * The leading serialization guarantees the reads see settled values.
 */
static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i = 0; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0)
			continue;
		pmds[i] = ia64_get_pmd(i);
	}
}
1101
1102
1103
1104
1105static inline void
1106pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1107{
1108 int i;
1109 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1110
1111 for (i=0; mask; i++, mask>>=1) {
1112 if ((mask & 0x1) == 0) continue;
1113 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1114 ia64_set_pmd(i, val);
1115 }
1116 ia64_srlz_d();
1117}
1118
1119
1120
1121
1122static inline void
1123pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1124{
1125 unsigned long ovfl_val = pmu_conf->ovfl_val;
1126 unsigned long mask = ctx->ctx_all_pmds[0];
1127 unsigned long val;
1128 int i;
1129
1130 DPRINT(("mask=0x%lx\n", mask));
1131
1132 for (i=0; mask; i++, mask>>=1) {
1133
1134 val = ctx->ctx_pmds[i].val;
1135
1136
1137
1138
1139
1140
1141
1142 if (PMD_IS_COUNTING(i)) {
1143 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1144 val &= ovfl_val;
1145 }
1146 ctx->th_pmds[i] = val;
1147
1148 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1149 i,
1150 ctx->th_pmds[i],
1151 ctx->ctx_pmds[i].val));
1152 }
1153}
1154
1155
1156
1157
1158static inline void
1159pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1160{
1161 unsigned long mask = ctx->ctx_all_pmcs[0];
1162 int i;
1163
1164 DPRINT(("mask=0x%lx\n", mask));
1165
1166 for (i=0; mask; i++, mask>>=1) {
1167
1168 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1169 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1170 }
1171}
1172
1173
1174
/* Reload hardware PMCs from pmcs[] for every register in the bitmask. */
static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i = 0; mask; i++, mask >>= 1) {
		if (mask & 0x1)
			ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}
1186
/* memcmp-style comparison of two buffer-format UUIDs (0 == equal). */
static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}
1192
1193static inline int
1194pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1195{
1196 int ret = 0;
1197 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1198 return ret;
1199}
1200
1201static inline int
1202pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1203{
1204 int ret = 0;
1205 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1206 return ret;
1207}
1208
1209
1210static inline int
1211pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1212 int cpu, void *arg)
1213{
1214 int ret = 0;
1215 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1216 return ret;
1217}
1218
1219static inline int
1220pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1221 int cpu, void *arg)
1222{
1223 int ret = 0;
1224 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1225 return ret;
1226}
1227
1228static inline int
1229pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1230{
1231 int ret = 0;
1232 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1233 return ret;
1234}
1235
1236static inline int
1237pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1238{
1239 int ret = 0;
1240 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1241 return ret;
1242}
1243
1244static pfm_buffer_fmt_t *
1245__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1246{
1247 struct list_head * pos;
1248 pfm_buffer_fmt_t * entry;
1249
1250 list_for_each(pos, &pfm_buffer_fmt_list) {
1251 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1252 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1253 return entry;
1254 }
1255 return NULL;
1256}
1257
1258
1259
1260
1261static pfm_buffer_fmt_t *
1262pfm_find_buffer_fmt(pfm_uuid_t uuid)
1263{
1264 pfm_buffer_fmt_t * fmt;
1265 spin_lock(&pfm_buffer_fmt_lock);
1266 fmt = __pfm_find_buffer_fmt(uuid);
1267 spin_unlock(&pfm_buffer_fmt_lock);
1268 return fmt;
1269}
1270
1271int
1272pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1273{
1274 int ret = 0;
1275
1276
1277 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1278
1279
1280 if (fmt->fmt_handler == NULL) return -EINVAL;
1281
1282
1283
1284
1285
1286 spin_lock(&pfm_buffer_fmt_lock);
1287
1288 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1289 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1290 ret = -EBUSY;
1291 goto out;
1292 }
1293 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1294 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1295
1296out:
1297 spin_unlock(&pfm_buffer_fmt_lock);
1298 return ret;
1299}
1300EXPORT_SYMBOL(pfm_register_buffer_fmt);
1301
1302int
1303pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1304{
1305 pfm_buffer_fmt_t *fmt;
1306 int ret = 0;
1307
1308 spin_lock(&pfm_buffer_fmt_lock);
1309
1310 fmt = __pfm_find_buffer_fmt(uuid);
1311 if (!fmt) {
1312 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1313 ret = -EINVAL;
1314 goto out;
1315 }
1316 list_del_init(&fmt->fmt_list);
1317 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1318
1319out:
1320 spin_unlock(&pfm_buffer_fmt_lock);
1321 return ret;
1322
1323}
1324EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1325
/*
 * Reserve a monitoring session.  System-wide sessions are exclusive
 * per-CPU and mutually exclusive with per-task sessions (and vice
 * versa).  Returns 0 on success, -EBUSY on any conflict.
 */
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;

	/* validity checks on cpu/is_syswide are done by the caller */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/* system-wide cannot coexist with any per-task session */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
			  	pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		/* only one system-wide session per CPU */
		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++ ;

	} else {
		/* per-task cannot coexist with any system-wide session */
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * Force idle polling: keep CPUs out of deep idle states so
	 * monitoring stays accurate while a session exists.
	 */
	cpu_idle_poll_ctrl(true);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
  		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;

}
1391
/*
 * Release a session reserved by pfm_reserve_session(), adjusting the
 * session and debug-register accounting accordingly.  Always returns 0.
 */
static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;

	/* validity checks on cpu/is_syswide are done by the caller */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask;
		 * drop the syswide debug-register reservation if held
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/* undo the idle-poll forcing done at reserve time */
	cpu_idle_poll_ctrl(false);

	UNLOCK_PFS(flags);

	return 0;
}
1439
1440
1441
1442
1443
1444
1445static int
1446pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
1447{
1448 struct task_struct *task = current;
1449 int r;
1450
1451
1452 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1453 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1454 return -EINVAL;
1455 }
1456
1457 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1458
1459
1460
1461
1462 r = vm_munmap((unsigned long)vaddr, size);
1463
1464 if (r !=0) {
1465 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1466 }
1467
1468 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1469
1470 return 0;
1471}
1472
1473
1474
1475
1476#if 0
/*
 * Free the kernel-side sampling buffer and notify the buffer-format
 * module. Currently compiled out (#if 0); kept for reference.
 *
 * Returns 0 on success, -EINVAL if no buffer is attached.
 */
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
 pfm_buffer_fmt_t *fmt;

 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

 /* format module attached to this buffer */
 fmt = ctx->ctx_buf_fmt;

 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
  ctx->ctx_smpl_hdr,
  ctx->ctx_smpl_size,
  ctx->ctx_smpl_vaddr));

 /* let the format module run its exit hook first */
 pfm_buf_fmt_exit(fmt, current, NULL, NULL);

 /* release the buffer memory */
 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

 ctx->ctx_smpl_hdr = NULL;
 ctx->ctx_smpl_size = 0UL;

 return 0;

invalid_free:
 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
 return -EINVAL;
}
1510#endif
1511
1512static inline void
1513pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1514{
1515 if (fmt == NULL) return;
1516
1517 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1518
1519}
1520
1521
1522
1523
1524
1525
1526
1527static struct vfsmount *pfmfs_mnt __read_mostly;
1528
1529static int __init
1530init_pfm_fs(void)
1531{
1532 int err = register_filesystem(&pfm_fs_type);
1533 if (!err) {
1534 pfmfs_mnt = kern_mount(&pfm_fs_type);
1535 err = PTR_ERR(pfmfs_mnt);
1536 if (IS_ERR(pfmfs_mnt))
1537 unregister_filesystem(&pfm_fs_type);
1538 else
1539 err = 0;
1540 }
1541 return err;
1542}
1543
1544static ssize_t
1545pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1546{
1547 pfm_context_t *ctx;
1548 pfm_msg_t *msg;
1549 ssize_t ret;
1550 unsigned long flags;
1551 DECLARE_WAITQUEUE(wait, current);
1552 if (PFM_IS_FILE(filp) == 0) {
1553 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1554 return -EINVAL;
1555 }
1556
1557 ctx = filp->private_data;
1558 if (ctx == NULL) {
1559 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1560 return -EINVAL;
1561 }
1562
1563
1564
1565
1566 if (size < sizeof(pfm_msg_t)) {
1567 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1568 return -EINVAL;
1569 }
1570
1571 PROTECT_CTX(ctx, flags);
1572
1573
1574
1575
1576 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1577
1578
1579 for(;;) {
1580
1581
1582
1583
1584 set_current_state(TASK_INTERRUPTIBLE);
1585
1586 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1587
1588 ret = 0;
1589 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1590
1591 UNPROTECT_CTX(ctx, flags);
1592
1593
1594
1595
1596 ret = -EAGAIN;
1597 if(filp->f_flags & O_NONBLOCK) break;
1598
1599
1600
1601
1602 if(signal_pending(current)) {
1603 ret = -EINTR;
1604 break;
1605 }
1606
1607
1608
1609 schedule();
1610
1611 PROTECT_CTX(ctx, flags);
1612 }
1613 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1614 set_current_state(TASK_RUNNING);
1615 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1616
1617 if (ret < 0) goto abort;
1618
1619 ret = -EINVAL;
1620 msg = pfm_get_next_msg(ctx);
1621 if (msg == NULL) {
1622 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1623 goto abort_locked;
1624 }
1625
1626 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1627
1628 ret = -EFAULT;
1629 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1630
1631abort_locked:
1632 UNPROTECT_CTX(ctx, flags);
1633abort:
1634 return ret;
1635}
1636
1637static ssize_t
1638pfm_write(struct file *file, const char __user *ubuf,
1639 size_t size, loff_t *ppos)
1640{
1641 DPRINT(("pfm_write called\n"));
1642 return -EINVAL;
1643}
1644
1645static unsigned int
1646pfm_poll(struct file *filp, poll_table * wait)
1647{
1648 pfm_context_t *ctx;
1649 unsigned long flags;
1650 unsigned int mask = 0;
1651
1652 if (PFM_IS_FILE(filp) == 0) {
1653 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1654 return 0;
1655 }
1656
1657 ctx = filp->private_data;
1658 if (ctx == NULL) {
1659 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1660 return 0;
1661 }
1662
1663
1664 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1665
1666 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1667
1668 PROTECT_CTX(ctx, flags);
1669
1670 if (PFM_CTXQ_EMPTY(ctx) == 0)
1671 mask = POLLIN | POLLRDNORM;
1672
1673 UNPROTECT_CTX(ctx, flags);
1674
1675 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1676
1677 return mask;
1678}
1679
1680static long
1681pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1682{
1683 DPRINT(("pfm_ioctl called\n"));
1684 return -EINVAL;
1685}
1686
1687
1688
1689
1690static inline int
1691pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1692{
1693 int ret;
1694
1695 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1696
1697 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1698 task_pid_nr(current),
1699 fd,
1700 on,
1701 ctx->ctx_async_queue, ret));
1702
1703 return ret;
1704}
1705
1706static int
1707pfm_fasync(int fd, struct file *filp, int on)
1708{
1709 pfm_context_t *ctx;
1710 int ret;
1711
1712 if (PFM_IS_FILE(filp) == 0) {
1713 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1714 return -EBADF;
1715 }
1716
1717 ctx = filp->private_data;
1718 if (ctx == NULL) {
1719 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1720 return -EBADF;
1721 }
1722
1723
1724
1725
1726
1727
1728
1729 ret = pfm_do_fasync(fd, filp, ctx, on);
1730
1731
1732 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1733 fd,
1734 on,
1735 ctx->ctx_async_queue, ret));
1736
1737 return ret;
1738}
1739
1740#ifdef CONFIG_SMP
1741
1742
1743
1744
1745
/*
 * IPI handler: forcibly unload a system-wide context on the CPU that
 * owns it. Runs on ctx->ctx_cpu via smp_call_function_single() from
 * pfm_syswide_cleanup_other_cpu().
 *
 * Sanity-checks that we are on the right CPU and that the PMU really
 * is owned by this context/task before touching anything; bails out
 * loudly otherwise.
 */
static void
pfm_syswide_force_stop(void *info)
{
 pfm_context_t *ctx = (pfm_context_t *)info;
 struct pt_regs *regs = task_pt_regs(current);
 struct task_struct *owner;
 unsigned long flags;
 int ret;

 if (ctx->ctx_cpu != smp_processor_id()) {
  printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
   ctx->ctx_cpu,
   smp_processor_id());
  return;
 }
 owner = GET_PMU_OWNER();
 if (owner != ctx->ctx_task) {
  printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
   smp_processor_id(),
   task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
  return;
 }
 if (GET_PMU_CTX() != ctx) {
  printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
   smp_processor_id(),
   GET_PMU_CTX(), ctx);
  return;
 }

 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));

 /*
  * unload must run with interrupts masked on this CPU so the PMU
  * state cannot change underneath us
  */
 local_irq_save(flags);

 ret = pfm_context_unload(ctx, NULL, 0, regs);
 if (ret) {
  DPRINT(("context_unload returned %d\n", ret));
 }

 local_irq_restore(flags);
}
1793
/*
 * Ask the CPU owning a system-wide context to unload it, by running
 * pfm_syswide_force_stop() there synchronously (wait=1), so the
 * context is guaranteed unloaded when this returns.
 */
static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
 int ret;

 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
1803#endif
1804
1805
1806
1807
1808
/*
 * flush file operation, called on every close(2) of the descriptor.
 *
 * If the caller is the monitored/creator task, the context is unloaded
 * here (possibly via an IPI to the context's CPU for system-wide
 * sessions). The caller's user-space sampling-buffer mapping, if any,
 * is removed. The context itself is only freed in pfm_close(), which
 * runs when the last reference to the file goes away.
 */
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
 pfm_context_t *ctx;
 struct task_struct *task;
 struct pt_regs *regs;
 unsigned long flags;
 unsigned long smpl_buf_size = 0UL;
 void *smpl_buf_vaddr = NULL;
 int state, is_system;

 if (PFM_IS_FILE(filp) == 0) {
  DPRINT(("bad magic for\n"));
  return -EBADF;
 }

 ctx = filp->private_data;
 if (ctx == NULL) {
  printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
  return -EBADF;
 }

 /* lock context and mask interrupts on this CPU */
 PROTECT_CTX(ctx, flags);

 state = ctx->ctx_state;
 is_system = ctx->ctx_fl_system;

 task = PFM_CTX_TASK(ctx);
 regs = task_pt_regs(task);

 DPRINT(("ctx_state=%d is_current=%d\n",
  state,
  task == current ? 1 : 0));

 /*
  * only the task that attached the context unloads it here; other
  * closers merely drop their mapping below
  */
 if (task == current) {
#ifdef CONFIG_SMP
  /*
   * a system-wide context pinned to another CPU must be unloaded
   * there: drop irq protection, IPI that CPU, re-acquire
   */
  if (is_system && ctx->ctx_cpu != smp_processor_id()) {

   DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));

   /* re-enable interrupts so the synchronous IPI can complete */
   local_irq_restore(flags);

   pfm_syswide_cleanup_other_cpu(ctx);

   /* restore the masked-interrupt section */
   local_irq_save(flags);

  } else
#endif
  {

   DPRINT(("forcing unload\n"));

   /* local case: unload directly on this CPU */
   pfm_context_unload(ctx, NULL, 0, regs);

   DPRINT(("ctx_state=%d\n", ctx->ctx_state));
  }
 }

 /*
  * record the caller's buffer mapping (if the caller still has an mm)
  * so it can be unmapped after the lock is dropped
  */
 if (ctx->ctx_smpl_vaddr && current->mm) {
  smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
  smpl_buf_size = ctx->ctx_smpl_size;
 }

 UNPROTECT_CTX(ctx, flags);

 /*
  * vm_munmap() may sleep, so it must happen outside the locked
  * section above
  */
 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);

 return 0;
}
1933
1934
1935
1936
1937
1938
1939
1940
1941
1942
1943
1944
1945
1946
1947
/*
 * release file operation: runs once, when the last reference to the
 * perfmon file is dropped. Responsible for the final teardown of the
 * context: waking/waiting out a blocked monitored task, zombifying a
 * context still attached to another task, freeing the sampling buffer,
 * releasing the session, and finally freeing the context itself.
 */
static int
pfm_close(struct inode *inode, struct file *filp)
{
 pfm_context_t *ctx;
 struct task_struct *task;
 struct pt_regs *regs;
 DECLARE_WAITQUEUE(wait, current);
 unsigned long flags;
 unsigned long smpl_buf_size = 0UL;
 void *smpl_buf_addr = NULL;
 /* cleared when another task must finish the teardown itself */
 int free_possible = 1;
 int state, is_system;

 DPRINT(("pfm_close called private=%p\n", filp->private_data));

 if (PFM_IS_FILE(filp) == 0) {
  DPRINT(("bad magic\n"));
  return -EBADF;
 }

 ctx = filp->private_data;
 if (ctx == NULL) {
  printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
  return -EBADF;
 }

 PROTECT_CTX(ctx, flags);

 state = ctx->ctx_state;
 is_system = ctx->ctx_fl_system;

 task = PFM_CTX_TASK(ctx);
 regs = task_pt_regs(task);

 DPRINT(("ctx_state=%d is_current=%d\n",
  state,
  task == current ? 1 : 0));

 /* already detached (e.g. by pfm_flush): go straight to cleanup */
 if (state == PFM_CTX_UNLOADED) goto doit;

 /*
  * masked context in blocking mode: the monitored task is sleeping in
  * the overflow handler waiting for a restart. Mark the context as
  * going zombie, complete its restart semaphore, and sleep on the
  * zombie queue until that task has cleaned up after itself.
  */
 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

  ctx->ctx_fl_going_zombie = 1;

  /* release the task blocked in pfm_handle_work() */
  complete(&ctx->ctx_restart_done);

  DPRINT(("waking up ctx_state=%d\n", state));

  /*
   * enqueue before dropping the lock so the wakeup from the
   * monitored task cannot be missed
   */
  set_current_state(TASK_INTERRUPTIBLE);
  add_wait_queue(&ctx->ctx_zombieq, &wait);

  UNPROTECT_CTX(ctx, flags);

  schedule();


  PROTECT_CTX(ctx, flags);


  remove_wait_queue(&ctx->ctx_zombieq, &wait);
  set_current_state(TASK_RUNNING);

  DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
 }
 else if (task != current) {
#ifdef CONFIG_SMP
  /*
   * context attached to another task that may be running: flag it
   * as a zombie; the owning task frees it on its next PMU
   * interaction, so we must not free it here
   */
  ctx->ctx_state = PFM_CTX_ZOMBIE;

  DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));

  free_possible = 0;
#else
  /* UP: the other task cannot be running; unload it directly */
  pfm_context_unload(ctx, NULL, 0, regs);
#endif
 }

doit:
 /* reload state: it may have changed while we slept above */
 state = ctx->ctx_state;

 /*
  * detach the sampling buffer from the context under the lock; the
  * actual free happens after the lock is dropped (it may sleep)
  */
 if (ctx->ctx_smpl_hdr) {
  smpl_buf_addr = ctx->ctx_smpl_hdr;
  smpl_buf_size = ctx->ctx_smpl_size;

  ctx->ctx_smpl_hdr = NULL;
  ctx->ctx_fl_is_sampling = 0;
 }

 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
  state,
  free_possible,
  smpl_buf_addr,
  smpl_buf_size));

 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

 /*
  * a zombie context still holds its session reservation; drop it now
  */
 if (state == PFM_CTX_ZOMBIE) {
  pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
 }

 /* sever the file -> context link before releasing the lock */
 filp->private_data = NULL;

 UNPROTECT_CTX(ctx, flags);

 /* may sleep: must be outside the locked section */
 if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

 /* zombie contexts are freed by their owning task instead */
 if (free_possible) pfm_context_free(ctx);

 return 0;
}
2147
2148static int
2149pfm_no_open(struct inode *irrelevant, struct file *dontcare)
2150{
2151 DPRINT(("pfm_no_open called\n"));
2152 return -ENXIO;
2153}
2154
2155
2156
/* file operations for perfmon descriptors; write/ioctl always reject */
static const struct file_operations pfm_file_ops = {
 .llseek = no_llseek,
 .read = pfm_read,
 .write = pfm_write,
 .poll = pfm_poll,
 .unlocked_ioctl = pfm_ioctl,
 .open = pfm_no_open, /* special open code to disallow re-open via /proc */
 .fasync = pfm_fasync,
 .release = pfm_close, /* final teardown */
 .flush = pfm_flush /* per-close unload/unmap */
};
2168
/*
 * d_delete for pfmfs: always prune the dentry on last dput, since
 * these anonymous entries can never be looked up again.
 */
static int
pfmfs_delete_dentry(const struct dentry *dentry)
{
 return 1;
}
2174
/* render "pfm:[<ino>]" for this dentry (shown in /proc/<pid>/fd links) */
static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
 return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
  dentry->d_inode->i_ino);
}
2180
/* dentry operations for the anonymous pfmfs entries */
static const struct dentry_operations pfmfs_dentry_operations = {
 .d_delete = pfmfs_delete_dentry,
 .d_dname = pfmfs_dname,
};
2185
2186
/*
 * Build an anonymous, read-only file on pfmfs carrying @ctx in its
 * private_data. Returns the new struct file or an ERR_PTR on failure.
 * On the alloc_file() error path, path_put() drops both the dentry
 * (and with it the inode via d_add) and the mount reference.
 */
static struct file *
pfm_alloc_file(pfm_context_t *ctx)
{
 struct file *file;
 struct inode *inode;
 struct path path;
 struct qstr this = { .name = "" }; /* anonymous: empty dentry name */

 /*
  * allocate a new inode on the private pfmfs mount
  */
 inode = new_inode(pfmfs_mnt->mnt_sb);
 if (!inode)
  return ERR_PTR(-ENOMEM);

 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

 /* char-device-like, world-readable node owned by the creator */
 inode->i_mode = S_IFCHR|S_IRUGO;
 inode->i_uid = current_fsuid();
 inode->i_gid = current_fsgid();

 /*
  * allocate an (unnamed) dentry under the pfmfs root
  */
 path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
 if (!path.dentry) {
  iput(inode);
  return ERR_PTR(-ENOMEM);
 }
 path.mnt = mntget(pfmfs_mnt);

 /* ties the inode to the dentry; inode ref now owned by the dentry */
 d_add(path.dentry, inode);

 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
 if (IS_ERR(file)) {
  path_put(&path);
  return file;
 }

 file->f_flags = O_RDONLY;
 file->private_data = ctx;

 return file;
}
2231
2232static int
2233pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2234{
2235 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2236
2237 while (size > 0) {
2238 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2239
2240
2241 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2242 return -ENOMEM;
2243
2244 addr += PAGE_SIZE;
2245 buf += PAGE_SIZE;
2246 size -= PAGE_SIZE;
2247 }
2248 return 0;
2249}
2250
2251
2252
2253
/*
 * Allocate the kernel sampling buffer and map it read-only into
 * @task's address space, backed by @filp via a hand-built VMA.
 *
 * On success: ctx->ctx_smpl_hdr/ctx_smpl_size/ctx_smpl_vaddr are set
 * and the user address is stored through @user_vaddr. Returns 0, or
 * -ENOMEM on any failure.
 */
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
 struct mm_struct *mm = task->mm;
 struct vm_area_struct *vma = NULL;
 unsigned long size;
 void *smpl_buf;

 /*
  * round the requested size up to a whole number of pages
  */
 size = PAGE_ALIGN(rsize);

 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

 /*
  * the buffer is effectively pinned, so charge it against the
  * task's locked-memory limit
  */
 if (size > task_rlimit(task, RLIMIT_MEMLOCK))
  return -ENOMEM;

 /*
  * allocate the kernel-side buffer
  */
 smpl_buf = pfm_rvmalloc(size);
 if (smpl_buf == NULL) {
  DPRINT(("Can't allocate sampling buffer\n"));
  return -ENOMEM;
 }

 DPRINT(("smpl_buf @%p\n", smpl_buf));

 /* build the VMA describing the user mapping by hand */
 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 if (!vma) {
  DPRINT(("Cannot allocate vma\n"));
  goto error_kmem;
 }
 INIT_LIST_HEAD(&vma->anon_vma_chain);

 /* read-only, non-expandable, excluded from core dumps */
 vma->vm_mm = mm;
 vma->vm_file = get_file(filp);
 vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
 vma->vm_page_prot = PAGE_READONLY;

 /*
  * NOTE(review): the error paths below free the vma without dropping
  * the file reference taken by get_file() above, and leave
  * ctx_smpl_hdr pointing at freed memory until the caller cleans it
  * up — confirm against the callers' error handling.
  */
 ctx->ctx_smpl_hdr = smpl_buf;
 ctx->ctx_smpl_size = size;

 /* address-space changes below require the mmap semaphore */
 down_write(&task->mm->mmap_sem);

 /* find a free user address range for the mapping */
 vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
 if (IS_ERR_VALUE(vma->vm_start)) {
  DPRINT(("Cannot find unmapped area for size %ld\n", size));
  up_write(&task->mm->mmap_sem);
  goto error;
 }
 vma->vm_end = vma->vm_start + size;
 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

 /* populate the range with the buffer's physical pages */
 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
  DPRINT(("Can't remap buffer\n"));
  up_write(&task->mm->mmap_sem);
  goto error;
 }

 /*
  * link the VMA into the task's address space and account it
  */
 insert_vm_struct(mm, vma);

 vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
  vma_pages(vma));
 up_write(&task->mm->mmap_sem);

 /* hand the user-visible address back to the caller */
 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
 *(unsigned long *)user_vaddr = vma->vm_start;

 return 0;

error:
 kmem_cache_free(vm_area_cachep, vma);
error_kmem:
 pfm_rvfree(smpl_buf, size);

 return -ENOMEM;
}
2370
2371
2372
2373
2374static int
2375pfm_bad_permissions(struct task_struct *task)
2376{
2377 const struct cred *tcred;
2378 kuid_t uid = current_uid();
2379 kgid_t gid = current_gid();
2380 int ret;
2381
2382 rcu_read_lock();
2383 tcred = __task_cred(task);
2384
2385
2386 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2387 from_kuid(&init_user_ns, uid),
2388 from_kgid(&init_user_ns, gid),
2389 from_kuid(&init_user_ns, tcred->euid),
2390 from_kuid(&init_user_ns, tcred->suid),
2391 from_kuid(&init_user_ns, tcred->uid),
2392 from_kgid(&init_user_ns, tcred->egid),
2393 from_kgid(&init_user_ns, tcred->sgid)));
2394
2395 ret = ((!uid_eq(uid, tcred->euid))
2396 || (!uid_eq(uid, tcred->suid))
2397 || (!uid_eq(uid, tcred->uid))
2398 || (!gid_eq(gid, tcred->egid))
2399 || (!gid_eq(gid, tcred->sgid))
2400 || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
2401
2402 rcu_read_unlock();
2403 return ret;
2404}
2405
2406static int
2407pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2408{
2409 int ctx_flags;
2410
2411
2412
2413 ctx_flags = pfx->ctx_flags;
2414
2415 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2416
2417
2418
2419
2420 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2421 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2422 return -EINVAL;
2423 }
2424 } else {
2425 }
2426
2427
2428 return 0;
2429}
2430
/*
 * Locate the requested sampling-buffer format, validate the caller's
 * format argument, allocate+map the sampling buffer if the format
 * needs one, and run the format's init hook.
 *
 * On success the user-visible buffer address is returned through
 * arg->ctx_smpl_vaddr. Returns 0 or a negative errno.
 */
static int
pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
       unsigned int cpu, pfarg_context_t *arg)
{
 pfm_buffer_fmt_t *fmt = NULL;
 unsigned long size = 0UL;
 void *uaddr = NULL;
 void *fmt_arg = NULL;
 int ret = 0;
/* format-specific argument immediately follows the pfarg_context_t */
#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)

 /* look the format up by its UUID */
 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
 if (fmt == NULL) {
  DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
  return -EINVAL;
 }

 /*
  * formats with a per-context argument find it after the main struct
  */
 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));

 if (ret) goto error;

 /* bind the format to the context; mark it as sampling */
 ctx->ctx_buf_fmt = fmt;
 ctx->ctx_fl_is_sampling = 1;

 /*
  * ask the format how big a buffer it needs (may be zero)
  */
 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
 if (ret) goto error;

 if (size) {
  /*
   * allocate the buffer and map it into the caller's address space
   */
  ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
  if (ret) goto error;

  /* return the user address to the caller */
  arg->ctx_smpl_vaddr = uaddr;
 }
 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
 return ret;
}
2485
/*
 * Initialize the software copy of the PMU state in a fresh context:
 * default values for all implemented PMCs, and the bitmasks of
 * registers that must be restored on context load.
 */
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
 int i;

 /*
  * install the default (quiescent) value in every implemented PMC;
  * pmc0 is skipped (loop starts at 1) — it is the overflow status
  * register, handled separately
  */
 for (i=1; PMC_IS_LAST(i) == 0; i++) {
  if (PMC_IS_IMPL(i) == 0) continue;
  ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
  DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
 }

 /*
  * bitmask of all PMCs to restore on ctxsw: every implemented PMC
  * except pmc0 (bit 0 masked off)
  */
 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

 /*
  * bitmask of all implemented PMDs to restore on ctxsw
  */
 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));

 /*
  * no debug registers in use yet
  */
 ctx->ctx_used_ibrs[0] = 0UL;
 ctx->ctx_used_dbrs[0] = 0UL;
}
2541
2542static int
2543pfm_ctx_getsize(void *arg, size_t *sz)
2544{
2545 pfarg_context_t *req = (pfarg_context_t *)arg;
2546 pfm_buffer_fmt_t *fmt;
2547
2548 *sz = 0;
2549
2550 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2551
2552 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2553 if (fmt == NULL) {
2554 DPRINT(("cannot find buffer format\n"));
2555 return -EINVAL;
2556 }
2557
2558 *sz = fmt->fmt_arg_size;
2559 DPRINT(("arg_size=%lu\n", *sz));
2560
2561 return 0;
2562}
2563
2564
2565
2566
2567
2568
2569
2570
2571
/*
 * Check whether a context may be attached to @task.
 *
 * Rejects kernel threads (no mm), callers without ptrace-equivalent
 * permission, blocking contexts attached to self (would deadlock),
 * zombies, and — for third-party targets — tasks not in a
 * stopped/traced state. Returns 0 when attachment is allowed.
 */
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
 /*
  * kernel threads have no user address space to monitor
  */
 if (task->mm == NULL) {
  DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
  return -EPERM;
 }
 if (pfm_bad_permissions(task)) {
  DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
  return -EPERM;
 }
 /*
  * a blocking context on self would block waiting for itself
  */
 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
  DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
  return -EINVAL;
 }

 if (task->exit_state == EXIT_ZOMBIE) {
  DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
  return -EBUSY;
 }

 /*
  * self-attachment needs no stop check beyond this point
  */
 if (task == current) return 0;

 if (!task_is_stopped_or_traced(task)) {
  DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
  return -EBUSY;
 }

 /*
  * make sure the target is fully off any CPU before we touch its
  * PMU state
  */
 wait_task_inactive(task, 0);

 return 0;
}
2617
/*
 * Resolve @pid to a task suitable for context attachment.
 *
 * On success (*task is set): for a third-party target the task ref
 * taken here is kept and must be released by the caller; for self
 * (pid == current) no reference is taken. On failure the reference,
 * if any, is dropped here. pid 0 and 1 are always refused.
 */
static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
 struct task_struct *p = current;
 int ret;

 /* never attach to the idle task or init */
 if (pid < 2) return -EPERM;

 if (pid != task_pid_vnr(current)) {

  read_lock(&tasklist_lock);

  p = find_task_by_vpid(pid);

  /* pin the task before dropping tasklist_lock */
  if (p) get_task_struct(p);

  read_unlock(&tasklist_lock);

  if (p == NULL) return -ESRCH;
 }

 ret = pfm_task_incompatible(ctx, p);
 if (ret == 0) {
  *task = p;
 } else if (p != current) {
  /* incompatible third-party target: drop the ref taken above */
  pfm_put_task(p);
 }
 return ret;
}
2649
2650
2651
/*
 * PFM_CREATE_CONTEXT handler: allocate a new perfmon context, its
 * backing file/fd, and (optionally) a sampling buffer, then install
 * the fd in the caller's table.
 *
 * Note: the incoming @ctx argument is not used — it is overwritten by
 * the freshly allocated context below.
 *
 * Returns 0 with req->ctx_fd set, or a negative errno; on failure all
 * partially created resources are unwound in reverse order.
 */
static int
pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
 pfarg_context_t *req = (pfarg_context_t *)arg;
 struct file *filp;
 struct path path;
 int ctx_flags;
 int fd;
 int ret;

 /* validate user-supplied flags first */
 ret = pfarg_is_sane(current, req);
 if (ret < 0)
  return ret;

 ctx_flags = req->ctx_flags;

 ret = -ENOMEM;

 fd = get_unused_fd();
 if (fd < 0)
  return fd;

 ctx = pfm_context_alloc(ctx_flags);
 if (!ctx)
  goto error;

 filp = pfm_alloc_file(ctx);
 if (IS_ERR(filp)) {
  ret = PTR_ERR(filp);
  goto error_file;
 }

 req->ctx_fd = ctx->ctx_fd = fd;

 /*
  * a non-null UUID requests a sampling-buffer format
  */
 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
  ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
  if (ret)
   goto buffer_error;
 }

 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
  ctx,
  ctx_flags,
  ctx->ctx_fl_system,
  ctx->ctx_fl_block,
  ctx->ctx_fl_excl_idle,
  ctx->ctx_fl_no_msg,
  ctx->ctx_fd));

 /*
  * initialize the soft PMU state for the new context
  */
 pfm_reset_pmu_state(ctx);

 /* last step: make the fd visible to user space */
 fd_install(fd, filp);

 return 0;

buffer_error:
 /* drop the never-installed file; keep path refs long enough */
 path = filp->f_path;
 put_filp(filp);
 path_put(&path);

 if (ctx->ctx_buf_fmt) {
  pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
 }
error_file:
 pfm_context_free(ctx);

error:
 put_unused_fd(fd);
 return ret;
}
2729
/*
 * Compute the next reload value for a soft counter: the long or short
 * reset value, optionally randomized by subtracting a pseudo-random
 * offset (bounded by reg->mask) derived from the seed. The chosen
 * value is also recorded in reg->lval.
 */
static inline unsigned long
pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
{
 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
 extern unsigned long carta_random32 (unsigned long seed);

 if (reg->flags & PFM_REGFL_RANDOM) {
  new_seed = carta_random32(old_seed);
  /* randomize downward within the mask-bounded window */
  val -= (old_seed & mask);
  if ((mask >> 32) != 0)
   /* construct a full 64-bit seed from two 32-bit draws */
   new_seed |= carta_random32(old_seed >> 32) << 32;
  reg->seed = new_seed;
 }
 reg->lval = val;
 return val;
}
2748
/*
 * Reset overflowed soft counters while the context is MASKED: only
 * the software copies (ctx_pmds[].val) are updated — no hardware PMD
 * writes, since monitoring is currently stopped.
 */
static void
pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
 unsigned long mask = ovfl_regs[0];
 unsigned long reset_others = 0UL;
 unsigned long val;
 int i;

 /*
  * first pass: reset the overflowed counters themselves, collecting
  * the set of additional PMDs they ask to reset
  */
 mask >>= PMU_FIRST_COUNTER;
 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

  if ((mask & 0x1UL) == 0UL) continue;

  ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
  reset_others |= ctx->ctx_pmds[i].reset_pmds[0];

  DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
 }

 /*
  * second pass: reset the dependent PMDs gathered above
  */
 for(i = 0; reset_others; i++, reset_others >>= 1) {

  if ((reset_others & 0x1) == 0) continue;

  ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

  DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
   is_long_reset ? "long" : "short", i, val));
 }
}
2784
/*
 * Reset overflowed counters for a live (unmasked) context: soft
 * counters are updated via pfm_write_soft_counter(), non-counting
 * dependents are written straight to the hardware PMD. Delegates to
 * pfm_reset_regs_masked() when the context is MASKED.
 */
static void
pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
{
 unsigned long mask = ovfl_regs[0];
 unsigned long reset_others = 0UL;
 unsigned long val;
 int i;

 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));

 if (ctx->ctx_state == PFM_CTX_MASKED) {
  pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
  return;
 }

 /*
  * first pass: reset the overflowed counters, collecting dependents
  */
 mask >>= PMU_FIRST_COUNTER;
 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

  if ((mask & 0x1UL) == 0UL) continue;

  val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
  reset_others |= ctx->ctx_pmds[i].reset_pmds[0];

  DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));

  pfm_write_soft_counter(ctx, i, val);
 }

 /*
  * second pass: reset the dependent PMDs
  */
 for(i = 0; reset_others; i++, reset_others >>= 1) {

  if ((reset_others & 0x1) == 0) continue;

  val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);

  if (PMD_IS_COUNTING(i)) {
   pfm_write_soft_counter(ctx, i, val);
  } else {
   /* plain data register: write the hardware PMD directly */
   ia64_set_pmd(i, val);
  }
  DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
   is_long_reset ? "long" : "short", i, val));
 }
 /* make the PMD writes visible before returning */
 ia64_srlz_d();
}
2835
/*
 * PFM_WRITE_PMCS handler: validate and install @count PMC values from
 * the user request array into the context, and into the hardware when
 * the context is loaded and the PMU is accessible from this CPU.
 *
 * On the first invalid register the offending request gets
 * PFM_REG_RETFL_EINVAL set in its reg_flags and the call aborts with
 * a negative errno; earlier registers in the batch remain applied.
 */
static int
pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
 struct task_struct *task;
 pfarg_reg_t *req = (pfarg_reg_t *)arg;
 unsigned long value, pmc_pm;
 unsigned long smpl_pmds, reset_pmds, impl_pmds;
 unsigned int cnum, reg_flags, flags, pmc_type;
 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
 int is_monitor, is_counting, state;
 int ret = -EINVAL;
 pfm_reg_check_t wr_func;
#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))

 state = ctx->ctx_state;
 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
 is_system = ctx->ctx_fl_system;
 task = ctx->ctx_task;
 impl_pmds = pmu_conf->impl_pmds[0];

 if (state == PFM_CTX_ZOMBIE) return -EINVAL;

 if (is_loaded) {
  /*
   * a loaded system-wide context may only be written from the CPU
   * it is bound to
   */
  if (is_system && ctx->ctx_cpu != smp_processor_id()) {
   DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
   return -EBUSY;
  }
  /* direct PMU access only when we own the PMU (or system-wide) */
  can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
 }
 expert_mode = pfm_sysctl.expert_mode;

 for (i = 0; i < count; i++, req++) {

  cnum = req->reg_num;
  reg_flags = req->reg_flags;
  value = req->reg_value;
  smpl_pmds = req->reg_smpl_pmds[0];
  reset_pmds = req->reg_reset_pmds[0];
  flags = 0;

  if (cnum >= PMU_MAX_PMCS) {
   DPRINT(("pmc%u is invalid\n", cnum));
   goto error;
  }

  pmc_type = pmu_conf->pmc_desc[cnum].type;
  pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
  is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
  is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;

  /*
   * reject unimplemented registers and pure control registers,
   * which user space may never write
   */
  if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
   DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
   goto error;
  }
  wr_func = pmu_conf->pmc_desc[cnum].write_check;

  /*
   * the privilege-monitor bit must match the session type (system
   * vs per-task), except for the default quiescent value
   */
  if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
   DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
    cnum,
    pmc_pm,
    is_system));
   goto error;
  }

  if (is_counting) {
   /*
    * force the overflow-interrupt bit on counting monitors
    */
   value |= 1 << PMU_PMC_OI;

   if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
    flags |= PFM_REGFL_OVFL_NOTIFY;
   }

   if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;

   /* sampling set must name only implemented PMDs */
   if ((smpl_pmds & impl_pmds) != smpl_pmds) {
    DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
    goto error;
   }

   /* reset set must name only implemented PMDs */
   if ((reset_pmds & impl_pmds) != reset_pmds) {
    DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
    goto error;
   }
  } else {
   if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
    DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
    goto error;
   }

  }

  /*
   * run the model-specific write check unless expert mode bypasses
   * it; the check may rewrite the value in place
   */
  if (likely(expert_mode == 0 && wr_func)) {
   ret = (*wr_func)(task, ctx, cnum, &value, regs);
   if (ret) goto error;
   ret = -EINVAL;
  }

  /*
   * this register passed all checks
   */
  PFM_REG_RETFLAG_SET(req->reg_flags, 0);

  if (is_counting) {
   /* record per-counter configuration */
   ctx->ctx_pmds[cnum].flags = flags;

   ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
   ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
   ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;

   /*
    * PMDs referenced by the sampling/reset sets must be saved
    * and restored across context switches
    */
   CTX_USED_PMD(ctx, reset_pmds);
   CTX_USED_PMD(ctx, smpl_pmds);

   /*
    * NOTE(review): `~1UL << cnum` parses as `(~1UL) << cnum`,
    * which clears bits 0..cnum rather than just bit cnum —
    * probably intended `~(1UL << cnum)`; confirm upstream.
    */
   if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
  }

  /*
   * PMDs this PMC depends on must also be context-switched
   */
  CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);

  /* track active monitors for start/stop handling */
  if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);

  /*
   * update the software copy unconditionally
   */
  ctx->ctx_pmcs[cnum] = value;

  if (is_loaded) {
   /* per-task: keep the thread save area in sync */
   if (is_system == 0) ctx->th_pmcs[cnum] = value;

   /* write through to the hardware when we own the PMU */
   if (can_access_pmu) {
    ia64_set_pmc(cnum, value);
   }
#ifdef CONFIG_SMP
   else {
    /*
     * context loaded on another CPU: flag the PMC for reload
     * when that CPU next picks the context up
     */
    ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
   }
#endif
  }

  DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
   cnum,
   value,
   is_loaded,
   can_access_pmu,
   flags,
   ctx->ctx_all_pmcs[0],
   ctx->ctx_used_pmds[0],
   ctx->ctx_pmds[cnum].eventid,
   smpl_pmds,
   reset_pmds,
   ctx->ctx_reload_pmcs[0],
   ctx->ctx_used_monitors[0],
   ctx->ctx_ovfl_regs[0]));
 }

 /*
  * serialize the hardware PMC writes performed above
  */
 if (can_access_pmu) ia64_srlz_d();

 return 0;
error:
 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
 return ret;
}
3075
3076static int
3077pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3078{
3079 struct task_struct *task;
3080 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3081 unsigned long value, hw_value, ovfl_mask;
3082 unsigned int cnum;
3083 int i, can_access_pmu = 0, state;
3084 int is_counting, is_loaded, is_system, expert_mode;
3085 int ret = -EINVAL;
3086 pfm_reg_check_t wr_func;
3087
3088
3089 state = ctx->ctx_state;
3090 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3091 is_system = ctx->ctx_fl_system;
3092 ovfl_mask = pmu_conf->ovfl_val;
3093 task = ctx->ctx_task;
3094
3095 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3096
3097
3098
3099
3100
3101 if (likely(is_loaded)) {
3102
3103
3104
3105
3106
3107 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3108 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3109 return -EBUSY;
3110 }
3111 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3112 }
3113 expert_mode = pfm_sysctl.expert_mode;
3114
3115 for (i = 0; i < count; i++, req++) {
3116
3117 cnum = req->reg_num;
3118 value = req->reg_value;
3119
3120 if (!PMD_IS_IMPL(cnum)) {
3121 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3122 goto abort_mission;
3123 }
3124 is_counting = PMD_IS_COUNTING(cnum);
3125 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3126
3127
3128
3129
3130 if (unlikely(expert_mode == 0 && wr_func)) {
3131 unsigned long v = value;
3132
3133 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3134 if (ret) goto abort_mission;
3135
3136 value = v;
3137 ret = -EINVAL;
3138 }
3139
3140
3141
3142
3143 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3144
3145
3146
3147
3148 hw_value = value;
3149
3150
3151
3152
3153 if (is_counting) {
3154
3155
3156
3157 ctx->ctx_pmds[cnum].lval = value;
3158
3159
3160
3161
3162 if (is_loaded) {
3163 hw_value = value & ovfl_mask;
3164 value = value & ~ovfl_mask;
3165 }
3166 }
3167
3168
3169
3170 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3171 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3172
3173
3174
3175
3176 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3177 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3178
3179
3180
3181
3182 ctx->ctx_pmds[cnum].val = value;
3183
3184
3185
3186
3187
3188
3189
3190 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3191
3192
3193
3194
3195 CTX_USED_PMD(ctx, RDEP(cnum));
3196
3197
3198
3199
3200
3201 if (is_counting && state == PFM_CTX_MASKED) {
3202 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum;
3203 }
3204
3205 if (is_loaded) {
3206
3207
3208
3209 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3210
3211
3212
3213
3214 if (can_access_pmu) {
3215 ia64_set_pmd(cnum, hw_value);
3216 } else {
3217#ifdef CONFIG_SMP
3218
3219
3220
3221
3222
3223 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3224#endif
3225 }
3226 }
3227
3228 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3229 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3230 cnum,
3231 value,
3232 is_loaded,
3233 can_access_pmu,
3234 hw_value,
3235 ctx->ctx_pmds[cnum].val,
3236 ctx->ctx_pmds[cnum].short_reset,
3237 ctx->ctx_pmds[cnum].long_reset,
3238 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3239 ctx->ctx_pmds[cnum].seed,
3240 ctx->ctx_pmds[cnum].mask,
3241 ctx->ctx_used_pmds[0],
3242 ctx->ctx_pmds[cnum].reset_pmds[0],
3243 ctx->ctx_reload_pmds[0],
3244 ctx->ctx_all_pmds[0],
3245 ctx->ctx_ovfl_regs[0]));
3246 }
3247
3248
3249
3250
3251 if (can_access_pmu) ia64_srlz_d();
3252
3253 return 0;
3254
3255abort_mission:
3256
3257
3258
3259 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3260 return ret;
3261}
3262
3263
3264
3265
3266
3267
3268
3269
3270
3271
/*
 * Read data (PMD) registers for a perfmon context.
 *
 * For each request entry, returns the current 64-bit virtualized value of
 * the PMD: the live hardware register when the PMU is accessible from this
 * CPU, otherwise the value saved in the context's thread state. For
 * counting PMDs the hardware low bits are combined with the software-held
 * upper bits (ctx_pmds[].val).
 *
 * Returns 0 on success; -EINVAL/-EBUSY on error, with the faulty entry's
 * reg_flags marked PFM_REG_RETFL_EINVAL.
 */
static int
pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	unsigned long val = 0UL, lval, ovfl_mask, sval;
	pfarg_reg_t *req = (pfarg_reg_t *)arg;
	unsigned int cnum, reg_flags = 0;
	int i, can_access_pmu = 0, state;
	int is_loaded, is_system, is_counting, expert_mode;
	int ret = -EINVAL;
	pfm_reg_check_t rd_func;

	/* snapshot context state once; caller holds the context lock */
	state = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;
	task = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	if (likely(is_loaded)) {
		/*
		 * system-wide sessions are bound to one CPU; refuse access
		 * from any other CPU
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}

		/* we can read hardware directly only if we own the PMU */
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;

		/* ensure prior PMU writes have landed before we read */
		if (can_access_pmu) ia64_srlz_d();
	}
	expert_mode = pfm_sysctl.expert_mode;

	DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
		is_loaded,
		can_access_pmu,
		state));

	for (i = 0; i < count; i++, req++) {

		cnum = req->reg_num;
		reg_flags = req->reg_flags;

		if (unlikely(!PMD_IS_IMPL(cnum))) goto error;

		/*
		 * only registers actually used by this context may be read;
		 * otherwise the saved state would be meaningless
		 */
		if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;

		sval = ctx->ctx_pmds[cnum].val;	/* software-held upper bits */
		lval = ctx->ctx_pmds[cnum].lval;	/* last value loaded */
		is_counting = PMD_IS_COUNTING(cnum);

		/*
		 * pick up the raw value: live register if we own the PMU,
		 * saved thread copy if loaded elsewhere, 0 when unloaded
		 */
		if (can_access_pmu){
			val = ia64_get_pmd(cnum);
		} else {
			/*
			 * context loaded but PMU owned by another task/CPU:
			 * the thread save area holds the current value
			 */
			val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
		}
		rd_func = pmu_conf->pmd_desc[cnum].read_check;

		if (is_counting) {
			/*
			 * reconstruct the full 64-bit virtual counter from
			 * the hardware low bits + software upper bits
			 */
			val &= ovfl_mask;
			val += sval;
		}

		/*
		 * PMU-model-specific read hook may adjust the value,
		 * unless expert mode is enabled
		 */
		if (unlikely(expert_mode == 0 && rd_func)) {
			unsigned long v = val;
			ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
			if (ret) goto error;
			val = v;
			ret = -EINVAL;
		}

		PFM_REG_RETFLAG_SET(reg_flags, 0);

		DPRINT(("pmd[%u]=0x%lx\n", cnum, val));

		/* copy results back into the user-visible request entry */
		req->reg_value = val;
		req->reg_flags = reg_flags;
		req->reg_last_reset_val = lval;
	}

	return 0;

error:
	PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
	return ret;
}
3402
3403int
3404pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3405{
3406 pfm_context_t *ctx;
3407
3408 if (req == NULL) return -EINVAL;
3409
3410 ctx = GET_PMU_CTX();
3411
3412 if (ctx == NULL) return -EINVAL;
3413
3414
3415
3416
3417
3418 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3419
3420 return pfm_write_pmcs(ctx, req, nreq, regs);
3421}
3422EXPORT_SYMBOL(pfm_mod_write_pmcs);
3423
3424int
3425pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3426{
3427 pfm_context_t *ctx;
3428
3429 if (req == NULL) return -EINVAL;
3430
3431 ctx = GET_PMU_CTX();
3432
3433 if (ctx == NULL) return -EINVAL;
3434
3435
3436
3437
3438
3439 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3440
3441 return pfm_read_pmds(ctx, req, nreq, regs);
3442}
3443EXPORT_SYMBOL(pfm_mod_read_pmds);
3444
3445
3446
3447
3448
3449int
3450pfm_use_debug_registers(struct task_struct *task)
3451{
3452 pfm_context_t *ctx = task->thread.pfm_context;
3453 unsigned long flags;
3454 int ret = 0;
3455
3456 if (pmu_conf->use_rr_dbregs == 0) return 0;
3457
3458 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3459
3460
3461
3462
3463 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3464
3465
3466
3467
3468
3469
3470
3471
3472
3473 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3474
3475 LOCK_PFS(flags);
3476
3477
3478
3479
3480
3481 if (pfm_sessions.pfs_sys_use_dbregs> 0)
3482 ret = -1;
3483 else
3484 pfm_sessions.pfs_ptrace_use_dbregs++;
3485
3486 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3487 pfm_sessions.pfs_ptrace_use_dbregs,
3488 pfm_sessions.pfs_sys_use_dbregs,
3489 task_pid_nr(task), ret));
3490
3491 UNLOCK_PFS(flags);
3492
3493 return ret;
3494}
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504int
3505pfm_release_debug_registers(struct task_struct *task)
3506{
3507 unsigned long flags;
3508 int ret;
3509
3510 if (pmu_conf->use_rr_dbregs == 0) return 0;
3511
3512 LOCK_PFS(flags);
3513 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3514 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3515 ret = -1;
3516 } else {
3517 pfm_sessions.pfs_ptrace_use_dbregs--;
3518 ret = 0;
3519 }
3520 UNLOCK_PFS(flags);
3521
3522 return ret;
3523}
3524
/*
 * PFM_RESTART command: resume monitoring after an overflow notification.
 *
 * If invoked by the monitored task itself (or for a system-wide session),
 * the reset is performed synchronously here. Otherwise the monitored task
 * is either unblocked (blocking mode) or armed to perform the reset on its
 * way back to user level.
 */
static int
pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_buffer_fmt_t *fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state, is_system;
	int ret = 0;

	state = ctx->ctx_state;
	fmt = ctx->ctx_buf_fmt;
	is_system = ctx->ctx_fl_system;
	task = PFM_CTX_TASK(ctx);

	switch(state) {
	case PFM_CTX_MASKED:
		break;
	case PFM_CTX_LOADED:
		/* LOADED is only acceptable when the sampling format
		 * provides an active-restart handler */
		if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
		/* fall through */
	case PFM_CTX_UNLOADED:
	case PFM_CTX_ZOMBIE:
		DPRINT(("invalid state=%d\n", state));
		return -EBUSY;
	default:
		DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
		return -EINVAL;
	}

	/*
	 * system-wide sessions must be operated from the CPU they are
	 * bound to
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/* a loaded/masked context must be attached to a task */
	if (unlikely(task == NULL)) {
		printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
		return -EINVAL;
	}

	/* synchronous restart: we are the monitored task or system-wide */
	if (task == current || is_system) {

		fmt = ctx->ctx_buf_fmt;

		DPRINT(("restarting self %d ovfl=0x%lx\n",
			task_pid_nr(task),
			ctx->ctx_ovfl_regs[0]));

		if (CTX_HAS_SMPL(ctx)) {

			prefetch(ctx->ctx_smpl_hdr);

			/* defaults; the format handler may override */
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 0;

			if (state == PFM_CTX_LOADED)
				ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
			else
				ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		} else {
			/* no sampling buffer: always long-reset overflowed PMDs */
			rst_ctrl.bits.mask_monitoring = 0;
			rst_ctrl.bits.reset_ovfl_pmds = 1;
		}

		if (ret == 0) {
			if (rst_ctrl.bits.reset_ovfl_pmds)
				pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);

			if (rst_ctrl.bits.mask_monitoring == 0) {
				DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));

				/* re-enable the monitors masked at overflow time */
				if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
			} else {
				DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));

				/* monitoring stays masked per format request */
			}
		}

		/* no overflow left pending after a restart */
		ctx->ctx_ovfl_regs[0] = 0UL;

		/* back to LOADED regardless of whether we were MASKED */
		ctx->ctx_state = PFM_CTX_LOADED;

		/* a new overflow notification is needed before the next restart */
		ctx->ctx_fl_can_restart = 0;

		return 0;
	}

	/*
	 * restart on behalf of another task: only legal while MASKED and
	 * only once per overflow notification
	 */
	if (state == PFM_CTX_MASKED) {
		if (ctx->ctx_fl_can_restart == 0) return -EINVAL;

		/* consume the restart permission now, while we hold the lock */
		ctx->ctx_fl_can_restart = 0;
	}

	/*
	 * blocking mode + MASKED: the monitored task is waiting in
	 * pfm_handle_work(); wake it so it performs the reset itself.
	 * Otherwise arm a reset to be done when the task next returns
	 * to user level.
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
		DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
		complete(&ctx->ctx_restart_done);
	} else {
		DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));

		ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;

		PFM_SET_WORK_PENDING(task, 1);

		set_notify_resume(task);

		/* the task will execute the reset in pfm_handle_work() */
	}
	return 0;
}
3677
3678static int
3679pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3680{
3681 unsigned int m = *(unsigned int *)arg;
3682
3683 pfm_sysctl.debug = m == 0 ? 0 : 1;
3684
3685 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3686
3687 if (m == 0) {
3688 memset(pfm_stats, 0, sizeof(pfm_stats));
3689 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3690 }
3691 return 0;
3692}
3693
3694
3695
3696
/*
 * Common worker for PFM_WRITE_IBRS/PFM_WRITE_DBRS: program code (IBR) or
 * data (DBR) debug registers used for range restrictions.
 *
 * mode is PFM_CODE_RR or PFM_DATA_RR. On the first use by this context the
 * debug registers are arbitrated against ptrace usage and cleared. Values
 * are written to the live registers when the PMU is accessible, and always
 * recorded in the context for later reload.
 */
static int
pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct thread_struct *thread = NULL;
	struct task_struct *task;
	pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
	unsigned long flags;
	dbreg_t dbreg;
	unsigned int rnum;
	int first_time;
	int ret = 0, state;
	int i, can_access_pmu = 0;
	int is_system, is_loaded;

	if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;

	state = ctx->ctx_state;
	is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
	is_system = ctx->ctx_fl_system;
	task = ctx->ctx_task;

	if (state == PFM_CTX_ZOMBIE) return -EINVAL;

	/*
	 * when loaded, determine whether we may touch the live registers
	 */
	if (is_loaded) {
		thread = &task->thread;
		/*
		 * system-wide sessions must be operated from their bound CPU
		 */
		if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			return -EBUSY;
		}
		can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
	}

	/*
	 * first use of the debug registers by this context?
	 */
	first_time = ctx->ctx_fl_using_dbreg == 0;

	/*
	 * the task must not already be using the debug registers via ptrace
	 */
	if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
		DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	/*
	 * arbitrate first use of the debug registers for system-wide
	 * sessions against existing ptrace users
	 */
	if (is_loaded) {
		LOCK_PFS(flags);

		if (first_time && is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs)
				ret = -EBUSY;
			else
				pfm_sessions.pfs_sys_use_dbregs++;
		}
		UNLOCK_PFS(flags);
	}

	if (ret != 0) return ret;

	/*
	 * mark ourselves as user of the debug registers for perfmon purposes
	 */
	ctx->ctx_fl_using_dbreg = 1;

	/*
	 * on first use, clear all debug registers to a known state before
	 * programming any of them (serialize after each write as required
	 * by the architecture)
	 */
	if (first_time && can_access_pmu) {
		DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
		for (i=0; i < pmu_conf->num_ibrs; i++) {
			ia64_set_ibr(i, 0UL);
			ia64_dv_serialize_instruction();
		}
		ia64_srlz_i();
		for (i=0; i < pmu_conf->num_dbrs; i++) {
			ia64_set_dbr(i, 0UL);
			ia64_dv_serialize_data();
		}
		ia64_srlz_d();
	}

	/*
	 * process each request entry
	 */
	for (i = 0; i < count; i++, req++) {

		rnum = req->dbreg_num;
		dbreg.val = req->dbreg_value;

		ret = -EINVAL;

		if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
			DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
				  rnum, dbreg.val, mode, i, count));

			goto abort_mission;
		}

		/*
		 * odd-numbered registers hold the control bits; force the
		 * execute (ibr) / read-write (dbr) enables off so the
		 * registers can only be used for range restriction, never
		 * to generate debug faults
		 */
		if (rnum & 0x1) {
			if (mode == PFM_CODE_RR)
				dbreg.ibr.ibr_x = 0;
			else
				dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
		}

		PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);

		/*
		 * write to the live register when accessible and always keep
		 * a copy in the context for lazy reload
		 */
		if (mode == PFM_CODE_RR) {
			CTX_USED_IBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_ibr(rnum, dbreg.val);
				ia64_dv_serialize_instruction();
			}

			ctx->ctx_ibrs[rnum] = dbreg.val;

			DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
		} else {
			CTX_USED_DBR(ctx, rnum);

			if (can_access_pmu) {
				ia64_set_dbr(rnum, dbreg.val);
				ia64_dv_serialize_data();
			}
			ctx->ctx_dbrs[rnum] = dbreg.val;

			DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
				rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
		}
	}

	return 0;

abort_mission:
	/*
	 * undo the first-use accounting on failure.
	 * NOTE(review): pfs_sys_use_dbregs is decremented whenever
	 * first_time && ctx_fl_system, but it was only incremented above
	 * when the context was also loaded — verify the !is_loaded path
	 * cannot underflow the counter.
	 */
	if (first_time) {
		LOCK_PFS(flags);
		if (ctx->ctx_fl_system) {
			pfm_sessions.pfs_sys_use_dbregs--;
		}
		UNLOCK_PFS(flags);
		ctx->ctx_fl_using_dbreg = 0;
	}

	/*
	 * flag the faulty request entry for user level
	 */
	PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);

	return ret;
}
3891
/* PFM_WRITE_IBRS command: program code (instruction) debug registers */
static int
pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
}
3897
/* PFM_WRITE_DBRS command: program data debug registers */
static int
pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
}
3903
3904int
3905pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3906{
3907 pfm_context_t *ctx;
3908
3909 if (req == NULL) return -EINVAL;
3910
3911 ctx = GET_PMU_CTX();
3912
3913 if (ctx == NULL) return -EINVAL;
3914
3915
3916
3917
3918
3919 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3920
3921 return pfm_write_ibrs(ctx, req, nreq, regs);
3922}
3923EXPORT_SYMBOL(pfm_mod_write_ibrs);
3924
3925int
3926pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3927{
3928 pfm_context_t *ctx;
3929
3930 if (req == NULL) return -EINVAL;
3931
3932 ctx = GET_PMU_CTX();
3933
3934 if (ctx == NULL) return -EINVAL;
3935
3936
3937
3938
3939
3940 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3941
3942 return pfm_write_dbrs(ctx, req, nreq, regs);
3943}
3944EXPORT_SYMBOL(pfm_mod_write_dbrs);
3945
3946
3947static int
3948pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3949{
3950 pfarg_features_t *req = (pfarg_features_t *)arg;
3951
3952 req->ft_version = PFM_VERSION;
3953 return 0;
3954}
3955
/*
 * PFM_STOP command: stop monitoring for a loaded context.
 *
 * System-wide: clears DCR.pp and PSR.pp on the bound CPU.
 * Per-task: clears PSR.up for the monitored task, either live (self) or in
 * the task's saved register frame (other task).
 */
static int
pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	struct task_struct *task = PFM_CTX_TASK(ctx);
	int state, is_system;

	state = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * nothing to stop if the context is not attached
	 */
	if (state == PFM_CTX_UNLOADED) return -EINVAL;

	/*
	 * system-wide sessions must be operated from the CPU they are
	 * bound to
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}
	DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
		task_pid_nr(PFM_CTX_TASK(ctx)),
		state,
		is_system));

	/*
	 * system-wide: disable the privileged-monitor enables
	 */
	if (is_system) {
		/*
		 * clear dcr.pp so interrupts/kernel entry no longer enable
		 * privileged monitoring; serialize the control-register write
		 */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		ia64_srlz_i();

		/*
		 * update per-CPU bookkeeping used by the context-switch code
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		/*
		 * clear psr.pp in the live psr
		 */
		pfm_clear_psr_pp();

		/*
		 * and in the saved psr so it stays off after return to
		 * user level
		 */
		ia64_psr(regs)->pp = 0;

		return 0;
	}

	/*
	 * per-task: clear user-monitor enable (psr.up)
	 */
	if (task == current) {
		/* live psr of the calling (monitored) task */
		pfm_clear_psr_up();

		/*
		 * also the saved psr used when returning to user level
		 */
		ia64_psr(regs)->up = 0;
	} else {
		tregs = task_pt_regs(task);

		/*
		 * task is not running: patch its saved register frame
		 */
		ia64_psr(tregs)->up = 0;

		/*
		 * and the context copy restored at context-switch time
		 */
		ctx->ctx_saved_psr_up = 0;
		DPRINT(("task=[%d]\n", task_pid_nr(task)));
	}
	return 0;
}
4043
4044
/*
 * PFM_START command: start monitoring for a loaded context.
 * Mirror image of pfm_stop(): sets PSR.pp/DCR.pp (system-wide) or PSR.up
 * (per-task).
 */
static int
pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct pt_regs *tregs;
	int state, is_system;

	state = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	if (state != PFM_CTX_LOADED) return -EINVAL;

	/*
	 * system-wide sessions must be operated from the CPU they are
	 * bound to
	 */
	if (is_system && ctx->ctx_cpu != smp_processor_id()) {
		DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
		return -EBUSY;
	}

	/*
	 * system-wide: enable privileged monitoring on this CPU
	 */
	if (is_system) {

		/*
		 * saved psr: keep pp set after return to user level
		 */
		ia64_psr(regs)->pp = 1;

		/*
		 * per-CPU bookkeeping for the context-switch code
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);

		/*
		 * live psr
		 */
		pfm_set_psr_pp();

		/* dcr.pp: enable privileged monitoring on kernel entry */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();

		return 0;
	}

	/*
	 * per-task: set user-monitor enable (psr.up)
	 */
	if (ctx->ctx_task == current) {

		/* live psr of the calling (monitored) task */
		pfm_set_psr_up();

		/*
		 * saved psr used when returning to user level
		 */
		ia64_psr(regs)->up = 1;

	} else {
		tregs = task_pt_regs(ctx->ctx_task);

		/*
		 * task is not running: record the enable in the context copy
		 * restored at context-switch time
		 */
		ctx->ctx_saved_psr_up = IA64_PSR_UP;

		/*
		 * and patch the task's saved register frame
		 */
		ia64_psr(tregs)->up = 1;
	}
	return 0;
}
4125
4126static int
4127pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4128{
4129 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4130 unsigned int cnum;
4131 int i;
4132 int ret = -EINVAL;
4133
4134 for (i = 0; i < count; i++, req++) {
4135
4136 cnum = req->reg_num;
4137
4138 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4139
4140 req->reg_value = PMC_DFL_VAL(cnum);
4141
4142 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4143
4144 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4145 }
4146 return 0;
4147
4148abort_mission:
4149 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4150 return ret;
4151}
4152
4153static int
4154pfm_check_task_exist(pfm_context_t *ctx)
4155{
4156 struct task_struct *g, *t;
4157 int ret = -ESRCH;
4158
4159 read_lock(&tasklist_lock);
4160
4161 do_each_thread (g, t) {
4162 if (t->thread.pfm_context == ctx) {
4163 ret = 0;
4164 goto out;
4165 }
4166 } while_each_thread (g, t);
4167out:
4168 read_unlock(&tasklist_lock);
4169
4170 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4171
4172 return ret;
4173}
4174
/*
 * PFM_LOAD_CONTEXT command: attach a context to a task (or to this CPU for
 * system-wide sessions) identified by req->load_pid.
 *
 * Reserves a PMU session, atomically attaches the context to the target
 * thread via cmpxchg, and programs (or arms the lazy reload of) the PMU
 * state. All failure paths unwind the session/dbreg accounting.
 */
static int
pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task;
	struct thread_struct *thread;
	struct pfm_context_t *old;
	unsigned long flags;
#ifndef CONFIG_SMP
	struct task_struct *owner_task = NULL;
#endif
	pfarg_load_t *req = (pfarg_load_t *)arg;
	unsigned long *pmcs_source, *pmds_source;
	int the_cpu;
	int ret = 0;
	int state, is_system, set_dbregs = 0;

	state = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * can only load an unloaded context
	 */
	if (state != PFM_CTX_UNLOADED) {
		DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
			req->load_pid,
			ctx->ctx_state));
		return -EBUSY;
	}

	DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));

	/*
	 * blocking-notification mode cannot be used on oneself: the task
	 * would deadlock waiting for its own restart
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
		DPRINT(("cannot use blocking mode on self\n"));
		return -EINVAL;
	}

	/* resolve load_pid and take a reference on the task */
	ret = pfm_get_task(ctx, req->load_pid, &task);
	if (ret) {
		DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
		return ret;
	}

	ret = -EINVAL;

	/*
	 * system-wide contexts may only be loaded by the creator onto itself
	 */
	if (is_system && task != current) {
		DPRINT(("system wide is self monitoring only load_pid=%d\n",
			req->load_pid));
		goto error;
	}

	thread = &task->thread;

	ret = 0;

	/*
	 * if the context uses debug registers, arbitrate against ptrace
	 * and account the system-wide usage
	 */
	if (ctx->ctx_fl_using_dbreg) {
		if (thread->flags & IA64_THREAD_DBG_VALID) {
			ret = -EBUSY;
			DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
			goto error;
		}
		LOCK_PFS(flags);

		if (is_system) {
			if (pfm_sessions.pfs_ptrace_use_dbregs) {
				DPRINT(("cannot load [%d] dbregs in use\n",
							task_pid_nr(task)));
				ret = -EBUSY;
			} else {
				pfm_sessions.pfs_sys_use_dbregs++;
				DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
				/* remember for unwinding on later failure */
				set_dbregs = 1;
			}
		}

		UNLOCK_PFS(flags);

		if (ret) goto error;
	}

	/*
	 * bind the session to the current CPU (relevant for system-wide)
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();

	ret = -EBUSY;

	/*
	 * reserve a PMU session slot (per-task or system-wide on this CPU)
	 */
	ret = pfm_reserve_session(current, is_system, the_cpu);
	if (ret) goto error;

	DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
		thread->pfm_context, ctx));

	ret = -EBUSY;
	/*
	 * atomically attach: succeeds only if the task had no context
	 */
	old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
	if (old != NULL) {
		DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
		goto error_unres;
	}

	/* drop any stale notification messages from a previous attach */
	pfm_reset_msgq(ctx);

	ctx->ctx_state = PFM_CTX_LOADED;

	/*
	 * link context to task
	 */
	ctx->ctx_task = task;

	if (is_system) {
		/*
		 * mark this CPU as running a system-wide session so the
		 * context-switch/idle code handles it correctly
		 */
		PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);

		if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
	} else {
		/* tell the context-switch code to save/restore PMU state */
		thread->flags |= IA64_THREAD_PM_VALID;
	}

	/*
	 * seed the thread save area from the context's register images
	 */
	pfm_copy_pmds(task, ctx);
	pfm_copy_pmcs(task, ctx);

	pmcs_source = ctx->th_pmcs;
	pmds_source = ctx->th_pmds;

	/*
	 * loading onto the current task: program the PMU right away
	 */
	if (task == current) {

		if (is_system == 0) {

			/* allow user level to read the PMU (psr.sp off) */
			ia64_psr(regs)->sp = 0;
			DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));

			SET_LAST_CPU(ctx, smp_processor_id());
			INC_ACTIVATION();
			SET_ACTIVATION(ctx);
#ifndef CONFIG_SMP
			/*
			 * UP lazy save: push out the previous owner's
			 * registers before we overwrite the PMU
			 */
			owner_task = GET_PMU_OWNER();
			if (owner_task) pfm_lazy_save_regs(owner_task);
#endif
		}

		/*
		 * load all implemented registers into the PMU
		 */
		pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
		pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);

		/* everything is on the PMU now, nothing pending reload */
		ctx->ctx_reload_pmcs[0] = 0UL;
		ctx->ctx_reload_pmds[0] = 0UL;

		/*
		 * restore debug registers if the context uses range
		 * restrictions
		 */
		if (ctx->ctx_fl_using_dbreg) {
			pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
			pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
		}

		/*
		 * we become the PMU owner on this CPU
		 */
		SET_PMU_OWNER(task, ctx);

		DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
	} else {
		/*
		 * loading onto another (stopped) task: state will be picked
		 * up lazily when the task is next scheduled
		 */
		regs = task_pt_regs(task);

		/* force a full reload on next activation */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/* monitoring starts disabled until PFM_START */
		ctx->ctx_saved_psr_up = 0UL;
		ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
	}

	ret = 0;

error_unres:
	if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
error:
	/*
	 * unwind the dbreg accounting taken earlier on failure
	 */
	if (ret && set_dbregs) {
		LOCK_PFS(flags);
		pfm_sessions.pfs_sys_use_dbregs--;
		UNLOCK_PFS(flags);
	}
	/*
	 * release the task reference; if the attach succeeded, double-check
	 * the task did not exit in the meantime
	 */
	if (is_system == 0 && task != current) {
		pfm_put_task(task);

		if (ret == 0) {
			ret = pfm_check_task_exist(ctx);
			if (ret) {
				ctx->ctx_state = PFM_CTX_UNLOADED;
				ctx->ctx_task = NULL;
			}
		}
	}
	return ret;
}
4421
4422
4423
4424
4425
4426
4427
4428
4429
4430static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4431
/*
 * PFM_UNLOAD_CONTEXT command (also called internally on task exit):
 * detach a context from its task/CPU.
 *
 * Stops monitoring, flushes PMU state back into the context, releases the
 * session reservation, and severs the task<->context links. Caller holds
 * the context lock.
 */
static int
pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
{
	struct task_struct *task = PFM_CTX_TASK(ctx);
	struct pt_regs *tregs;
	int prev_state, is_system;
	int ret;

	DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));

	prev_state = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	/*
	 * unloading an unloaded context is a no-op
	 */
	if (prev_state == PFM_CTX_UNLOADED) {
		DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
		return 0;
	}

	/*
	 * stop monitoring first (also validates CPU binding for
	 * system-wide sessions)
	 */
	ret = pfm_stop(ctx, NULL, 0, regs);
	if (ret) return ret;

	ctx->ctx_state = PFM_CTX_UNLOADED;

	/*
	 * system-wide teardown: runs on the bound CPU by construction
	 */
	if (is_system) {

		/*
		 * clear the per-CPU session markers used by the
		 * context-switch/idle code
		 */
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
		PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);

		/*
		 * pull final counter values from the PMU into the context
		 */
		pfm_flush_pmds(current, ctx);

		/*
		 * a zombie context already gave up its session reservation
		 */
		if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);

		/*
		 * sever the task->context link
		 */
		task->thread.pfm_context = NULL;

		/* and the context->task link */
		ctx->ctx_task = NULL;

		return 0;
	}

	/*
	 * per-task teardown
	 */
	tregs = task == current ? regs : task_pt_regs(task);

	if (task == current) {
		/*
		 * re-enable the secure-performance-monitor bit so user level
		 * can no longer read the PMU directly
		 */
		ia64_psr(regs)->sp = 1;

		DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
	}

	/*
	 * save final counter values into the context
	 */
	pfm_flush_pmds(task, ctx);

	/*
	 * a zombie context already gave up its session reservation
	 */
	if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);

	/*
	 * force a full reload if this context is ever loaded again
	 */
	ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
	SET_LAST_CPU(ctx, -1);

	/*
	 * context-switch code must no longer save/restore PMU state
	 */
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	/*
	 * break both links between task and context
	 */
	task->thread.pfm_context = NULL;
	ctx->ctx_task = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/* clear all notification/restart bookkeeping */
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ctx->ctx_fl_can_restart = 0;
	ctx->ctx_fl_going_zombie = 0;

	DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));

	return 0;
}
4559
4560
4561
4562
4563
4564
/*
 * Called on task exit for a task that still has a perfmon context
 * attached: unload the context and, for a zombie context (whose file
 * descriptor is already closed), free it.
 */
void
pfm_exit_thread(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	struct pt_regs *regs = task_pt_regs(task);
	int ret, state;
	int free_ok = 0;

	ctx = PFM_GET_CTX(task);

	PROTECT_CTX(ctx, flags);

	DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));

	state = ctx->ctx_state;
	switch(state) {
	case PFM_CTX_UNLOADED:
		/*
		 * an exiting task should not still be linked to an
		 * unloaded context; report the inconsistency
		 */
		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
		break;
	case PFM_CTX_LOADED:
	case PFM_CTX_MASKED:
		ret = pfm_context_unload(ctx, NULL, 0, regs);
		if (ret) {
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
		}
		DPRINT(("ctx unloaded for current state was %d\n", state));

		/* the fd is still open: notify the monitoring process */
		pfm_end_notify_user(ctx);
		break;
	case PFM_CTX_ZOMBIE:
		/* fd already closed: unload and free the context below */
		ret = pfm_context_unload(ctx, NULL, 0, regs);
		if (ret) {
			printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
		}
		free_ok = 1;
		break;
	default:
		printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
		break;
	}
	UNPROTECT_CTX(ctx, flags);

	/*
	 * sanity: after unload, no monitoring bit may remain set and
	 * nobody may own the PMU on this CPU
	 */
	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	  BUG_ON(GET_PMU_OWNER());
	  BUG_ON(ia64_psr(regs)->up);
	  BUG_ON(ia64_psr(regs)->pp);
	}

	/*
	 * freeing is deferred to here so it happens outside the context
	 * lock
	 */
	if (free_ok) pfm_context_free(ctx);
}
4625
4626
4627
4628
/*
 * perfmon command dispatch table: the array index is the perfmon command
 * number passed by user level; unused command numbers are PFM_CMD_NONE.
 * PFM_CMD() records handler, name, flags, argument count/size and an
 * optional argument-size callback; PFM_CMD_S() is for commands without
 * arguments.
 */
#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}

static pfm_cmd_desc_t pfm_cmd_tab[]={
/* 0  */PFM_CMD_NONE,
/* 1  */PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 2  */PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 3  */PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 4  */PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
/* 5  */PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
/* 6  */PFM_CMD_NONE,
/* 7  */PFM_CMD_NONE,
/* 8  */PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
/* 9  */PFM_CMD_NONE,
/* 10 */PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
/* 11 */PFM_CMD_NONE,
/* 12 */PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
/* 13 */PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
/* 14 */PFM_CMD_NONE,
/* 15 */PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
/* 16 */PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
/* 17 */PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
/* 18 */PFM_CMD_NONE,
/* 19 */PFM_CMD_NONE,
/* 20 */PFM_CMD_NONE,
/* 21 */PFM_CMD_NONE,
/* 22 */PFM_CMD_NONE,
/* 23 */PFM_CMD_NONE,
/* 24 */PFM_CMD_NONE,
/* 25 */PFM_CMD_NONE,
/* 26 */PFM_CMD_NONE,
/* 27 */PFM_CMD_NONE,
/* 28 */PFM_CMD_NONE,
/* 29 */PFM_CMD_NONE,
/* 30 */PFM_CMD_NONE,
/* 31 */PFM_CMD_NONE,
/* 32 */PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
/* 33 */PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
};
#define PFM_CMD_COUNT	(sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4672
/*
 * Verify that the context's target task is in a state compatible with
 * command 'cmd'. Must be called with the context lock held; the lock
 * may be temporarily released (around wait_task_inactive), in which
 * case the state is re-read from the top.
 *
 * Returns 0 when the command may proceed, a negative errno otherwise.
 */
static int
pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
{
	struct task_struct *task;
	int state, old_state;

recheck:
	state = ctx->ctx_state;
	task = ctx->ctx_task;

	/* no task attached: nothing to validate */
	if (task == NULL) {
		DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
		return 0;
	}

	DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
		ctx->ctx_fd,
		state,
		task_pid_nr(task),
		task->state, PFM_CMD_STOPPED(cmd)));

	/*
	 * self-monitoring and system-wide sessions always operate on the
	 * current CPU's state: no stopped-task requirement applies
	 */
	if (task == current || ctx->ctx_fl_system) return 0;

	/*
	 * context state gates which commands are acceptable
	 */
	switch(state) {
	case PFM_CTX_UNLOADED:
		/* context not attached to any task: always fine */
		return 0;
	case PFM_CTX_ZOMBIE:
		/* owner is gone; no command can operate on the context */
		DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
		return -EINVAL;
	case PFM_CTX_MASKED:
		/*
		 * a masked context is effectively stopped, so most commands
		 * may proceed; only unload still needs the task inactive
		 */
		if (cmd != PFM_UNLOAD_CONTEXT) return 0;
	}

	/*
	 * commands that modify the monitored task's PMU state require the
	 * task to be stopped or traced, and fully off any CPU
	 */
	if (PFM_CMD_STOPPED(cmd)) {
		if (!task_is_stopped_or_traced(task)) {
			DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
			return -EBUSY;
		}
		/*
		 * wait until the task is truly off the CPU. We must drop the
		 * context lock while waiting (wait_task_inactive may sleep),
		 * so the context state can change underneath us: recheck.
		 */
		old_state = state;

		UNPROTECT_CTX(ctx, flags);

		wait_task_inactive(task, 0);

		PROTECT_CTX(ctx, flags);

		/* state changed while unlocked: re-validate from scratch */
		if (ctx->ctx_state != old_state) {
			DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
			goto recheck;
		}
	}
	return 0;
}
4773
4774
4775
4776
/*
 * System call entry point for all perfmon commands.
 *
 * Validates the command, copies the user argument block into a kernel
 * buffer (possibly growing it once via the command's getsize callback),
 * resolves the context from 'fd' when the command requires one, checks
 * the target task state, then dispatches to the command handler with
 * the context locked. Results are copied back for RW-argument commands.
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
{
	struct fd f = {NULL, 0};
	pfm_context_t *ctx = NULL;
	unsigned long flags = 0UL;
	void *args_k = NULL;
	long ret;
	size_t base_sz, sz, xtra_sz = 0;
	int narg, completed_args = 0, call_made = 0, cmd_flags;
	int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	int (*getsize)(void *arg, size_t *sz);
#define PFM_MAX_ARGSIZE 4096

	/* perfmon not available on this PMU */
	if (unlikely(pmu_conf == NULL)) return -ENOSYS;

	if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	func = pfm_cmd_tab[cmd].cmd_func;
	narg = pfm_cmd_tab[cmd].cmd_narg;
	base_sz = pfm_cmd_tab[cmd].cmd_argsize;
	getsize = pfm_cmd_tab[cmd].cmd_getsize;
	cmd_flags = pfm_cmd_tab[cmd].cmd_flags;

	/* PFM_CMD_NONE slot */
	if (unlikely(func == NULL)) {
		DPRINT(("invalid cmd=%d\n", cmd));
		return -EINVAL;
	}

	DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
		PFM_CMD_NAME(cmd),
		cmd,
		narg,
		base_sz,
		count));

	/*
	 * argument count check: MANY means at least one, otherwise the
	 * count must match the command's fixed argument count exactly
	 */
	if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
		return -EINVAL;

restart_args:
	sz = xtra_sz + base_sz*count;

	/* reject oversized requests before allocating */
	if (unlikely(sz > PFM_MAX_ARGSIZE)) {
		printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
		return -E2BIG;
	}

	/*
	 * allocate the maximum size once; the buffer is reused if we loop
	 * back through restart_args with an extra size
	 */
	if (likely(count && args_k == NULL)) {
		args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
		if (args_k == NULL) return -ENOMEM;
	}

	ret = -EFAULT;

	/* snapshot the user arguments into the kernel buffer */
	if (sz && copy_from_user(args_k, arg, sz)) {
		DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
		goto error_args;
	}

	/*
	 * some commands carry variable-size payloads; ask the command's
	 * getsize callback once, then re-copy with the extra bytes
	 */
	if (completed_args == 0 && getsize) {

		ret = (*getsize)(args_k, &xtra_sz);
		if (ret) goto error_args;

		completed_args = 1;

		DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));

		if (likely(xtra_sz)) goto restart_args;
	}

	/* context-less commands (e.g. context create) skip fd resolution */
	if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;

	ret = -EBADF;

	f = fdget(fd);
	if (unlikely(f.file == NULL)) {
		DPRINT(("invalid fd %d\n", fd));
		goto error_args;
	}
	if (unlikely(PFM_IS_FILE(f.file) == 0)) {
		DPRINT(("fd %d not related to perfmon\n", fd));
		goto error_args;
	}

	ctx = f.file->private_data;
	if (unlikely(ctx == NULL)) {
		DPRINT(("no context for fd %d\n", fd));
		goto error_args;
	}
	prefetch(&ctx->ctx_state);

	PROTECT_CTX(ctx, flags);

	/*
	 * task/context state must allow this command; may drop and
	 * reacquire the context lock internally
	 */
	ret = pfm_check_task_state(ctx, cmd, flags);
	if (unlikely(ret)) goto abort_locked;

skip_fd:
	ret = (*func)(ctx, args_k, count, task_pt_regs(current));

	call_made = 1;

abort_locked:
	if (likely(ctx)) {
		DPRINT(("context unlocked\n"));
		UNPROTECT_CTX(ctx, flags);
	}

	/* copy results back for commands with read/write arguments */
	if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;

error_args:
	if (f.file)
		fdput(f);

	kfree(args_k);

	DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));

	return ret;
}
4926
/*
 * Resume monitoring after an overflow has been handled (restart or
 * unblock path). With a sampling buffer, the buffer-format module
 * decides whether to reset the overflowed PMDs and whether monitoring
 * stays masked; without one, the overflowed PMDs are always reset.
 */
static void
pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
{
	pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
	pfm_ovfl_ctrl_t rst_ctrl;
	int state;
	int ret = 0;

	state = ctx->ctx_state;

	if (CTX_HAS_SMPL(ctx)) {

		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 0;

		/* loaded vs masked: the format has two distinct callbacks */
		if (state == PFM_CTX_LOADED)
			ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
		else
			ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
	} else {
		/* no buffer: default policy is to reset overflowed PMDs */
		rst_ctrl.bits.mask_monitoring = 0;
		rst_ctrl.bits.reset_ovfl_pmds = 1;
	}

	if (ret == 0) {
		if (rst_ctrl.bits.reset_ovfl_pmds) {
			pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
		}
		if (rst_ctrl.bits.mask_monitoring == 0) {
			DPRINT(("resuming monitoring\n"));
			if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
		} else {
			DPRINT(("stopping monitoring\n"));
			/* monitoring intentionally left masked */
		}
		ctx->ctx_state = PFM_CTX_LOADED;
	}
}
4968
4969
4970
4971
4972
4973static void
4974pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4975{
4976 int ret;
4977
4978 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
4979
4980 ret = pfm_context_unload(ctx, NULL, 0, regs);
4981 if (ret) {
4982 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
4983 }
4984
4985
4986
4987
4988 wake_up_interruptible(&ctx->ctx_zombieq);
4989
4990
4991
4992
4993
4994
4995}
4996
4997static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4998
4999
5000
5001
5002
5003
5004
5005
5006
5007
/*
 * Deferred work executed by the monitored task on return to user mode
 * (notify-resume path). Depending on the recorded trap reason, the task
 * either blocks waiting for a pfm_restart from the monitoring process,
 * handles a forced zombie termination, or simply resets/resumes after
 * an overflow.
 */
void
pfm_handle_work(void)
{
	pfm_context_t *ctx;
	struct pt_regs *regs;
	unsigned long flags, dummy_flags;
	unsigned long ovfl_regs;
	unsigned int reason;
	int ret;

	ctx = PFM_GET_CTX(current);
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
			task_pid_nr(current));
		return;
	}

	PROTECT_CTX(ctx, flags);

	PFM_SET_WORK_PENDING(current, 0);

	regs = task_pt_regs(current);

	/* consume the trap reason before possibly dropping the lock */
	reason = ctx->ctx_fl_trap_reason;
	ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));

	/* zombie contexts are terminated regardless of the trap reason */
	if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
		goto do_zombie;

	/* a RESET reason means no blocking: just reset and resume */
	if (reason == PFM_TRAP_REASON_RESET)
		goto skip_blocking;

	/*
	 * blocking path: drop the lock (and re-enable interrupts — the lock
	 * was taken with them disabled) before sleeping
	 */
	UNPROTECT_CTX(ctx, flags);

	local_irq_enable();

	DPRINT(("before block sleeping\n"));

	/* sleep until the monitoring process issues pfm_restart */
	ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);

	DPRINT(("after block sleeping ret=%d\n", ret));

	/*
	 * reacquire the lock; 'flags' from above remains the value to
	 * restore at the end, so use a scratch variable here
	 */
	PROTECT_CTX(ctx, dummy_flags);

	/*
	 * re-read the overflow bitmask: it may have been updated while we
	 * were sleeping (e.g. by the restart path)
	 */
	ovfl_regs = ctx->ctx_ovfl_regs[0];

	if (ctx->ctx_fl_going_zombie) {
do_zombie:
		DPRINT(("context is zombie, bailing out\n"));
		pfm_context_force_terminate(ctx, regs);
		goto nothing_to_do;
	}

	/* interrupted sleep: do not touch the PMU state */
	if (ret < 0)
		goto nothing_to_do;

skip_blocking:
	pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
	ctx->ctx_ovfl_regs[0] = 0UL;

nothing_to_do:
	/* restore the interrupt state saved at the original PROTECT_CTX */
	UNPROTECT_CTX(ctx, flags);
}
5109
5110static int
5111pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5112{
5113 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5114 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5115 return 0;
5116 }
5117
5118 DPRINT(("waking up somebody\n"));
5119
5120 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5121
5122
5123
5124
5125
5126 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5127
5128 return 0;
5129}
5130
5131static int
5132pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5133{
5134 pfm_msg_t *msg = NULL;
5135
5136 if (ctx->ctx_fl_no_msg == 0) {
5137 msg = pfm_get_new_msg(ctx);
5138 if (msg == NULL) {
5139 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5140 return -1;
5141 }
5142
5143 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5144 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5145 msg->pfm_ovfl_msg.msg_active_set = 0;
5146 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5147 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5148 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5149 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5150 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5151 }
5152
5153 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5154 msg,
5155 ctx->ctx_fl_no_msg,
5156 ctx->ctx_fd,
5157 ovfl_pmds));
5158
5159 return pfm_notify_user(ctx, msg);
5160}
5161
/*
 * Queue an end-of-session (PFM_MSG_END) message and notify the
 * monitoring process. Used when the monitored task exits.
 */
static int
pfm_end_notify_user(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	msg = pfm_get_new_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
		return -1;
	}
	/* zero the whole message before filling the end-message fields */
	memset(msg, 0, sizeof(*msg));

	msg->pfm_end_msg.msg_type = PFM_MSG_END;
	msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
	/*
	 * NOTE(review): this writes the ovfl variant's tstamp — presumably
	 * pfm_msg_t is a union so the store aliases; it is redundant after
	 * the memset above either way. Verify against the pfm_msg_t
	 * declaration before cleaning up.
	 */
	msg->pfm_ovfl_msg.msg_tstamp = 0UL;

	DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
		msg,
		ctx->ctx_fl_no_msg,
		ctx->ctx_fd));

	return pfm_notify_user(ctx, msg);
}
5186
5187
5188
5189
5190
/*
 * Core counter-overflow processing, called from the PMU interrupt path
 * with the context locked and the PMU frozen.
 *
 * Walks the overflow bits in pmc0, updates the 64-bit software counters,
 * invokes the sampling-buffer format handler (if any) to decide on
 * reset/notify/mask policy, resets the requested PMDs, and arranges for
 * user notification and/or task blocking on return to user mode.
 */
static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
	unsigned long pmc0, struct pt_regs *regs)
{
	pfm_ovfl_arg_t *ovfl_arg;
	unsigned long mask;
	unsigned long old_val, ovfl_val, new_val;
	unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
	unsigned long tstamp;
	pfm_ovfl_ctrl_t ovfl_ctrl;
	unsigned int i, has_smpl;
	int must_notify = 0;

	/* owner died: demote the interrupt to spurious and stop monitoring */
	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;

	/* bit 0 of pmc0 must be set (PMU frozen) for a genuine overflow */
	if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;

	tstamp = ia64_get_itc();
	mask = pmc0 >> PMU_FIRST_COUNTER;
	ovfl_val = pmu_conf->ovfl_val;
	has_smpl = CTX_HAS_SMPL(ctx);

	DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
		"used_pmds=0x%lx\n",
		pmc0,
		task ? task_pid_nr(task): -1,
		(regs ? regs->cr_iip : 0),
		CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
		ctx->ctx_used_pmds[0]));

	/*
	 * first pass: fold the hardware overflow into the 64-bit software
	 * counters and collect which PMDs overflowed / want notification
	 */
	for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip counters that did not overflow */
		if ((mask & 0x1) == 0) continue;

		/*
		 * add 1 + ovfl_val to account for the hardware wrap; the
		 * 64-bit value overflows exactly when new < old
		 */
		old_val = new_val = ctx->ctx_pmds[i].val;
		new_val += 1 + ovfl_val;
		ctx->ctx_pmds[i].val = new_val;

		/* 64-bit overflow: record it, and notification if requested */
		if (likely(old_val > new_val)) {
			ovfl_pmds |= 1UL << i;
			if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
		}

		DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
			i,
			new_val,
			old_val,
			ia64_get_pmd(i) & ovfl_val,
			ovfl_pmds,
			ovfl_notify));
	}

	/* no 64-bit overflow: nothing more to do */
	if (ovfl_pmds == 0UL) return;

	/* second pass: decide reset/notify/mask policy */
	ovfl_ctrl.val = 0;
	reset_pmds = 0UL;

	/*
	 * with a sampling buffer, call the format handler once per
	 * overflowed PMD and merge its control decisions
	 */
	if (has_smpl) {
		unsigned long start_cycles, end_cycles;
		unsigned long pmd_mask;
		int j, k, ret = 0;
		int this_cpu = smp_processor_id();

		pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
		ovfl_arg = &ctx->ctx_ovfl_arg;

		prefetch(ctx->ctx_smpl_hdr);

		for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {

			mask = 1UL << i;

			if ((pmd_mask & 0x1) == 0) continue;

			/* describe this overflow to the format handler */
			ovfl_arg->ovfl_pmd = (unsigned char )i;
			ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
			ovfl_arg->active_set = 0;
			ovfl_arg->ovfl_ctrl.val = 0;
			ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];

			ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
			ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
			ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;

			/* snapshot the PMDs this counter wants sampled */
			if (smpl_pmds) {
				for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
					if ((smpl_pmds & 0x1) == 0) continue;
					ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
					DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
				}
			}

			pfm_stats[this_cpu].pfm_smpl_handler_calls++;

			start_cycles = ia64_get_itc();

			/* a non-zero return aborts processing of remaining PMDs */
			ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);

			end_cycles = ia64_get_itc();

			/* accumulate the handler's control decisions */
			ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
			ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
			ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;

			if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;

			pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
		}

		if (ret && pmd_mask) {
			DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
				pmd_mask<<PMU_FIRST_COUNTER));
		}

		/* PMDs being reset now should not stay marked as pending */
		ovfl_pmds &= ~reset_pmds;
	} else {
		/*
		 * no sampling buffer: notify (and mask/block) when any PMD
		 * requested notification, otherwise just reset everything
		 */
		ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
		ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;

		if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
	}

	DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));

	/* short (in-interrupt) reset of the requested PMDs */
	if (reset_pmds) {
		unsigned long bm = reset_pmds;
		pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
	}

	if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
		/* remember what to report to user land */
		ctx->ctx_ovfl_regs[0] = ovfl_pmds;

		/* blocking contexts stop the task on return to user mode */
		if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {

			ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;

			PFM_SET_WORK_PENDING(task, 1);

			/* run pfm_handle_work() on the way back to user mode */
			set_notify_resume(task);
		}

		/* actual notification is deferred until after masking below */
		must_notify = 1;
	}

	DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
		GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
		PFM_GET_WORK_PENDING(task),
		ctx->ctx_fl_trap_reason,
		ovfl_pmds,
		ovfl_notify,
		ovfl_ctrl.bits.mask_monitoring ? 1 : 0));

	/* mask monitoring until a restart is issued, when requested */
	if (ovfl_ctrl.bits.mask_monitoring) {
		pfm_mask_monitoring(task);
		ctx->ctx_state = PFM_CTX_MASKED;
		ctx->ctx_fl_can_restart = 1;
	}

	if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);

	return;

sanity_check:
	printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
		smp_processor_id(),
		task ? task_pid_nr(task) : -1,
		pmc0);
	return;

stop_monitoring:
	/*
	 * context owner is gone (zombie): stop user-level monitoring in the
	 * interrupted register frame and treat the interrupt as spurious;
	 * final cleanup happens later on the context-switch/exit path
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}
5471
/*
 * PMU interrupt body: read pmc0, validate owner/context, and dispatch
 * to pfm_overflow_handler(). Returns 0 when a genuine overflow was
 * handled, -1 for spurious interrupts. Always unfreezes the PMU.
 */
static int
pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/* pmc0 holds the frozen bit and the per-counter overflow bits */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx = GET_PMU_CTX();

	/* only meaningful with an overflow and a current PMU owner */
	if (PMC0_HAS_OVFL(pmc0) && task) {

		/* owner without a context: should not happen */
		if (!ctx) goto report_spurious1;

		/* per-task session requires the PM-valid thread flag */
		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}

	/* the PMU froze itself on overflow; re-enable it in all cases */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
}
5536
/*
 * Top-level PMU interrupt handler. Dispatches either to the normal
 * perfmon path (with per-CPU cycle accounting) or to an alternate
 * handler when one has been registered (pfm_install_alt_pmu_interrupt).
 */
static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;
	struct pt_regs *regs = get_irq_regs();

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(arg, regs);

		total_cycles = ia64_get_itc();

		/* only account cycles for non-spurious interrupts */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	}
	else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu();
	return IRQ_HANDLED;
}
5576
5577
5578
5579
5580
/* sentinel seq_file iterator value: emit the header before per-CPU stats */
#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5582
5583static void *
5584pfm_proc_start(struct seq_file *m, loff_t *pos)
5585{
5586 if (*pos == 0) {
5587 return PFM_PROC_SHOW_HEADER;
5588 }
5589
5590 while (*pos <= nr_cpu_ids) {
5591 if (cpu_online(*pos - 1)) {
5592 return (void *)*pos;
5593 }
5594 ++*pos;
5595 }
5596 return NULL;
5597}
5598
5599static void *
5600pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5601{
5602 ++*pos;
5603 return pfm_proc_start(m, pos);
5604}
5605
/* seq_file stop callback: nothing to release */
static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}
5610
5611static void
5612pfm_proc_show_header(struct seq_file *m)
5613{
5614 struct list_head * pos;
5615 pfm_buffer_fmt_t * entry;
5616 unsigned long flags;
5617
5618 seq_printf(m,
5619 "perfmon version : %u.%u\n"
5620 "model : %s\n"
5621 "fastctxsw : %s\n"
5622 "expert mode : %s\n"
5623 "ovfl_mask : 0x%lx\n"
5624 "PMU flags : 0x%x\n",
5625 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5626 pmu_conf->pmu_name,
5627 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5628 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5629 pmu_conf->ovfl_val,
5630 pmu_conf->flags);
5631
5632 LOCK_PFS(flags);
5633
5634 seq_printf(m,
5635 "proc_sessions : %u\n"
5636 "sys_sessions : %u\n"
5637 "sys_use_dbregs : %u\n"
5638 "ptrace_use_dbregs : %u\n",
5639 pfm_sessions.pfs_task_sessions,
5640 pfm_sessions.pfs_sys_sessions,
5641 pfm_sessions.pfs_sys_use_dbregs,
5642 pfm_sessions.pfs_ptrace_use_dbregs);
5643
5644 UNLOCK_PFS(flags);
5645
5646 spin_lock(&pfm_buffer_fmt_lock);
5647
5648 list_for_each(pos, &pfm_buffer_fmt_list) {
5649 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5650 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5651 entry->fmt_uuid[0],
5652 entry->fmt_uuid[1],
5653 entry->fmt_uuid[2],
5654 entry->fmt_uuid[3],
5655 entry->fmt_uuid[4],
5656 entry->fmt_uuid[5],
5657 entry->fmt_uuid[6],
5658 entry->fmt_uuid[7],
5659 entry->fmt_uuid[8],
5660 entry->fmt_uuid[9],
5661 entry->fmt_uuid[10],
5662 entry->fmt_uuid[11],
5663 entry->fmt_uuid[12],
5664 entry->fmt_uuid[13],
5665 entry->fmt_uuid[14],
5666 entry->fmt_uuid[15],
5667 entry->fmt_name);
5668 }
5669 spin_unlock(&pfm_buffer_fmt_lock);
5670
5671}
5672
/*
 * seq_file show callback: print the header for the sentinel entry,
 * otherwise the per-CPU statistics for CPU (v - 1). With a single
 * online CPU and debugging enabled, also dump live PMC/PMD state.
 */
static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* iterator values are 1-based CPU indices (0 is the header) */
	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs : %lu\n"
		"CPU%-2d overflow cycles : %lu\n"
		"CPU%-2d overflow min : %lu\n"
		"CPU%-2d overflow max : %lu\n"
		"CPU%-2d smpl handler calls : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs : %lu\n"
		"CPU%-2d replay intrs : %lu\n"
		"CPU%-2d syst_wide : %d\n"
		"CPU%-2d dcr_pp : %d\n"
		"CPU%-2d exclude idle : %d\n"
		"CPU%-2d owner : %d\n"
		"CPU%-2d context : %p\n"
		"CPU%-2d activations : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	/*
	 * live register dump is only meaningful when a single CPU is
	 * online (the registers read are those of the executing CPU)
	 */
	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {

		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr : 0x%lx\n"
			"CPU%-2d pmc0 : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u : 0x%lx\n"
				"CPU%-2d pmd%u : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}
5741
/* seq_file operations for /proc/perfmon */
const struct seq_operations pfm_seq_ops = {
	.start = pfm_proc_start,
	.next = pfm_proc_next,
	.stop = pfm_proc_stop,
	.show = pfm_proc_show
};
5748
/* /proc/perfmon open: bind the seq_file iterator to the file */
static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
5754
5755
5756
5757
5758
5759
5760
5761
/*
 * Context-switch hook for system-wide sessions: propagate the per-CPU
 * monitoring state (psr.pp, and dcr.pp when idle must be excluded) to
 * the incoming/outgoing task.
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * ordinary tasks (pid != 0), or sessions that do not exclude idle:
	 * just set/clear psr.pp in the task's saved register frame
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}

	/*
	 * idle task with exclude-idle: toggle dcr.pp/psr.pp directly so
	 * monitoring is suspended while idle runs and resumed afterwards
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);

		/* entering idle: stop system-wide monitoring */
		if (is_ctxswin) {

			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}

		/* leaving idle: restart system-wide monitoring */
		ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
5807
5808#ifdef CONFIG_SMP
5809
/*
 * Forcibly detach a (zombie) context from its task: clear user-level
 * monitoring bits in the saved register frame, release PMU ownership,
 * and sever the task<->context link.
 */
static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	/* stop user-level monitoring and re-enable the secure bit */
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n",
			task_pid_nr(ctx->ctx_task)));
		SET_PMU_OWNER(NULL, NULL);
	}

	/* cancel any pending notify-resume work for this task */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context = NULL;
	task->thread.flags &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}
5834
5835
5836
5837
5838
/*
 * SMP context-switch-out hook: save the outgoing task's PMU state.
 * For zombie contexts, performs the final cleanup and frees the
 * context instead.
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * context-switch-safe locking variant (interrupts are already
	 * disabled on this path)
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		/* zombies must have released their sampling buffer already */
		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);
		return;
	}

	/* sanity: interrupts must be off in this path */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop user-level monitoring before saving so counters do not
	 * advance while state is captured
	 */
	pfm_clear_psr_up();

	/* remember psr.up so it can be restored on switch-in */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/* release PMU ownership; state now lives in the thread structure */
	SET_PMU_OWNER(NULL, NULL);

	/* save the PMDs used by this context */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/* capture pmc0 (overflow/freeze status) for replay on switch-in */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/* PMU froze on overflow: unfreeze now that state is saved */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
5926
5927#else
/*
 * UP context-switch-out hook: lazy saving — only stop user-level
 * monitoring and record psr.up; the registers themselves are saved
 * later by pfm_lazy_save_regs() if another context needs the PMU.
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	psr = pfm_get_psr();

	/* sanity: interrupts must be off in this path */
	BUG_ON(psr & (IA64_PSR_I));

	/* stop user-level monitoring; registers stay in the PMU (lazy) */
	pfm_clear_psr_up();

	/* remember psr.up so it can be restored on switch-in */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}
5958
/*
 * UP only: flush the previous owner's PMU registers to its thread
 * state when a different context is about to use the PMU.
 */
static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;

	/* monitoring must already be stopped by pfm_save_regs() */
	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);

	/* lock the previous owner's context while flushing its state */
	PROTECT_CTX(ctx,flags);

	/* release ownership; state now lives in the thread structure */
	SET_PMU_OWNER(NULL, NULL);

	/* save the PMDs used by this context */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/* capture pmc0 (overflow/freeze status) for replay on reload */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/* PMU froze on overflow: unfreeze now that state is saved */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	UNPROTECT_CTX(ctx,flags);
}
6015#endif
6016
6017#ifdef CONFIG_SMP
6018
6019
6020
/*
 * SMP context-switch-in hook: reload the incoming task's PMU state.
 * Uses the activation number to detect whether the PMU still holds
 * this context's state (partial reload) or must be fully reloaded.
 * Zombie contexts are cleaned up and freed instead.
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	/* task without valid PMU state: nothing to load */
	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * context-switch-safe locking variant (interrupts are already
	 * disabled on this path)
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/* final teardown of the zombie context */
		pfm_context_free(ctx);

		return;
	}

	/* restore debug registers first when the context uses them */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/* psr.up state captured at switch-out time */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * same CPU and no other activation in between: PMU still holds our
	 * state, only reload what was explicitly marked for reload
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {

		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * full reload: either just the used PMDs (fast context
		 * switch) or all of them, plus all PMCs
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		pmc_mask = ctx->ctx_all_pmcs[0];
	}

	/* PMDs before PMCs: writing PMCs may re-enable monitoring */
	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * an overflow was pending at save time: re-arm pmc0 and replay the
	 * interrupt (by resend on PMUs that need it)
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {

		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();
		ctx->th_pmcs[0] = 0UL;

		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/* reload requests are now satisfied */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/* record a fresh activation so staleness can be detected later */
	INC_ACTIVATION();

	SET_ACTIVATION(ctx);

	/* this task/context now owns the PMU on this CPU */
	SET_PMU_OWNER(task, ctx);

	/* resume user-level monitoring if it was active at switch-out */
	if (likely(psr_up)) pfm_set_psr_up();

	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
6179#else
6180
6181
6182
6183
/*
 * UP context-switch-in hook. With lazy saving, the PMU may still hold
 * this task's state (owner unchanged): then only psr.up needs to be
 * restored. Otherwise the previous owner's state is flushed first and
 * a full reload is performed.
 */
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx = PFM_GET_CTX(task);
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/* restore debug registers first when the context uses them */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/* psr.up state captured at switch-out time */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * PMU still owned by this task (lazy save never triggered):
	 * registers are intact, only resume monitoring
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/* flush the previous owner's state out of the PMU first */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * full reload: either just the used PMDs (fast context switch) or
	 * all of them, plus all PMCs
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	pmc_mask = ctx->ctx_all_pmcs[0];

	/* PMDs before PMCs: writing PMCs may re-enable monitoring */
	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * an overflow was pending at save time: re-arm pmc0 and replay the
	 * interrupt (by resend on PMUs that need it)
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {

		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();

		ctx->th_pmcs[0] = 0UL;

		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/* this task/context now owns the PMU */
	SET_PMU_OWNER(task, ctx);

	/* resume user-level monitoring if it was active at switch-out */
	if (likely(psr_up)) pfm_set_psr_up();
}
6295#endif
6296
6297
6298
6299
/*
 * Fold the hardware PMD values of a task into the 64-bit software
 * counters kept in its perfmon context, accounting for any pending
 * overflow recorded in pmc0.  The live PMU is only touched when this
 * CPU still holds the task's state.
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the task flushing its own context?
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * the PMU is directly accessible when the task is the current
	 * owner, or in system-wide mode when the context is bound to
	 * this CPU
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * drop ownership first so nothing lazily saves stale state
		 * on top of what we are about to read
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read pmc0 from hardware to pick up pending overflow bits
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0);

		/*
		 * unfreeze the PMU (also clears pmc0 overflow state)
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = ctx->th_pmcs[0];
		/*
		 * the saved overflow information is consumed here
		 */
		ctx->th_pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;
	/*
	 * iterate over every PMD the context ever used
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip unused pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * value source: the live register when accessible,
		 * otherwise the saved per-thread copy
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task_pid_nr(task),
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * merge the hardware low bits into the 64-bit
			 * software counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * once folded into the context, the per-thread
			 * hardware counter value restarts from zero
			 */
			pmd_val = 0UL;

			/*
			 * add one full wrap for a pending overflow of this pmd
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));

		/* only rewrite the saved thread state when flushing ourselves */
		if (is_self) ctx->th_pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
6409
/*
 * irqaction registered on IA64_PERFMON_VECTOR for every CPU
 * (see pfm_init_percpu())
 */
static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags = IRQF_DISABLED,
	.name = "perfmon"
};
6415
/*
 * Run on each CPU (via on_each_cpu) before installing the alternate
 * interrupt handler: stop monitoring and freeze the PMU on that CPU.
 */
static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * stop monitoring: clear psr.up and psr.pp in both the live psr
	 * and the saved user-level psr
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * freeze the PMU so no counter advances and no perfmon interrupt
	 * fires while the handler is being swapped
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}
6441
/*
 * Run on each CPU (via on_each_cpu) when the alternate interrupt
 * handler is removed: return the PMU to the default (unfrozen,
 * monitoring stopped) state on that CPU.
 */
void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * monitoring stays off: clear psr.up and psr.pp in both the live
	 * psr and the saved user-level psr
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * thaw the PMU again so normal perfmon use can resume
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}
6466
6467int
6468pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6469{
6470 int ret, i;
6471 int reserve_cpu;
6472
6473
6474 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6475
6476
6477 if (pfm_alt_intr_handler) return -EBUSY;
6478
6479
6480 if (!spin_trylock(&pfm_alt_install_check)) {
6481 return -EBUSY;
6482 }
6483
6484
6485 for_each_online_cpu(reserve_cpu) {
6486 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6487 if (ret) goto cleanup_reserve;
6488 }
6489
6490
6491 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
6492 if (ret) {
6493 DPRINT(("on_each_cpu() failed: %d\n", ret));
6494 goto cleanup_reserve;
6495 }
6496
6497
6498 pfm_alt_intr_handler = hdl;
6499
6500 spin_unlock(&pfm_alt_install_check);
6501
6502 return 0;
6503
6504cleanup_reserve:
6505 for_each_online_cpu(i) {
6506
6507 if (i >= reserve_cpu) break;
6508
6509 pfm_unreserve_session(NULL, 1, i);
6510 }
6511
6512 spin_unlock(&pfm_alt_install_check);
6513
6514 return ret;
6515}
6516EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6517
6518int
6519pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6520{
6521 int i;
6522 int ret;
6523
6524 if (hdl == NULL) return -EINVAL;
6525
6526
6527 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6528
6529
6530 if (!spin_trylock(&pfm_alt_install_check)) {
6531 return -EBUSY;
6532 }
6533
6534 pfm_alt_intr_handler = NULL;
6535
6536 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
6537 if (ret) {
6538 DPRINT(("on_each_cpu() failed: %d\n", ret));
6539 }
6540
6541 for_each_online_cpu(i) {
6542 pfm_unreserve_session(NULL, 1, i);
6543 }
6544
6545 spin_unlock(&pfm_alt_install_check);
6546
6547 return 0;
6548}
6549EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6550
6551
6552
6553
6554static int init_pfm_fs(void);
6555
6556static int __init
6557pfm_probe_pmu(void)
6558{
6559 pmu_config_t **p;
6560 int family;
6561
6562 family = local_cpu_data->family;
6563 p = pmu_confs;
6564
6565 while(*p) {
6566 if ((*p)->probe) {
6567 if ((*p)->probe() == 0) goto found;
6568 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6569 goto found;
6570 }
6571 p++;
6572 }
6573 return -1;
6574found:
6575 pmu_conf = *p;
6576 return 0;
6577}
6578
/*
 * file operations for the read-only, seq_file-based /proc/perfmon entry
 * (created in pfm_init())
 */
static const struct file_operations pfm_proc_fops = {
	.open = pfm_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
6585
6586int __init
6587pfm_init(void)
6588{
6589 unsigned int n, n_counters, i;
6590
6591 printk("perfmon: version %u.%u IRQ %u\n",
6592 PFM_VERSION_MAJ,
6593 PFM_VERSION_MIN,
6594 IA64_PERFMON_VECTOR);
6595
6596 if (pfm_probe_pmu()) {
6597 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6598 local_cpu_data->family);
6599 return -ENODEV;
6600 }
6601
6602
6603
6604
6605
6606 n = 0;
6607 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6608 if (PMC_IS_IMPL(i) == 0) continue;
6609 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6610 n++;
6611 }
6612 pmu_conf->num_pmcs = n;
6613
6614 n = 0; n_counters = 0;
6615 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6616 if (PMD_IS_IMPL(i) == 0) continue;
6617 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6618 n++;
6619 if (PMD_IS_COUNTING(i)) n_counters++;
6620 }
6621 pmu_conf->num_pmds = n;
6622 pmu_conf->num_counters = n_counters;
6623
6624
6625
6626
6627 if (pmu_conf->use_rr_dbregs) {
6628 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6629 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6630 pmu_conf = NULL;
6631 return -1;
6632 }
6633 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6634 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
6635 pmu_conf = NULL;
6636 return -1;
6637 }
6638 }
6639
6640 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6641 pmu_conf->pmu_name,
6642 pmu_conf->num_pmcs,
6643 pmu_conf->num_pmds,
6644 pmu_conf->num_counters,
6645 ffz(pmu_conf->ovfl_val));
6646
6647
6648 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6649 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6650 pmu_conf = NULL;
6651 return -1;
6652 }
6653
6654
6655
6656
6657 perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
6658 if (perfmon_dir == NULL) {
6659 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6660 pmu_conf = NULL;
6661 return -1;
6662 }
6663
6664
6665
6666
6667 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6668
6669
6670
6671
6672 spin_lock_init(&pfm_sessions.pfs_lock);
6673 spin_lock_init(&pfm_buffer_fmt_lock);
6674
6675 init_pfm_fs();
6676
6677 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6678
6679 return 0;
6680}
6681
6682__initcall(pfm_init);
6683
6684
6685
6686
/*
 * Per-CPU perfmon bring-up: stop monitoring, unfreeze the PMU and
 * route the perfmon interrupt to IA64_PERFMON_VECTOR.  The irqaction
 * is registered only once, by the first CPU to run this.
 */
void
pfm_init_percpu (void)
{
	static int first_time=1;

	/*
	 * make sure no monitoring is active on this CPU
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * the PMU starts out unfrozen
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time=0;
	}

	/* program the perfmon vector register and serialize */
	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
6711
6712
6713
6714
6715void
6716dump_pmu_state(const char *from)
6717{
6718 struct task_struct *task;
6719 struct pt_regs *regs;
6720 pfm_context_t *ctx;
6721 unsigned long psr, dcr, info, flags;
6722 int i, this_cpu;
6723
6724 local_irq_save(flags);
6725
6726 this_cpu = smp_processor_id();
6727 regs = task_pt_regs(current);
6728 info = PFM_CPUINFO_GET();
6729 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6730
6731 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6732 local_irq_restore(flags);
6733 return;
6734 }
6735
6736 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6737 this_cpu,
6738 from,
6739 task_pid_nr(current),
6740 regs->cr_iip,
6741 current->comm);
6742
6743 task = GET_PMU_OWNER();
6744 ctx = GET_PMU_CTX();
6745
6746 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6747
6748 psr = pfm_get_psr();
6749
6750 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6751 this_cpu,
6752 ia64_get_pmc(0),
6753 psr & IA64_PSR_PP ? 1 : 0,
6754 psr & IA64_PSR_UP ? 1 : 0,
6755 dcr & IA64_DCR_PP ? 1 : 0,
6756 info,
6757 ia64_psr(regs)->up,
6758 ia64_psr(regs)->pp);
6759
6760 ia64_psr(regs)->up = 0;
6761 ia64_psr(regs)->pp = 0;
6762
6763 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6764 if (PMC_IS_IMPL(i) == 0) continue;
6765 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6766 }
6767
6768 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6769 if (PMD_IS_IMPL(i) == 0) continue;
6770 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6771 }
6772
6773 if (ctx) {
6774 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
6775 this_cpu,
6776 ctx->ctx_state,
6777 ctx->ctx_smpl_vaddr,
6778 ctx->ctx_smpl_hdr,
6779 ctx->ctx_msgq_head,
6780 ctx->ctx_msgq_tail,
6781 ctx->ctx_saved_psr_up);
6782 }
6783 local_irq_restore(flags);
6784}
6785
6786
6787
6788
6789void
6790pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6791{
6792 struct thread_struct *thread;
6793
6794 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6795
6796 thread = &task->thread;
6797
6798
6799
6800
6801 thread->pfm_context = NULL;
6802
6803 PFM_SET_WORK_PENDING(task, 0);
6804
6805
6806
6807
6808}
6809#else
/*
 * perfmonctl syscall stub used when CONFIG_PERFMON is not set:
 * always reports that the facility is unimplemented.
 */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
6815#endif
6816