/*
 * This file implements the perfmon-2 subsystem which is used
 * to program the IA-64 Performance Monitoring Unit (PMU).
 *
 * The initial version of perfmon.c was written by
 * Ganesh Venkitachalam, IBM Corp.
 *
 * Then it was modified for perfmon-1.x by Stephane Eranian and
 * David Mosberger, Hewlett Packard Co.
 *
 * Version Perfmon-2.x is a rewrite of perfmon-1.x
 * by Stephane Eranian, Hewlett Packard Co.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON
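
/*
 * perfmon context state (ctx_state)
 */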
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded but monitoring is masked due to overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context is closing it */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for context switch */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for context switch */

/*
 * depth of message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
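
/*
 * type of a PMU register (bitmask).
 * bitmask structure:
 *	bit0    : register implemented
 *	bit1    : end marker
 *	bit2-3  : reserved
 *	bit4    : pmc has pmc.pm
 *	bit5    : pmc controls a counter (has pmc.oi), pmd is used as counter
 *	bit6-7  : register type
 *	bit8-31 : reserved
 */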
#define PFM_REG_NOTIMPL		0x0	/* not implemented at all */
#define PFM_REG_IMPL		0x1	/* register implemented */
#define PFM_REG_END		0x2	/* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	/* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR)	/* a monitor + pmc.oi + PMD used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	/* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	/* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	/* PMD used as buffer */

#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

/* i assumed unsigned (up to 64 registers) */
#define PMC_IS_IMPL(i)	  ((i) < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  ((i) < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

/* XXX: these tests assume that register i is implemented */
#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR)  == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL)  == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5	/* position of pmc.oi bit */

/* XXX: does not support more than 64 PMDs */
#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
#define PFM_CODE_RR	0	/* requesting code range restriction */
#define PFM_DATA_RR	1	/* requesting data range restriction */

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
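
/*
 * context protection macros
 * in SMP mode:
 *	- we need to protect against CPU concurrency (spin_lock)
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 * in UP mode:
 *	- we need to protect against PMU overflow interrupts (local_irq_disable)
 *
 * spin_lock_irqsave()/spin_unlock_irqrestore():
 *	in SMP: local_irq_disable + spin_lock
 *	in UP : local_irq_disable
 *
 * spin_lock()/spin_unlock():
 *	in UP : removed automatically
 *	in SMP: protect against context accesses from other CPUs. Interrupts
 *	        are not masked. This is useful for the PMU interrupt handler
 *	        because we know we will not get PMU concurrency in that code.
 */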
#define PROTECT_CTX(c, f) \
	do {  \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do {  \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do {  \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)

#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else /* !CONFIG_SMP */
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif /* CONFIG_SMP */

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

/*
 * cmp0 must be the value of pmc0
 */
#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

/*
 * debugging
 */
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif
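
/*
 * 64-bit software counter structure
 */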
typedef struct {
	unsigned long	val;		/* virtual 64bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow */
	unsigned long	short_reset;	/* reset value on overflow */
	unsigned long	reset_pmds[4];	/* which other pmds to reset when this counter overflows */
	unsigned long	smpl_pmds[4];	/* which pmds are accessed when counter overflows */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;

/*
 * context flags
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom format */
	unsigned int excl_idle:1;	/* exclude idle task in system wide session */
	unsigned int going_zombie:1;	/* context is zombie (MASKED+blocking) */
	unsigned int trap_reason:2;	/* reason for going into pfm_handle_work() */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE		0x0	/* default value */
#define PFM_TRAP_REASON_BLOCK		0x1	/* we need to block on overflow */
#define PFM_TRAP_REASON_RESET		0x2	/* we need to reset PMDs */
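
/*
 * perfmon context: encapsulates all the state of a monitoring session
 */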
typedef struct pfm_context {
	spinlock_t		ctx_lock;		/* context protection */

	pfm_context_flags_t	ctx_flags;		/* bitmask of flags (block reason incl.) */
	unsigned int		ctx_state;		/* state: active/inactive (no bitfield) */

	struct task_struct	*ctx_task;		/* task to which context is attached */

	unsigned long		ctx_ovfl_regs[4];	/* which registers overflowed (notification) */

	struct completion	ctx_restart_done;	/* use for blocking notification mode */

	unsigned long		ctx_used_pmds[4];	/* bitmask of PMD used */
	unsigned long		ctx_all_pmds[4];	/* bitmask of all accessible PMDs */
	unsigned long		ctx_reload_pmds[4];	/* bitmask of force reload PMD on ctxsw in */

	unsigned long		ctx_all_pmcs[4];	/* bitmask of all accessible PMCs */
	unsigned long		ctx_reload_pmcs[4];	/* bitmask of force reload PMC on ctxsw in */
	unsigned long		ctx_used_monitors[4];	/* bitmask of monitor PMC being used */

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];	/* saved copies of PMC values */

	unsigned int		ctx_used_ibrs[1];		/* bitmask of used IBR (speedup ctxsw in) */
	unsigned int		ctx_used_dbrs[1];		/* bitmask of used DBR (speedup ctxsw in) */
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];	/* DBR values (cache) when not loaded */
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];	/* IBR values (cache) when not loaded */

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];	/* software state for PMDs */

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];	/* PMC thread save state */
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];	/* PMD thread save state */

	unsigned long		ctx_saved_psr_up;	/* only contains psr.up value */

	unsigned long		ctx_last_activation;	/* context last activation number for last_cpu */
	unsigned int		ctx_last_cpu;		/* CPU id of current or last CPU used (SMP only) */
	unsigned int		ctx_cpu;		/* cpu to which perfmon is applied (system wide) */

	int			ctx_fd;			/* file descriptor used by this context */
	pfm_ovfl_arg_t		ctx_ovfl_arg;		/* argument to custom buffer format handler */

	pfm_buffer_fmt_t	*ctx_buf_fmt;		/* buffer format callbacks */
	void			*ctx_smpl_hdr;		/* points to sampling buffer header kernel vaddr */
	unsigned long		ctx_smpl_size;		/* size of sampling buffer */
	void			*ctx_smpl_vaddr;	/* user level virtual address of smpl buffer */

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;		/* termination cleanup wait queue */
} pfm_context_t;

#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

/* shorthands for the context flag bitfields */
#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
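
/*
 * global information about all sessions
 * mostly used to synchronize between system wide and per-process sessions
 */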
typedef struct {
	spinlock_t		pfs_lock;		   /* lock the structure */

	unsigned int		pfs_task_sessions;	   /* number of per task sessions */
	unsigned int		pfs_sys_sessions;	   /* number of per system wide sessions */
	unsigned int		pfs_sys_use_dbregs;	   /* incremented when a system wide session uses debug regs */
	unsigned int		pfs_ptrace_use_dbregs;	   /* incremented when a process uses debug regs */
	struct task_struct	*pfs_sys_session[NR_CPUS]; /* point to task owning a system wide session */
} pfm_session_t;

/*
 * information about a PMC or PMD.
 * dep_pmd[]: a bitmask of dependent PMD registers
 * dep_pmc[]: a bitmask of dependent PMC registers
 */
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;	/* power-on default value */
	unsigned long		reserved_mask;	/* bitmask of reserved bits */
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
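
/*
 * This structure is initialized at boot time and contains
 * a description of the PMU main characteristics.
 *
 * If the probe function is defined, detection is based
 * on its return value:
 *	- 0 means recognized PMU
 *	- anything else means not supported
 * When the probe function is not defined, then the pmu_family field
 * is used and it must match the host CPU family.
 */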
typedef struct {
	unsigned long	ovfl_val;	/* overflow value for counters */

	pfm_reg_desc_t	*pmc_desc;	/* detailed PMC register dependencies descriptions */
	pfm_reg_desc_t	*pmd_desc;	/* detailed PMD register dependencies descriptions */

	unsigned int	num_pmcs;	/* number of PMCs: computed at init time */
	unsigned int	num_pmds;	/* number of PMDs: computed at init time */
	unsigned long	impl_pmcs[4];	/* bitmask of implemented PMCs */
	unsigned long	impl_pmds[4];	/* bitmask of implemented PMDs */

	char		*pmu_name;	/* PMU family name */
	unsigned int	pmu_family;	/* cpuid family pattern used to identify pmu */
	unsigned int	flags;		/* pmu specific flags */
	unsigned int	num_ibrs;	/* number of IBRs: computed at init time */
	unsigned int	num_dbrs;	/* number of DBRs: computed at init time */
	unsigned int	num_counters;	/* PMC/PMD counting pairs: computed at init time */
	int		(*probe)(void);	/* customized probe routine */
	unsigned int	use_rr_dbregs:1; /* set if debug registers used for range restriction */
} pmu_config_t;

/*
 * PMU specific flags
 */
#define PFM_PMU_IRQ_RESEND	1	/* PMU needs explicit IRQ resend */
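
/*
 * debug register related type definitions
 */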
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long  val;
	ibr_mask_reg_t ibr;
	dbr_mask_reg_t dbr;
} dbreg_t;
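
/*
 * perfmon command descriptions
 */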
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01	/* command requires a file descriptor */
#define PFM_CMD_ARG_READ	0x02	/* command must read argument(s) */
#define PFM_CMD_ARG_RW		0x04	/* command must read/write argument(s) */
#define PFM_CMD_STOP		0x08	/* command does not work on zombie context */

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1	/* cannot be zero */

typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
	unsigned long pfm_ovfl_intr_count;		/* keep track of ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles;		/* cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_min;		/* min cycles spent processing ovfl interrupts */
	unsigned long pfm_ovfl_intr_cycles_max;		/* max cycles spent processing ovfl interrupts */
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;
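
/*
 * perfmon internal variables
 */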
static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;	/* global sessions information */

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

/* sysctl() controls */
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static ctl_table pfm_ctl_table[]={
	{
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{}
};
static ctl_table pfm_sysctl_dir[] = {
	{
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};
static ctl_table pfm_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)		__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)		per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}
static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void*)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}

/* forward declaration */
static const struct dentry_operations pfmfs_dentry_operations;

static struct dentry *
pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
			PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.mount    = pfmfs_mount,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

/* forward declaration */
static const struct file_operations pfm_file_ops;

/*
 * forward declarations
 */
#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[]={
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,	/* must be last */
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);
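
/*
 * helpers to set/clear psr.pp and psr.up, and to freeze/unfreeze the PMU
 * (pmc0 bit 0). All require serialization after the register update.
 */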
static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0,1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0,0UL);
	ia64_srlz_d();
}

static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i=0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i=0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}
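
/*
 * PMD[i] must be a counter. no check is made
 */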
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

/*
 * PMD[i] must be a counter. no check is made
 */
static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;
	/*
	 * writes to the unimplemented part are ignored, so there is
	 * no need to mask off the top part
	 */
	ia64_set_pmd(i, val & ovfl_val);
}

static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	/*
	 * get oldest message
	 */
	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	/*
	 * and move forward
	 */
	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}

static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vzalloc(size);
	if (mem) {
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr+=PAGE_SIZE;
			size-=PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}

static pfm_context_t *
pfm_context_alloc(int ctx_flags)
{
	pfm_context_t *ctx;

	/*
	 * allocate context descriptor
	 * must be able to free with interrupts disabled
	 */
	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		DPRINT(("alloc ctx @%p\n", ctx));

		/*
		 * init context protection lock
		 */
		spin_lock_init(&ctx->ctx_lock);

		/*
		 * context is unloaded
		 */
		ctx->ctx_state = PFM_CTX_UNLOADED;

		/*
		 * initialization of context's flags
		 */
		ctx->ctx_fl_block       = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
		ctx->ctx_fl_system      = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
		ctx->ctx_fl_no_msg      = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;

		/*
		 * init restart semaphore to locked
		 */
		init_completion(&ctx->ctx_restart_done);

		/*
		 * activation is used in SMP only
		 */
		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		/*
		 * initialize notification message queue
		 */
		ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
		init_waitqueue_head(&ctx->ctx_msgq_wait);
		init_waitqueue_head(&ctx->ctx_zombieq);
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
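
/*
 * mask monitoring for the overflowed context: save the current PMD
 * values into the software state and clear the privilege-level field
 * of the used monitor PMCs so the counters stop counting. A sketch of
 * the invariants is given in the comment inside the function.
 */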
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));

	ovfl_mask = pmu_conf->ovfl_val;
	/*
	 * monitoring can only be masked as a result of a valid
	 * counter overflow. In UP, it means that the PMU still
	 * has an owner. Note that the owner can be different
	 * from the current task. However the PMU state belongs
	 * to the owner.
	 * In SMP, a valid overflow only happens when task is
	 * current. Therefore if we come here, we know that
	 * the PMU state belongs to the current task, so we can
	 * access the live registers.
	 *
	 * So in both cases, the live register contains the owner's
	 * state. We can ONLY touch the PMU registers and NOT the PSR.
	 *
	 * As a consequence of this call, the ctx->th_pmds[] array
	 * contains stale information which must be ignored
	 * when context is reloaded AND monitoring is active (see
	 * pfm_restart).
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}
	/*
	 * mask monitoring by setting the privilege level to 0
	 * we cannot use psr.pp/psr.up for this, it is controlled by
	 * the task
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
	/*
	 * make all of this visible
	 */
	ia64_srlz_d();
}
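
/*
 * must always be done with task == current
 *
 * context must be in MASKED state when calling
 */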
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();
	/*
	 * monitoring is masked via the PMC.
	 * As we restore their value, we do not want each counter to
	 * restart right away. We stop monitoring using the PSR,
	 * restore the PMC (and PMD) and then re-establish the psr
	 * as it was. Note that there can be no pending overflow at
	 * this point, because monitoring was MASKED.
	 *
	 * system-wide sessions are pinned and self-monitoring
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* disable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}
	/*
	 * first, we restore the PMD
	 */
	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask>>=1) {
		/* skip non used pmds */
		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			/*
			 * we split the 64bit value according to
			 * counter width
			 */
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}
	/*
	 * restore the PMCs
	 */
	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n",
					task_pid_nr(task), i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	/*
	 * must restore DBR/IBR because could be modified while masked
	 * XXX: need to optimize
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * now restore PSR
	 */
	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		/* enable dcr pp */
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}

static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i=0; mask; i++, mask>>=1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}
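
/*
 * reload from thread state (used for ctxsw only)
 */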
static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

/*
 * propagate PMD from context to thread-state
 */
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {

		val = ctx->ctx_pmds[i].val;

		/*
		 * We break up the 64 bit value into 2 pieces
		 * the lower bits go to the machine state in the
		 * thread (will be reloaded on ctxsw in).
		 * The upper part stays in the soft-counter.
		 */
		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}

/*
 * propagate PMC from context to thread-state
 */
static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i=0; mask; i++, mask>>=1) {
		/* masking 0 with ovfl_val yields 0 */
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i=0; mask; i++, mask>>=1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}

static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

/*
 * find a buffer format based on its uuid
 */
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	/* some sanity checks */
	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	/* we need at least a handler */
	if (fmt->fmt_handler == NULL) return -EINVAL;

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;

}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);

extern void update_pal_halt_status(int);

static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		/*
		 * cannot mix system wide and per-task sessions
		 */
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++ ;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * disable default_idle() to go to PAL_HALT
	 */
	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;

}

static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;
	/*
	 * validity checks on cpu_mask have been done upstream
	 */
	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;
		/*
		 * would not work with perfmon+more than one bit in cpu_mask
		 */
		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	/*
	 * if possible, enable default_idle() to go into PAL_HALT
	 */
	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}
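
/*
 * removes the virtual mapping of the sampling buffer.
 * IMPORTANT: cannot be called with interrupts disabled, e.g. inside
 * a PROTECT_CTX() section.
 */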
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	/* sanity checks */
	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	/*
	 * does the actual unmapping
	 */
	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}

/*
 * free actual physical storage used by sampling buffer
 */
#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	/*
	 * we won't use the buffer format anymore
	 */
	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	/*
	 * free the buffer
	 */
	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

}
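
/*
 * pfmfs should _never_ be mounted by userland - too much of a security
 * hassle and no real gain from having it visible. We only need a
 * non-trivial d_name so pfm file descriptors look sane in /proc.
 */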
static struct vfsmount *pfmfs_mnt __read_mostly;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	/*
	 * check even when there is no message
	 */
	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	/*
	 * put ourselves on the wait queue
	 */
	add_wait_queue(&ctx->ctx_msgq_wait, &wait);


	for(;;) {
		/*
		 * check wait queue
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if(PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		/*
		 * check non-blocking read
		 */
		ret = -EAGAIN;
		if(filp->f_flags & O_NONBLOCK) break;

		/*
		 * check pending signals
		 */
		if(signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * no message, so wait
		 */
		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}

static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
			  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
		return 0;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
		return 0;
	}


	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static long
pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}
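
/*
 * interrupt cannot be masked when coming here
 */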
static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		task_pid_nr(current),
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}
	/*
	 * we cannot mask interrupts during this call because this may
	 * go to sleep if memory is not readily available.
	 *
	 * We are protected from the context disappearing by the get_fd()/put_fd()
	 * done in the caller. Serialization of this function is ensured by the caller.
	 */
	ret = pfm_do_fasync(fd, filp, ctx, on);


	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

#ifdef CONFIG_SMP
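/*
 * this function is exclusively called from pfm_close().
 * The context is not protected at that time, nor are interrupts
 * on the remote CPU. That's necessary to avoid deadlocks.
 */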
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t   *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
	/*
	 * the context is already protected in pfm_close(), we simply
	 * need to mask interrupts to avoid a PMU interrupt race on
	 * this CPU
	 */
	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	/*
	 * unmask interrupts, PMU interrupts are now spurious here
	 */
	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif /* CONFIG_SMP */
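
/*
 * called for each close(). Partially free resources.
 * When caller is self-monitoring, the context is unloaded.
 */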
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic for\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	/*
	 * remove our file from the async queue, if we use this mode.
	 * This can be done without the context being protected. We come
	 * here when the context has become unreachable by other tasks.
	 *
	 * We may still have active monitoring at this point and we may
	 * end up in pfm_overflow_handler(). However, fasync_helper()
	 * operates with interrupts disabled and it cleans up the
	 * queue. If the PMU handler is called prior to entering
	 * fasync_helper() then it will send a signal. If it is
	 * invoked after, it will find an empty queue and no
	 * signal will be sent. In both cases, we are safe.
	 */
	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if state == UNLOADED, then task is NULL
	 */

	/*
	 * we must stop and unload because we are losing access to the context.
	 */
	if (task == current) {
#ifdef CONFIG_SMP
		/*
		 * the task IS the owner but it migrated to another CPU: that's bad
		 * but we must handle this cleanly. Unfortunately, the kernel does
		 * not provide a mechanism to block migration (while the context is loaded).
		 *
		 * We need to release the resource on the ORIGINAL cpu.
		 */
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
			/*
			 * keep context protected but unmask interrupt for IPI
			 */
			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			/*
			 * restore interrupt masking
			 */
			local_irq_save(flags);

			/*
			 * context is unloaded at this point
			 */
		} else
#endif /* CONFIG_SMP */
		{

			DPRINT(("forcing unload\n"));
			/*
			 * stop and unload, returning with state UNLOADED
			 * and session unreserved.
			 */
			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	/*
	 * remove virtual mapping, if any, for the calling task.
	 * cannot reset ctx field until last user is calling close().
	 *
	 * ctx_smpl_vaddr must never be cleared because it is needed
	 * by every task with access to the context
	 *
	 * When called from do_exit(), the mm context is gone already, therefore
	 * mm is NULL, i.e., the VMA is already gone and we do not have to
	 * do anything here
	 */
	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	/*
	 * if there was a mapping, then we systematically remove it
	 * at this point. Cannot be done inside critical section
	 * because some VM functions reenable interrupts.
	 */
	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
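
/*
 * called either on explicit close() or from exit_files().
 * Only the LAST user of the file gets to this point, i.e., it is
 * called only ONCE.
 *
 * IMPORTANT: we get called ONLY when the refcnt on the file gets to zero
 * (fput()), i.e., last task to access the file. Nobody else can access the
 * file at this point.
 *
 * When called from exit_files(), the VMA has been freed because exit_mm()
 * is executed before exit_files().
 *
 * When called from exit_files(), the current task is not yet ZOMBIE but we
 * flush the PMU state to the context.
 */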
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	/*
	 * if task == current, then pfm_flush() unloaded the context
	 */
	if (state == PFM_CTX_UNLOADED) goto doit;

	/*
	 * context is loaded/masked and task != current, we need to
	 * either force an unload or go zombie
	 */

	/*
	 * The task is currently blocked or will block after an overflow.
	 * we must force it to wakeup to get out of the
	 * MASKED state and transition to the unloaded state by itself.
	 *
	 * This situation is only possible for per-task mode
	 */
	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		/*
		 * set a "partial" zombie state to be checked
		 * upon return from down() in pfm_handle_work().
		 *
		 * We cannot use the ZOMBIE state, because it is checked
		 * by pfm_load_regs() which is called upon wakeup from down().
		 * In such case, it would free the context and then we would
		 * return to pfm_handle_work() which would access the
		 * stale context. Instead, we set a flag invisible to pfm_load_regs()
		 * but visible to pfm_handle_work().
		 *
		 * For some window of time, we have a zombie context with
		 * ctx_state = MASKED and not ZOMBIE
		 */
		ctx->ctx_fl_going_zombie = 1;

		/*
		 * force task to wake up from MASKED state
		 */
		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		/*
		 * put ourself to sleep waiting for the other
		 * task to report completion
		 *
		 * the context is protected by mutex, therefore there
		 * is no risk of being notified of completion before
		 * being actually on the waitq.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		/*
		 * XXX: check for signals :
		 *	- ok for explicit close
		 *	- not ok when coming from exit_files()
		 */
		schedule();


		PROTECT_CTX(ctx, flags);


		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		/*
		 * context is unloaded at this point
		 */
		DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		/*
		 * switch context to zombie state
		 */
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
		/*
		 * cannot free the context on the spot. deferred until
		 * the task notices the ZOMBIE state while it is being
		 * context switched out
		 */
		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	/* reload state, may have changed during opening of critical section */
	state = ctx->ctx_state;

	/*
	 * the context is still attached to a task (possibly current)
	 * we cannot destroy it right now
	 */

	/*
	 * we must free the sampling buffer right here because
	 * we cannot rely on it being cleaned up later by the
	 * monitored task. It is not possible to free vmalloc'ed
	 * memory in pfm_load_regs(). Instead, we remove the buffer
	 * now. Should there be subsequent PMU overflows originally
	 * meant for sampling, they will be converted to spurious
	 * and that's fine because the monitoring tool is gone anyway.
	 */
	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;
		/* no more sampling */
		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	/*
	 * UNLOADED means the session has already been unreserved.
	 */
	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
	}

	/*
	 * disconnecting file descriptor and context must be done
	 * before we unlock.
	 */
	filp->private_data = NULL;

	/*
	 * if we free on the spot, the context is now completely unreachable
	 * from the callers side. The monitored task side is also cut, so we
	 * can freely cut.
	 *
	 * If we have a deferred free, only the caller side is disconnected.
	 */
	UNPROTECT_CTX(ctx, flags);

	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	/*
	 * return the memory used by the context
	 */
	if (free_possible) pfm_context_free(ctx);

	return 0;
}

static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}


static const struct file_operations pfm_file_ops = {
	.llseek		= no_llseek,
	.read		= pfm_read,
	.write		= pfm_write,
	.poll		= pfm_poll,
	.unlocked_ioctl = pfm_ioctl,
	.open		= pfm_no_open,	/* special open code to disallow open via /proc */
	.fasync		= pfm_fasync,
	.release	= pfm_close,
	.flush		= pfm_flush
};

static int
pfmfs_delete_dentry(const struct dentry *dentry)
{
	return 1;
}

static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
			     dentry->d_inode->i_ino);
}

static const struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
	.d_dname = pfmfs_dname,
};


static struct file *
pfm_alloc_file(pfm_context_t *ctx)
{
	struct file *file;
	struct inode *inode;
	struct path path;
	struct qstr this = { .name = "" };

	/*
	 * allocate a new inode
	 */
	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current_fsuid();
	inode->i_gid  = current_fsgid();

	/*
	 * allocate a new dcache entry
	 */
	path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(pfmfs_mnt);

	d_add(path.dentry, inode);

	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
	if (!file) {
		path_put(&path);
		return ERR_PTR(-ENFILE);
	}

	file->f_flags = O_RDONLY;
	file->private_data = ctx;

	return file;
}

static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;


		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr  += PAGE_SIZE;
		buf   += PAGE_SIZE;
		size  -= PAGE_SIZE;
	}
	return 0;
}
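
/*
 * allocates a sampling buffer and remaps it into the user address space of the task
 */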
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	/*
	 * the fixed header + requested size, aligned to page boundary
	 */
	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	/*
	 * check requested size to avoid Denial-of-service attacks
	 * XXX: may have to refine this test
	 */
	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
		return -ENOMEM;

	/*
	 * We do the easy to undo allocations first.
	 *
	 * pfm_rvmalloc() clears the buffer, so there is no leak of kernel memory
	 */
	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	/* allocate vma */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	/*
	 * partially initialize the vma for the sampling buffer
	 */
	vma->vm_mm	     = mm;
	vma->vm_file	     = filp;
	vma->vm_flags	     = VM_READ| VM_MAYREAD |VM_RESERVED;
	vma->vm_page_prot    = PAGE_READONLY; /* XXX may need to change */

	/*
	 * Now we have everything we need and we can initialize
	 * and connect all the data structures
	 */
	ctx->ctx_smpl_hdr   = smpl_buf;
	ctx->ctx_smpl_size  = size; /* aligned size */

	/*
	 * Let's do the difficult operations next.
	 *
	 * now we atomically find some area in the address space and
	 * remap the buffer into it.
	 */
	down_write(&task->mm->mmap_sem);

	/* find some free area in address space, must have mmap sem held */
	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	/* can only be applied to current task, need to have the mm semaphore held when called */
	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	get_file(filp);

	/*
	 * now insert the vma in the vm list for the process, must be
	 * done with mmap lock held
	 */
	insert_vm_struct(mm, vma);

	mm->total_vm  += size >> PAGE_SHIFT;
	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	/*
	 * keep track of user level virtual address
	 */
	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}
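
/*
 * XXX: do something better here
 */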
static int
pfm_bad_permissions(struct task_struct *task)
{
	const struct cred *tcred;
	uid_t uid = current_uid();
	gid_t gid = current_gid();
	int ret;

	rcu_read_lock();
	tcred = __task_cred(task);

	/* inspired by ptrace_attach() */
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		uid,
		gid,
		tcred->euid,
		tcred->suid,
		tcred->uid,
		tcred->egid,
		tcred->sgid));

	ret = ((uid != tcred->euid)
	       || (uid != tcred->suid)
	       || (uid != tcred->uid)
	       || (gid != tcred->egid)
	       || (gid != tcred->sgid)
	       || (gid != tcred->gid)) && !capable(CAP_SYS_PTRACE);

	rcu_read_unlock();
	return ret;
}

static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	/* valid signal */

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {

		/*
		 * cannot block in this mode
		 */
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	} else {
	}
	/* probably more to add here */

	return 0;
}

static int
pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	/* invoke and lock buffer format, if found */
	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
		return -EINVAL;
	}

	/*
	 * buffer argument MUST be contiguous to pfarg_context_t
	 */
	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	/* link buffer format and context */
	ctx->ctx_buf_fmt = fmt;
	ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */

	/*
	 * check if buffer format wants to use perfmon buffer allocation/mapping service
	 */
	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		/*
		 * buffer is always remapped into the caller's address space
		 */
		ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
		if (ret) goto error;

		/* keep track of user address of buffer */
		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}

static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	/*
	 * install reset values for PMC.
	 */
	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}
	/*
	 * PMD registers are set to 0UL when the context is memset()
	 */

	/*
	 * On context switch restore, we must restore ALL pmc and ALL pmd even
	 * when they are not actively used by the task. In UP, the incoming process
	 * may otherwise pick up left over PMC, PMD state from the previous process.
	 * As opposed to PMD, stale PMC can cause harm to the incoming
	 * process because they may change what is being measured.
	 * Therefore, we must systematically reinstall the entire
	 * PMC state. In SMP, the same thing is possible on the
	 * same CPU but also between 2 CPUs.
	 *
	 * The problem with PMD is information leaking especially
	 * to user level when psr.sp=0
	 *
	 * There is unfortunately no easy way to avoid this problem
	 * on either UP or SMP. This definitively slows down the
	 * pfm_load_regs() function.
	 */

	/*
	 * bitmask of all PMCs accessible to this context
	 *
	 * PMC0 is treated differently.
	 */
	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	/*
	 * bitmask of all PMDs that are accessible to this context
	 */
	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));

	/*
	 * useful in case of re-enable after disable
	 */
	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}

static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}
	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}
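
/*
 * cannot attach if :
 *	- kernel task
 *	- task not owned by caller
 *	- task incompatible with context mode
 */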
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	/*
	 * no kernel task or task not owned by caller
	 */
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
		return -EPERM;
	}
	/*
	 * cannot block in self-monitoring mode
	 */
	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	/*
	 * always ok for self
	 */
	if (task == current) return 0;

	if (!task_is_stopped_or_traced(task)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
		return -EBUSY;
	}
	/*
	 * make sure the task is off any CPU
	 */
	wait_task_inactive(task, 0);

	/* more to come... */

	return 0;
}

static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	/* XXX: need to add more checks here */
	if (pid < 2) return -EPERM;

	if (pid != task_pid_vnr(current)) {

		read_lock(&tasklist_lock);

		p = find_task_by_vpid(pid);

		/* make sure task cannot go away while we operate on it */
		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
2672
2673
2674
2675static int
2676pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2677{
2678 pfarg_context_t *req = (pfarg_context_t *)arg;
2679 struct file *filp;
2680 struct path path;
2681 int ctx_flags;
2682 int fd;
2683 int ret;
2684
2685
2686 ret = pfarg_is_sane(current, req);
2687 if (ret < 0)
2688 return ret;
2689
2690 ctx_flags = req->ctx_flags;
2691
2692 ret = -ENOMEM;
2693
2694 fd = get_unused_fd();
2695 if (fd < 0)
2696 return fd;
2697
2698 ctx = pfm_context_alloc(ctx_flags);
2699 if (!ctx)
2700 goto error;
2701
2702 filp = pfm_alloc_file(ctx);
2703 if (IS_ERR(filp)) {
2704 ret = PTR_ERR(filp);
2705 goto error_file;
2706 }
2707
2708 req->ctx_fd = ctx->ctx_fd = fd;
2709
	/*
	 * does the user want to sample?
	 */
2713 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2714 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2715 if (ret)
2716 goto buffer_error;
2717 }
2718
2719 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2720 ctx,
2721 ctx_flags,
2722 ctx->ctx_fl_system,
2723 ctx->ctx_fl_block,
2724 ctx->ctx_fl_excl_idle,
2725 ctx->ctx_fl_no_msg,
2726 ctx->ctx_fd));
2727
	/*
	 * initialize the soft PMU state for this context
	 */
2731 pfm_reset_pmu_state(ctx);
2732
2733 fd_install(fd, filp);
2734
2735 return 0;
2736
2737buffer_error:
2738 path = filp->f_path;
2739 put_filp(filp);
2740 path_put(&path);
2741
2742 if (ctx->ctx_buf_fmt) {
2743 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2744 }
2745error_file:
2746 pfm_context_free(ctx);
2747
2748error:
2749 put_unused_fd(fd);
2750 return ret;
2751}
2752
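/*
 * compute the next value loaded into a counting PMD on reset: the long or
 * short reset value, optionally perturbed by a pseudo-random offset
 * (carta_random32()) when PFM_REGFL_RANDOM is set, to reduce sampling bias
 */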
2753static inline unsigned long
2754pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2755{
2756 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2757 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2758 extern unsigned long carta_random32 (unsigned long seed);
2759
2760 if (reg->flags & PFM_REGFL_RANDOM) {
2761 new_seed = carta_random32(old_seed);
2762 val -= (old_seed & mask);
		if ((mask >> 32) != 0)
			/* construct a full 64-bit random value */
			new_seed |= carta_random32(old_seed >> 32) << 32;
2766 reg->seed = new_seed;
2767 }
2768 reg->lval = val;
2769 return val;
2770}
2771
2772static void
2773pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2774{
2775 unsigned long mask = ovfl_regs[0];
2776 unsigned long reset_others = 0UL;
2777 unsigned long val;
2778 int i;
2779
	/*
	 * now restore reset value on sampling overflowed counters
	 */
2783 mask >>= PMU_FIRST_COUNTER;
2784 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2785
2786 if ((mask & 0x1UL) == 0UL) continue;
2787
2788 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2789 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2790
2791 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2792 }
2793
	/*
	 * Now take care of resetting the other registers
	 */
2797 for(i = 0; reset_others; i++, reset_others >>= 1) {
2798
2799 if ((reset_others & 0x1) == 0) continue;
2800
2801 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2802
2803 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2804 is_long_reset ? "long" : "short", i, val));
2805 }
2806}
2807
2808static void
2809pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2810{
2811 unsigned long mask = ovfl_regs[0];
2812 unsigned long reset_others = 0UL;
2813 unsigned long val;
2814 int i;
2815
2816 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2817
2818 if (ctx->ctx_state == PFM_CTX_MASKED) {
2819 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2820 return;
2821 }
2822
	/*
	 * now restore reset value on sampling overflowed counters
	 */
2826 mask >>= PMU_FIRST_COUNTER;
2827 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2828
2829 if ((mask & 0x1UL) == 0UL) continue;
2830
2831 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2832 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2833
2834 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2835
2836 pfm_write_soft_counter(ctx, i, val);
2837 }
2838
	/*
	 * Now take care of resetting the other registers
	 */
2842 for(i = 0; reset_others; i++, reset_others >>= 1) {
2843
2844 if ((reset_others & 0x1) == 0) continue;
2845
2846 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2847
2848 if (PMD_IS_COUNTING(i)) {
2849 pfm_write_soft_counter(ctx, i, val);
2850 } else {
2851 ia64_set_pmd(i, val);
2852 }
2853 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2854 is_long_reset ? "long" : "short", i, val));
2855 }
2856 ia64_srlz_d();
2857}
2858
2859static int
2860pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2861{
2862 struct task_struct *task;
2863 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2864 unsigned long value, pmc_pm;
2865 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2866 unsigned int cnum, reg_flags, flags, pmc_type;
2867 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2868 int is_monitor, is_counting, state;
2869 int ret = -EINVAL;
2870 pfm_reg_check_t wr_func;
2871#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2872
2873 state = ctx->ctx_state;
2874 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2875 is_system = ctx->ctx_fl_system;
2876 task = ctx->ctx_task;
2877 impl_pmds = pmu_conf->impl_pmds[0];
2878
2879 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2880
2881 if (is_loaded) {
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
2887 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2888 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2889 return -EBUSY;
2890 }
2891 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2892 }
2893 expert_mode = pfm_sysctl.expert_mode;
2894
2895 for (i = 0; i < count; i++, req++) {
2896
2897 cnum = req->reg_num;
2898 reg_flags = req->reg_flags;
2899 value = req->reg_value;
2900 smpl_pmds = req->reg_smpl_pmds[0];
2901 reset_pmds = req->reg_reset_pmds[0];
2902 flags = 0;
2903
2904
2905 if (cnum >= PMU_MAX_PMCS) {
2906 DPRINT(("pmc%u is invalid\n", cnum));
2907 goto error;
2908 }
2909
2910 pmc_type = pmu_conf->pmc_desc[cnum].type;
2911 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2912 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2913 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;

		/*
		 * we reject all non implemented PMCs as well
		 * as attempts to modify PMCs used
		 * to control monitoring
		 */
2920 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2921 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2922 goto error;
2923 }
2924 wr_func = pmu_conf->pmc_desc[cnum].write_check;

		/*
		 * If the PMC is a monitor, then if the value is not the default:
		 * 	- system-wide session: PMCx.pm=1 (privileged monitor)
		 * 	- per-task           : PMCx.pm=0 (user monitor)
		 */
2930 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2931 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2932 cnum,
2933 pmc_pm,
2934 is_system));
2935 goto error;
2936 }
2937
2938 if (is_counting) {
			/*
			 * enforce generation of overflow interrupt. Necessary on all
			 * CPUs.
			 */
2943 value |= 1 << PMU_PMC_OI;
2944
2945 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2946 flags |= PFM_REGFL_OVFL_NOTIFY;
2947 }
2948
2949 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2950
2951
2952 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2953 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2954 goto error;
2955 }
2956
2957
2958 if ((reset_pmds & impl_pmds) != reset_pmds) {
2959 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2960 goto error;
2961 }
2962 } else {
2963 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2964 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2965 goto error;
2966 }
2967
2968 }
2969
2970
2971
2972
2973 if (likely(expert_mode == 0 && wr_func)) {
2974 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2975 if (ret) goto error;
2976 ret = -EINVAL;
2977 }
2978
2979
2980
2981
2982 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2983
		/*
		 * Now we commit the changes to the software state
		 */

		/*
		 * update overflow information
		 */
2991 if (is_counting) {
2992
2993
2994
2995 ctx->ctx_pmds[cnum].flags = flags;
2996
2997 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2998 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2999 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;

			/*
			 * Mark all PMDs to be accessed as used.
			 *
			 * We do not keep track of PMCs because we have to
			 * systematically restore ALL of them.
			 */
3012 CTX_USED_PMD(ctx, reset_pmds);
3013 CTX_USED_PMD(ctx, smpl_pmds);
			/*
			 * make sure we do not try to reset on
			 * restart because we have established new values
			 */
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3019 }

		/*
		 * Needed in case the user does not initialize the equivalent
		 * PMD. Clearing is done indirectly via pfm_reset_pmu_state() so there is no
		 * possible leak here.
		 */
3025 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);

		/*
		 * keep track of the monitor PMCs in use: this is what allows
		 * monitoring to be masked/unmasked by rewriting only the
		 * monitor PMCs.
		 */
3039 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3040
3041
3042
3043
3044 ctx->ctx_pmcs[cnum] = value;
3045
3046 if (is_loaded) {
3047
3048
3049
3050 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3051
3052
3053
3054
3055 if (can_access_pmu) {
3056 ia64_set_pmc(cnum, value);
3057 }
3058#ifdef CONFIG_SMP
3059 else {
			/*
			 * per-task SMP only here
			 *
			 * we are guaranteed that the task is not running on the other CPU,
			 * we indicate that this PMC will need to be reloaded if the task
			 * is rescheduled on the CPU it ran last on.
			 */
3067 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3068 }
3069#endif
3070 }
3071
3072 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3073 cnum,
3074 value,
3075 is_loaded,
3076 can_access_pmu,
3077 flags,
3078 ctx->ctx_all_pmcs[0],
3079 ctx->ctx_used_pmds[0],
3080 ctx->ctx_pmds[cnum].eventid,
3081 smpl_pmds,
3082 reset_pmds,
3083 ctx->ctx_reload_pmcs[0],
3084 ctx->ctx_used_monitors[0],
3085 ctx->ctx_ovfl_regs[0]));
3086 }
3087
	/*
	 * make sure the changes are visible
	 */
3091 if (can_access_pmu) ia64_srlz_d();
3092
3093 return 0;
3094error:
3095 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3096 return ret;
3097}
3098
3099static int
3100pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3101{
3102 struct task_struct *task;
3103 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3104 unsigned long value, hw_value, ovfl_mask;
3105 unsigned int cnum;
3106 int i, can_access_pmu = 0, state;
3107 int is_counting, is_loaded, is_system, expert_mode;
3108 int ret = -EINVAL;
3109 pfm_reg_check_t wr_func;
3110
3111
3112 state = ctx->ctx_state;
3113 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3114 is_system = ctx->ctx_fl_system;
3115 ovfl_mask = pmu_conf->ovfl_val;
3116 task = ctx->ctx_task;
3117
3118 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3119
	/*
	 * on both UP and SMP, we can only write to the PMD when the task is
	 * the owner of the local PMU.
	 */
3124 if (likely(is_loaded)) {
3125
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
3130 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3131 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3132 return -EBUSY;
3133 }
3134 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3135 }
3136 expert_mode = pfm_sysctl.expert_mode;
3137
3138 for (i = 0; i < count; i++, req++) {
3139
3140 cnum = req->reg_num;
3141 value = req->reg_value;
3142
3143 if (!PMD_IS_IMPL(cnum)) {
3144 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3145 goto abort_mission;
3146 }
3147 is_counting = PMD_IS_COUNTING(cnum);
3148 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3149
3150
3151
3152
3153 if (unlikely(expert_mode == 0 && wr_func)) {
3154 unsigned long v = value;
3155
3156 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3157 if (ret) goto abort_mission;
3158
3159 value = v;
3160 ret = -EINVAL;
3161 }
3162
3163
3164
3165
3166 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3167
3168
3169
3170
3171 hw_value = value;
3172
3173
3174
3175
3176 if (is_counting) {
3177
3178
3179
3180 ctx->ctx_pmds[cnum].lval = value;
3181
3182
3183
3184
3185 if (is_loaded) {
3186 hw_value = value & ovfl_mask;
3187 value = value & ~ovfl_mask;
3188 }
3189 }
3190
3191
3192
3193 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3194 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3195
3196
3197
3198
3199 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3200 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3201
3202
3203
3204
3205 ctx->ctx_pmds[cnum].val = value;
3206
		/*
		 * Keep track of what we use
		 *
		 * We do not keep track of PMCs because we have to
		 * systematically restore ALL of them.
		 */
3213 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3214
3215
3216
3217
3218 CTX_USED_PMD(ctx, RDEP(cnum));
3219
		/*
		 * make sure we do not try to reset on
		 * restart because we have established new values
		 */
		if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3226 }
3227
3228 if (is_loaded) {
3229
3230
3231
3232 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3233
3234
3235
3236
3237 if (can_access_pmu) {
3238 ia64_set_pmd(cnum, hw_value);
3239 } else {
3240#ifdef CONFIG_SMP
				/*
				 * we are guaranteed that the task is not running on the other CPU,
				 * we indicate that this PMD will need to be reloaded if the task
				 * is rescheduled on the CPU it ran last on.
				 */
3246 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3247#endif
3248 }
3249 }
3250
3251 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3252 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3253 cnum,
3254 value,
3255 is_loaded,
3256 can_access_pmu,
3257 hw_value,
3258 ctx->ctx_pmds[cnum].val,
3259 ctx->ctx_pmds[cnum].short_reset,
3260 ctx->ctx_pmds[cnum].long_reset,
3261 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3262 ctx->ctx_pmds[cnum].seed,
3263 ctx->ctx_pmds[cnum].mask,
3264 ctx->ctx_used_pmds[0],
3265 ctx->ctx_pmds[cnum].reset_pmds[0],
3266 ctx->ctx_reload_pmds[0],
3267 ctx->ctx_all_pmds[0],
3268 ctx->ctx_ovfl_regs[0]));
3269 }
3270
3271
3272
3273
3274 if (can_access_pmu) ia64_srlz_d();
3275
3276 return 0;
3277
3278abort_mission:
3279
3280
3281
3282 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3283 return ret;
3284}
3285
/*
 * the context is locked and interrupts are masked (via PROTECT_CTX()) when
 * this function is called, so there is no possibility of being interrupted
 * by an overflow interrupt while reading the registers
 */
3295static int
3296pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3297{
3298 struct task_struct *task;
3299 unsigned long val = 0UL, lval, ovfl_mask, sval;
3300 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3301 unsigned int cnum, reg_flags = 0;
3302 int i, can_access_pmu = 0, state;
3303 int is_loaded, is_system, is_counting, expert_mode;
3304 int ret = -EINVAL;
3305 pfm_reg_check_t rd_func;
3306
	/*
	 * access is possible when loaded only for
	 * self-monitoring tasks or in UP mode
	 */
3312 state = ctx->ctx_state;
3313 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3314 is_system = ctx->ctx_fl_system;
3315 ovfl_mask = pmu_conf->ovfl_val;
3316 task = ctx->ctx_task;
3317
3318 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3319
3320 if (likely(is_loaded)) {
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
3326 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3327 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3328 return -EBUSY;
3329 }
3330
3331
3332
3333 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3334
3335 if (can_access_pmu) ia64_srlz_d();
3336 }
3337 expert_mode = pfm_sysctl.expert_mode;
3338
3339 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3340 is_loaded,
3341 can_access_pmu,
3342 state));
3343
	/*
	 * on both UP and SMP, we can only read the PMD from the hardware register
	 * when the task is the owner of the local PMU.
	 */
3349 for (i = 0; i < count; i++, req++) {
3350
3351 cnum = req->reg_num;
3352 reg_flags = req->reg_flags;
3353
3354 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3355
		/*
		 * we can only read the registers that we use. That includes
		 * the ones we explicitly initialize AND the ones we want included
		 * in the sampling buffer (smpl_pmds).
		 *
		 * Having this restriction allows optimization in the ctxsw routine
		 * without compromising security (leaks)
		 */
3363 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3364
3365 sval = ctx->ctx_pmds[cnum].val;
3366 lval = ctx->ctx_pmds[cnum].lval;
3367 is_counting = PMD_IS_COUNTING(cnum);
3368
		/*
		 * we can access the live registers only when we are the owner
		 * of the PMU (self-monitoring or system wide on this CPU);
		 * otherwise we use the saved copy.
		 */
3374 if (can_access_pmu){
3375 val = ia64_get_pmd(cnum);
3376 } else {
			/*
			 * context has been saved.
			 * if context is zombie, then task does not exist anymore;
			 * in this case we use the full value saved in the context
			 * by pfm_flush_pmds().
			 */
3382 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3383 }
3384 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3385
3386 if (is_counting) {
			/*
			 * reconstruct the full 64-bit value: the hardware keeps
			 * only the low (ovfl_mask) bits, the upper bits live in
			 * the soft counter
			 */
3390 val &= ovfl_mask;
3391 val += sval;
3392 }
3393
3394
3395
3396
3397 if (unlikely(expert_mode == 0 && rd_func)) {
3398 unsigned long v = val;
3399 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3400 if (ret) goto error;
3401 val = v;
3402 ret = -EINVAL;
3403 }
3404
3405 PFM_REG_RETFLAG_SET(reg_flags, 0);
3406
3407 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3408
		/*
		 * update register return value. We only modify the reg_flags
		 * field; no check mode is fine because access has been verified
		 * upfront in sys_perfmonctl().
		 */
3414 req->reg_value = val;
3415 req->reg_flags = reg_flags;
3416 req->reg_last_reset_val = lval;
3417 }
3418
3419 return 0;
3420
3421error:
3422 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3423 return ret;
3424}
3425
3426int
3427pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3428{
3429 pfm_context_t *ctx;
3430
3431 if (req == NULL) return -EINVAL;
3432
3433 ctx = GET_PMU_CTX();
3434
3435 if (ctx == NULL) return -EINVAL;
3436
	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
3441 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3442
3443 return pfm_write_pmcs(ctx, req, nreq, regs);
3444}
3445EXPORT_SYMBOL(pfm_mod_write_pmcs);
3446
3447int
3448pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3449{
3450 pfm_context_t *ctx;
3451
3452 if (req == NULL) return -EINVAL;
3453
3454 ctx = GET_PMU_CTX();
3455
3456 if (ctx == NULL) return -EINVAL;
3457
	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
3462 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3463
3464 return pfm_read_pmds(ctx, req, nreq, regs);
3465}
3466EXPORT_SYMBOL(pfm_mod_read_pmds);
3467
/*
 * Only call this function when a process is trying to
 * write the debug registers (reading is always allowed)
 */
3472int
3473pfm_use_debug_registers(struct task_struct *task)
3474{
3475 pfm_context_t *ctx = task->thread.pfm_context;
3476 unsigned long flags;
3477 int ret = 0;
3478
3479 if (pmu_conf->use_rr_dbregs == 0) return 0;
3480
3481 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3482
	/*
	 * do it only once
	 */
3486 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3487
	/*
	 * Even on SMP, we do not need to use an atomic here because
	 * the only way in is via ptrace() and this is possible only when the
	 * process is stopped. Even in the case where the ctxsw out is not totally
	 * completed by the time we come here, there is no way the 'stopped' process
	 * could be in the middle of fiddling with the pfm_write_ibr_dbr() routine.
	 * So this is always safe.
	 */
3496 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3497
3498 LOCK_PFS(flags);
3499
	/*
	 * We cannot allow setting breakpoints when system wide monitoring
	 * sessions are using the debug registers.
	 */
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3505 ret = -1;
3506 else
3507 pfm_sessions.pfs_ptrace_use_dbregs++;
3508
3509 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3510 pfm_sessions.pfs_ptrace_use_dbregs,
3511 pfm_sessions.pfs_sys_use_dbregs,
3512 task_pid_nr(task), ret));
3513
3514 UNLOCK_PFS(flags);
3515
3516 return ret;
3517}
3518

/*
 * This function is called for every task that exits with the
 * IA64_THREAD_DBG_VALID set. It releases the ptrace debug-register
 * session previously claimed via pfm_use_debug_registers().
 */
3527int
3528pfm_release_debug_registers(struct task_struct *task)
3529{
3530 unsigned long flags;
3531 int ret;
3532
3533 if (pmu_conf->use_rr_dbregs == 0) return 0;
3534
3535 LOCK_PFS(flags);
3536 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3537 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3538 ret = -1;
3539 } else {
3540 pfm_sessions.pfs_ptrace_use_dbregs--;
3541 ret = 0;
3542 }
3543 UNLOCK_PFS(flags);
3544
3545 return ret;
3546}
3547
3548static int
3549pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3550{
3551 struct task_struct *task;
3552 pfm_buffer_fmt_t *fmt;
3553 pfm_ovfl_ctrl_t rst_ctrl;
3554 int state, is_system;
3555 int ret = 0;
3556
3557 state = ctx->ctx_state;
3558 fmt = ctx->ctx_buf_fmt;
3559 is_system = ctx->ctx_fl_system;
3560 task = PFM_CTX_TASK(ctx);
3561
3562 switch(state) {
3563 case PFM_CTX_MASKED:
3564 break;
3565 case PFM_CTX_LOADED:
3566 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
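		/* fall through */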
3567
3568 case PFM_CTX_UNLOADED:
3569 case PFM_CTX_ZOMBIE:
3570 DPRINT(("invalid state=%d\n", state));
3571 return -EBUSY;
3572 default:
3573 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3574 return -EINVAL;
3575 }
3576
	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
3582 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3583 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3584 return -EBUSY;
3585 }

	/* sanity check */
3588 if (unlikely(task == NULL)) {
3589 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3590 return -EINVAL;
3591 }
3592
3593 if (task == current || is_system) {
3594
3595 fmt = ctx->ctx_buf_fmt;
3596
3597 DPRINT(("restarting self %d ovfl=0x%lx\n",
3598 task_pid_nr(task),
3599 ctx->ctx_ovfl_regs[0]));
3600
3601 if (CTX_HAS_SMPL(ctx)) {
3602
3603 prefetch(ctx->ctx_smpl_hdr);
3604
3605 rst_ctrl.bits.mask_monitoring = 0;
3606 rst_ctrl.bits.reset_ovfl_pmds = 0;
3607
3608 if (state == PFM_CTX_LOADED)
3609 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3610 else
3611 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3612 } else {
3613 rst_ctrl.bits.mask_monitoring = 0;
3614 rst_ctrl.bits.reset_ovfl_pmds = 1;
3615 }
3616
3617 if (ret == 0) {
3618 if (rst_ctrl.bits.reset_ovfl_pmds)
3619 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3620
3621 if (rst_ctrl.bits.mask_monitoring == 0) {
3622 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3623
3624 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3625 } else {
3626 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3627
3628
3629 }
3630 }
3631
3632
3633
3634 ctx->ctx_ovfl_regs[0] = 0UL;
3635
3636
3637
3638
3639 ctx->ctx_state = PFM_CTX_LOADED;
3640
3641
3642
3643
3644 ctx->ctx_fl_can_restart = 0;
3645
3646 return 0;
3647 }
3648
	/*
	 * restart another task
	 */

	/*
	 * When PFM_CTX_MASKED, we cannot issue a restart before the previous
	 * one is seen by the task.
	 */
3657 if (state == PFM_CTX_MASKED) {
3658 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;

		/*
		 * will prevent subsequent restart before this one is
		 * seen by the task
		 */
3663 ctx->ctx_fl_can_restart = 0;
3664 }

	/*
	 * if blocking, then post the completion if PFM_CTX_MASKED, i.e.
	 * the task is blocked or on its way to block. That's the normal
	 * restart path. If the monitoring is not masked, then the task
	 * can be actively monitoring and we cannot directly intervene.
	 * Therefore we use the trap mechanism to catch the task and
	 * force it to reset the buffer/reset PMDs.
	 *
	 * if non-blocking, then we ensure that the task will go into
	 * pfm_handle_work() before returning to user mode.
	 *
	 * We cannot explicitly reset another task, it MUST always
	 * be done by the task itself. This works for system wide because
	 * the tool that is controlling the session is logically doing
	 * "self-monitoring".
	 */
3682 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3683 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3684 complete(&ctx->ctx_restart_done);
3685 } else {
3686 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3687
3688 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3689
3690 PFM_SET_WORK_PENDING(task, 1);
3691
3692 set_notify_resume(task);
		/*
		 * XXX: send reschedule if task runs on another CPU
		 */
3697 }
3698 return 0;
3699}
3700
3701static int
3702pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3703{
3704 unsigned int m = *(unsigned int *)arg;
3705
3706 pfm_sysctl.debug = m == 0 ? 0 : 1;
3707
3708 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3709
3710 if (m == 0) {
3711 memset(pfm_stats, 0, sizeof(pfm_stats));
3712 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3713 }
3714 return 0;
3715}
3716
/*
 * arg can be NULL and count can be zero for this function
 */
3720static int
3721pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3722{
3723 struct thread_struct *thread = NULL;
3724 struct task_struct *task;
3725 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3726 unsigned long flags;
3727 dbreg_t dbreg;
3728 unsigned int rnum;
3729 int first_time;
3730 int ret = 0, state;
3731 int i, can_access_pmu = 0;
3732 int is_system, is_loaded;
3733
3734 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3735
3736 state = ctx->ctx_state;
3737 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3738 is_system = ctx->ctx_fl_system;
3739 task = ctx->ctx_task;
3740
3741 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3742
	/*
	 * on both UP and SMP, we can only write the debug registers when
	 * the task is the owner of the local PMU.
	 */
3747 if (is_loaded) {
3748 thread = &task->thread;
3749
		/*
		 * In system wide and when the context is loaded, access can only happen
		 * when the caller is running on the CPU being monitored by the session.
		 * It does not have to be the owner (ctx_task) of the context per se.
		 */
3754 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3755 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3756 return -EBUSY;
3757 }
3758 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3759 }
3760

	/*
	 * we do not need to check for ipsr.db because we do clear ibr.x, dbr.r, and dbr.w
	 * ensuring that no real breakpoint can be installed via this call.
	 *
	 * IMPORTANT: regs can be NULL in this function
	 */
3768 first_time = ctx->ctx_fl_using_dbreg == 0;
3769
	/*
	 * don't bother if we are loaded and task is being debugged
	 */
3773 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3774 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3775 return -EBUSY;
3776 }
3777

	/*
	 * check for debug registers in system wide mode
	 *
	 * Even though a check is done in pfm_context_load(),
	 * we must repeat it here, in case the registers are
	 * written after the context is loaded
	 */
3785 if (is_loaded) {
3786 LOCK_PFS(flags);
3787
3788 if (first_time && is_system) {
3789 if (pfm_sessions.pfs_ptrace_use_dbregs)
3790 ret = -EBUSY;
3791 else
3792 pfm_sessions.pfs_sys_use_dbregs++;
3793 }
3794 UNLOCK_PFS(flags);
3795 }
3796
3797 if (ret != 0) return ret;
3798
	/*
	 * mark ourself as user of the debug registers for
	 * perfmon purposes.
	 */
3803 ctx->ctx_fl_using_dbreg = 1;
3804

	/*
	 * clear hardware registers to make sure we don't
	 * pick up stale state.
	 *
	 * for a system wide session, we do not use
	 * thread.dbr, thread.ibr because this process
	 * never leaves the current CPU and the state
	 * is shared by all processes running on it
	 */
3814 if (first_time && can_access_pmu) {
3815 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3816 for (i=0; i < pmu_conf->num_ibrs; i++) {
3817 ia64_set_ibr(i, 0UL);
3818 ia64_dv_serialize_instruction();
3819 }
3820 ia64_srlz_i();
3821 for (i=0; i < pmu_conf->num_dbrs; i++) {
3822 ia64_set_dbr(i, 0UL);
3823 ia64_dv_serialize_data();
3824 }
3825 ia64_srlz_d();
3826 }
3827
	/*
	 * Now install the values into the registers
	 */
3831 for (i = 0; i < count; i++, req++) {
3832
3833 rnum = req->dbreg_num;
3834 dbreg.val = req->dbreg_value;
3835
3836 ret = -EINVAL;
3837
3838 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3839 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3840 rnum, dbreg.val, mode, i, count));
3841
3842 goto abort_mission;
3843 }
3844
		/*
		 * make sure we do not install enabled breakpoints
		 */
3848 if (rnum & 0x1) {
3849 if (mode == PFM_CODE_RR)
3850 dbreg.ibr.ibr_x = 0;
3851 else
3852 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3853 }
3854
3855 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3856
		/*
		 * Debug registers, just like PMCs, can only be modified
		 * by a kernel call. Moreover, perfmon() access to those
		 * registers is centralized in this routine. The hardware
		 * does not modify the value of these registers, therefore,
		 * if we save them as they are written, we can avoid having
		 * to save them on context switch out. This is made possible
		 * by the fact that when perfmon uses debug registers, ptrace()
		 * won't be able to modify them concurrently.
		 */
3867 if (mode == PFM_CODE_RR) {
3868 CTX_USED_IBR(ctx, rnum);
3869
3870 if (can_access_pmu) {
3871 ia64_set_ibr(rnum, dbreg.val);
3872 ia64_dv_serialize_instruction();
3873 }
3874
3875 ctx->ctx_ibrs[rnum] = dbreg.val;
3876
3877 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3878 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3879 } else {
3880 CTX_USED_DBR(ctx, rnum);
3881
3882 if (can_access_pmu) {
3883 ia64_set_dbr(rnum, dbreg.val);
3884 ia64_dv_serialize_data();
3885 }
3886 ctx->ctx_dbrs[rnum] = dbreg.val;
3887
3888 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3889 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3890 }
3891 }
3892
3893 return 0;
3894
3895abort_mission:
	/*
	 * in case it was our first attempt, we undo the global modifications
	 */
3899 if (first_time) {
3900 LOCK_PFS(flags);
3901 if (ctx->ctx_fl_system) {
3902 pfm_sessions.pfs_sys_use_dbregs--;
3903 }
3904 UNLOCK_PFS(flags);
3905 ctx->ctx_fl_using_dbreg = 0;
3906 }
3907

	/*
	 * install error return flag
	 */
3910 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3911
3912 return ret;
3913}
3914
3915static int
3916pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3917{
3918 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3919}
3920
3921static int
3922pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3923{
3924 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3925}
3926
3927int
3928pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3929{
3930 pfm_context_t *ctx;
3931
3932 if (req == NULL) return -EINVAL;
3933
3934 ctx = GET_PMU_CTX();
3935
3936 if (ctx == NULL) return -EINVAL;
3937
	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
3942 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3943
3944 return pfm_write_ibrs(ctx, req, nreq, regs);
3945}
3946EXPORT_SYMBOL(pfm_mod_write_ibrs);
3947
3948int
3949pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3950{
3951 pfm_context_t *ctx;
3952
3953 if (req == NULL) return -EINVAL;
3954
3955 ctx = GET_PMU_CTX();
3956
3957 if (ctx == NULL) return -EINVAL;
3958
	/*
	 * for now limit to current task, which is enough when calling
	 * from overflow handler
	 */
3963 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3964
3965 return pfm_write_dbrs(ctx, req, nreq, regs);
3966}
3967EXPORT_SYMBOL(pfm_mod_write_dbrs);
3968
3969
3970static int
3971pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3972{
3973 pfarg_features_t *req = (pfarg_features_t *)arg;
3974
3975 req->ft_version = PFM_VERSION;
3976 return 0;
3977}
3978
3979static int
3980pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3981{
3982 struct pt_regs *tregs;
3983 struct task_struct *task = PFM_CTX_TASK(ctx);
3984 int state, is_system;
3985
3986 state = ctx->ctx_state;
3987 is_system = ctx->ctx_fl_system;
3988
	/*
	 * context must be attached to issue the stop command (includes LOADED,MASKED,ZOMBIE)
	 */
3992 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3993

	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
3999 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4000 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4001 return -EBUSY;
4002 }
4003 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
4004 task_pid_nr(PFM_CTX_TASK(ctx)),
4005 state,
4006 is_system));
4007

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
4012 if (is_system) {

		/*
		 * Update local PMU first
		 *
		 * disable dcr pp
		 */
4018 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4019 ia64_srlz_i();
4020
4021
4022
4023
4024 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4025
4026
4027
4028
4029 pfm_clear_psr_pp();
4030
4031
4032
4033
4034 ia64_psr(regs)->pp = 0;
4035
4036 return 0;
4037 }
4038
4039
4040
4041
4042 if (task == current) {
4043
4044 pfm_clear_psr_up();
4045
4046
4047
4048
4049 ia64_psr(regs)->up = 0;
4050 } else {
4051 tregs = task_pt_regs(task);
4052
4053
4054
4055
4056 ia64_psr(tregs)->up = 0;
4057
4058
4059
4060
4061 ctx->ctx_saved_psr_up = 0;
4062 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4063 }
4064 return 0;
4065}
4066
4067
4068static int
4069pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4070{
4071 struct pt_regs *tregs;
4072 int state, is_system;
4073
4074 state = ctx->ctx_state;
4075 is_system = ctx->ctx_fl_system;
4076
4077 if (state != PFM_CTX_LOADED) return -EINVAL;
4078
	/*
	 * In system wide and when the context is loaded, access can only happen
	 * when the caller is running on the CPU being monitored by the session.
	 * It does not have to be the owner (ctx_task) of the context per se.
	 */
4084 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4085 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4086 return -EBUSY;
4087 }
4088
	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
4094 if (is_system) {
4095
4096
4097
4098
4099 ia64_psr(regs)->pp = 1;
4100
4101
4102
4103
4104 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4105
4106
4107
4108
4109 pfm_set_psr_pp();
4110
4111
4112 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4113 ia64_srlz_i();
4114
4115 return 0;
4116 }
4117
4118
4119
4120
4121
4122 if (ctx->ctx_task == current) {
4123
4124
4125 pfm_set_psr_up();
4126
4127
4128
4129
4130 ia64_psr(regs)->up = 1;
4131
4132 } else {
4133 tregs = task_pt_regs(ctx->ctx_task);
4134
4135
4136
4137
4138
4139 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4140
4141
4142
4143
4144 ia64_psr(tregs)->up = 1;
4145 }
4146 return 0;
4147}
4148
4149static int
4150pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4151{
4152 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4153 unsigned int cnum;
4154 int i;
4155 int ret = -EINVAL;
4156
4157 for (i = 0; i < count; i++, req++) {
4158
4159 cnum = req->reg_num;
4160
4161 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4162
4163 req->reg_value = PMC_DFL_VAL(cnum);
4164
4165 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4166
4167 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4168 }
4169 return 0;
4170
4171abort_mission:
4172 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4173 return ret;
4174}
4175
4176static int
4177pfm_check_task_exist(pfm_context_t *ctx)
4178{
4179 struct task_struct *g, *t;
4180 int ret = -ESRCH;
4181
4182 read_lock(&tasklist_lock);
4183
4184 do_each_thread (g, t) {
4185 if (t->thread.pfm_context == ctx) {
4186 ret = 0;
4187 goto out;
4188 }
4189 } while_each_thread (g, t);
4190out:
4191 read_unlock(&tasklist_lock);
4192
4193 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4194
4195 return ret;
4196}
4197
4198static int
4199pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4200{
4201 struct task_struct *task;
4202 struct thread_struct *thread;
4203 struct pfm_context_t *old;
4204 unsigned long flags;
4205#ifndef CONFIG_SMP
4206 struct task_struct *owner_task = NULL;
4207#endif
4208 pfarg_load_t *req = (pfarg_load_t *)arg;
4209 unsigned long *pmcs_source, *pmds_source;
4210 int the_cpu;
4211 int ret = 0;
4212 int state, is_system, set_dbregs = 0;
4213
4214 state = ctx->ctx_state;
4215 is_system = ctx->ctx_fl_system;
4216
	/*
	 * can only load from unloaded state
	 */
4219 if (state != PFM_CTX_UNLOADED) {
4220 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4221 req->load_pid,
4222 ctx->ctx_state));
4223 return -EBUSY;
4224 }
4225
4226 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4227
4228 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4229 DPRINT(("cannot use blocking mode on self\n"));
4230 return -EINVAL;
4231 }
4232
4233 ret = pfm_get_task(ctx, req->load_pid, &task);
4234 if (ret) {
4235 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4236 return ret;
4237 }
4238
4239 ret = -EINVAL;
4240
	/*
	 * system wide is self monitoring only
	 */
4244 if (is_system && task != current) {
4245 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4246 req->load_pid));
4247 goto error;
4248 }
4249
4250 thread = &task->thread;
4251
4252 ret = 0;
4253
	/*
	 * cannot load a context which is using range restrictions,
	 * into a task that is being debugged.
	 */
4257 if (ctx->ctx_fl_using_dbreg) {
4258 if (thread->flags & IA64_THREAD_DBG_VALID) {
4259 ret = -EBUSY;
4260 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4261 goto error;
4262 }
4263 LOCK_PFS(flags);
4264
4265 if (is_system) {
4266 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4267 DPRINT(("cannot load [%d] dbregs in use\n",
4268 task_pid_nr(task)));
4269 ret = -EBUSY;
4270 } else {
4271 pfm_sessions.pfs_sys_use_dbregs++;
4272 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4273 set_dbregs = 1;
4274 }
4275 }
4276
4277 UNLOCK_PFS(flags);
4278
4279 if (ret) goto error;
4280 }
4281
	/*
	 * SMP system-wide monitoring implies self-monitoring.
	 *
	 * The programming model expects the task to
	 * be pinned on a CPU throughout the session.
	 * Here we take note of the current CPU at the
	 * time the context is loaded. No call from
	 * another CPU will be allowed.
	 *
	 * The pinning via sched_setaffinity()
	 * must be done by the calling task prior
	 * to this call.
	 *
	 * systemwide: keep track of CPU this session is supposed to run on
	 */
4297 the_cpu = ctx->ctx_cpu = smp_processor_id();
4298
4299 ret = -EBUSY;
4300
	/*
	 * now reserve the session
	 */
4303 ret = pfm_reserve_session(current, is_system, the_cpu);
4304 if (ret) goto error;
4305
	/*
	 * task is necessarily stopped at this point.
	 *
	 * If the previous context was zombie, then it got removed in
	 * pfm_save_regs(). Therefore we should not see it here.
	 * If we see a context, then this is an active context.
	 *
	 * XXX: needs to be atomic
	 */
4315 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4316 thread->pfm_context, ctx));
4317
4318 ret = -EBUSY;
4319 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4320 if (old != NULL) {
4321 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4322 goto error_unres;
4323 }
4324
4325 pfm_reset_msgq(ctx);
4326
4327 ctx->ctx_state = PFM_CTX_LOADED;
4328
	/*
	 * link context to task
	 */
4332 ctx->ctx_task = task;
4333
4334 if (is_system) {
4335
4336
4337
4338 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4339 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4340
4341 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4342 } else {
4343 thread->flags |= IA64_THREAD_PM_VALID;
4344 }
4345
4346
4347
4348
4349 pfm_copy_pmds(task, ctx);
4350 pfm_copy_pmcs(task, ctx);
4351
4352 pmcs_source = ctx->th_pmcs;
4353 pmds_source = ctx->th_pmds;
4354
4355
4356
4357
4358 if (task == current) {
4359
4360 if (is_system == 0) {
4361
4362
4363 ia64_psr(regs)->sp = 0;
4364 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4365
4366 SET_LAST_CPU(ctx, smp_processor_id());
4367 INC_ACTIVATION();
4368 SET_ACTIVATION(ctx);
4369#ifndef CONFIG_SMP
4370
4371
4372
4373 owner_task = GET_PMU_OWNER();
4374 if (owner_task) pfm_lazy_save_regs(owner_task);
4375#endif
4376 }
4377
		/*
		 * load all PMDs from ctx to PMU (as opposed to thread state)
		 * restore all PMCs from ctx to PMU
		 */
4381 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4382 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4383
4384 ctx->ctx_reload_pmcs[0] = 0UL;
4385 ctx->ctx_reload_pmds[0] = 0UL;
4386
4387
4388
4389
4390 if (ctx->ctx_fl_using_dbreg) {
4391 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4392 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4393 }
4394
4395
4396
4397 SET_PMU_OWNER(task, ctx);
4398
4399 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4400 } else {
4401
4402
4403
4404 regs = task_pt_regs(task);
4405
4406
4407 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4408 SET_LAST_CPU(ctx, -1);
4409
4410
4411 ctx->ctx_saved_psr_up = 0UL;
4412 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4413 }
4414
4415 ret = 0;
4416
4417error_unres:
4418 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4419error:
	/*
	 * we must undo the dbregs setting (for system-wide)
	 */
4423 if (ret && set_dbregs) {
4424 LOCK_PFS(flags);
4425 pfm_sessions.pfs_sys_use_dbregs--;
4426 UNLOCK_PFS(flags);
4427 }
4428

	/*
	 * release task, there is now a link with the context
	 */
4431 if (is_system == 0 && task != current) {
4432 pfm_put_task(task);
4433
4434 if (ret == 0) {
4435 ret = pfm_check_task_exist(ctx);
4436 if (ret) {
4437 ctx->ctx_state = PFM_CTX_UNLOADED;
4438 ctx->ctx_task = NULL;
4439 }
4440 }
4441 }
4442 return ret;
4443}
4444
/*
 * in this function, we do not need to increase the use count
 * of the task via get_task_struct(): we hold the context lock,
 * and a task with a context attached cannot pass pfm_exit_thread()
 * (which grabs the same lock) while we are here.
 */
4453static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4454
4455static int
4456pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4457{
4458 struct task_struct *task = PFM_CTX_TASK(ctx);
4459 struct pt_regs *tregs;
4460 int prev_state, is_system;
4461 int ret;
4462
4463 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4464
4465 prev_state = ctx->ctx_state;
4466 is_system = ctx->ctx_fl_system;
4467
	/*
	 * unload only when necessary
	 */
4471 if (prev_state == PFM_CTX_UNLOADED) {
4472 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4473 return 0;
4474 }
4475
	/*
	 * clear psr and dcr bits
	 */
4479 ret = pfm_stop(ctx, NULL, 0, regs);
4480 if (ret) return ret;
4481
4482 ctx->ctx_state = PFM_CTX_UNLOADED;
4483

	/*
	 * in system mode, we need to update the PMU directly
	 * and the user level state of the caller, which may not
	 * necessarily be the creator of the context.
	 */
4489 if (is_system) {
4490
		/*
		 * Update cpuinfo
		 *
		 * local PMU is taken care of in pfm_stop()
		 */
4496 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4497 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4498

		/*
		 * save PMDs in context
		 * release ownership
		 */
4503 pfm_flush_pmds(current, ctx);
4504
4505
4506
4507
4508
4509 if (prev_state != PFM_CTX_ZOMBIE)
4510 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4511
4512
4513
4514
4515 task->thread.pfm_context = NULL;
4516
4517
4518
4519 ctx->ctx_task = NULL;
4520
4521
4522
4523
4524 return 0;
4525 }
4526
4527
4528
4529
4530 tregs = task == current ? regs : task_pt_regs(task);
4531
4532 if (task == current) {
4533
4534
4535
4536 ia64_psr(regs)->sp = 1;
4537
4538 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4539 }
4540
4541
4542
4543
4544 pfm_flush_pmds(task, ctx);
4545

	/*
	 * at this point we are done with the PMU
	 * so we can unreserve the resource.
	 *
	 * when state was ZOMBIE, we have already unreserved.
	 */
4552 if (prev_state != PFM_CTX_ZOMBIE)
4553 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4554
4555
4556
4557
4558 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4559 SET_LAST_CPU(ctx, -1);
4560
4561
4562
4563
4564 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4565
4566
4567
4568
4569 task->thread.pfm_context = NULL;
4570 ctx->ctx_task = NULL;
4571
4572 PFM_SET_WORK_PENDING(task, 0);
4573
4574 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4575 ctx->ctx_fl_can_restart = 0;
4576 ctx->ctx_fl_going_zombie = 0;
4577
4578 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4579
4580 return 0;
4581}
4582
/*
 * called only from exit_thread(): task == current
 * we come here only if current has a context attached (loaded or masked)
 */
4588void
4589pfm_exit_thread(struct task_struct *task)
4590{
4591 pfm_context_t *ctx;
4592 unsigned long flags;
4593 struct pt_regs *regs = task_pt_regs(task);
4594 int ret, state;
4595 int free_ok = 0;
4596
4597 ctx = PFM_GET_CTX(task);
4598
4599 PROTECT_CTX(ctx, flags);
4600
4601 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4602
4603 state = ctx->ctx_state;
4604 switch(state) {
4605 case PFM_CTX_UNLOADED:
		/*
		 * only comes to this function if pfm_context is not NULL, i.e., cannot
		 * be in unloaded state
		 */
4610 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4611 break;
4612 case PFM_CTX_LOADED:
4613 case PFM_CTX_MASKED:
4614 ret = pfm_context_unload(ctx, NULL, 0, regs);
4615 if (ret) {
4616 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4617 }
4618 DPRINT(("ctx unloaded for current state was %d\n", state));
4619
4620 pfm_end_notify_user(ctx);
4621 break;
4622 case PFM_CTX_ZOMBIE:
4623 ret = pfm_context_unload(ctx, NULL, 0, regs);
4624 if (ret) {
4625 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4626 }
4627 free_ok = 1;
4628 break;
4629 default:
4630 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4631 break;
4632 }
4633 UNPROTECT_CTX(ctx, flags);
4634
4635 { u64 psr = pfm_get_psr();
4636 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4637 BUG_ON(GET_PMU_OWNER());
4638 BUG_ON(ia64_psr(regs)->up);
4639 BUG_ON(ia64_psr(regs)->pp);
4640 }
4641
	/*
	 * All memory free operations (especially for vmalloc'ed memory)
	 * MUST be done with interrupts ENABLED.
	 */
4646 if (free_ok) pfm_context_free(ctx);
4647}
4648
/*
 * functions MUST be listed in the increasing order of their index (see perfmon.h)
 */
4652#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4653#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4654#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4655#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4656#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4657
4658static pfm_cmd_desc_t pfm_cmd_tab[]={
4659PFM_CMD_NONE,
4660PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4661PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4662PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4663PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4664PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4665PFM_CMD_NONE,
4666PFM_CMD_NONE,
4667PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4668PFM_CMD_NONE,
4669PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4670PFM_CMD_NONE,
4671PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4672PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4673PFM_CMD_NONE,
4674PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4675PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4676PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4677PFM_CMD_NONE,
4678PFM_CMD_NONE,
4679PFM_CMD_NONE,
4680PFM_CMD_NONE,
4681PFM_CMD_NONE,
4682PFM_CMD_NONE,
4683PFM_CMD_NONE,
4684PFM_CMD_NONE,
4685PFM_CMD_NONE,
4686PFM_CMD_NONE,
4687PFM_CMD_NONE,
4688PFM_CMD_NONE,
4689PFM_CMD_NONE,
4690PFM_CMD_NONE,
4691PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4692PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4693};
4694#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
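
/*
 * Example (illustrative): the command code is the index into pfm_cmd_tab[],
 * so perfmonctl(fd, PFM_WRITE_PMCS, reqs, n) with PFM_WRITE_PMCS == 1
 * dispatches to pfm_write_pmcs() with count == n. The command codes are
 * defined in <asm/perfmon.h>.
 */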
4695
4696static int
4697pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4698{
4699 struct task_struct *task;
4700 int state, old_state;
4701
4702recheck:
4703 state = ctx->ctx_state;
4704 task = ctx->ctx_task;
4705
4706 if (task == NULL) {
4707 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4708 return 0;
4709 }
4710
4711 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4712 ctx->ctx_fd,
4713 state,
4714 task_pid_nr(task),
4715 task->state, PFM_CMD_STOPPED(cmd)));
4716

	/*
	 * self-monitoring always ok.
	 *
	 * for system-wide the caller can either be the creator of the
	 * context (the one to which the context is attached) OR
	 * a task running on the same CPU as the session.
	 */
4724 if (task == current || ctx->ctx_fl_system) return 0;
4725
	/*
	 * the context is attached to another task
	 */
4729 switch(state) {
4730 case PFM_CTX_UNLOADED:
4731
4732
4733
4734 return 0;
4735 case PFM_CTX_ZOMBIE:
4736
4737
4738
4739 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4740 return -EINVAL;
4741 case PFM_CTX_MASKED:
4742
4743
4744
4745
4746 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4747 }
4748
	/*
	 * context is LOADED or MASKED. Some commands may need to have
	 * the task stopped.
	 *
	 * We could lift this restriction for UP but it would mean that
	 * the user has no guarantee the task would not run between
	 * two successive calls to perfmonctl(). That's probably OK.
	 * If this user wants to ensure the task does not run, then
	 * the task must be stopped.
	 */
4759 if (PFM_CMD_STOPPED(cmd)) {
4760 if (!task_is_stopped_or_traced(task)) {
4761 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4762 return -EBUSY;
4763 }

		/*
		 * task is now stopped, wait for ctxsw out
		 *
		 * This is an interesting point in the code.
		 * We need to unprotect the context because
		 * the pfm_save_regs() routine needs to grab
		 * the same lock. There are dangers in doing
		 * this because it leaves a window open for
		 * another task to get access to the context
		 * and possibly change its state. The one thing
		 * that is not possible is for the context to disappear
		 * because we are protected by the VFS layer, i.e.,
		 * get_fd()/put_fd().
		 */
4778 old_state = state;
4779
4780 UNPROTECT_CTX(ctx, flags);
4781
4782 wait_task_inactive(task, 0);
4783
4784 PROTECT_CTX(ctx, flags);
4785
		/*
		 * we must recheck to verify if state has changed
		 */
4789 if (ctx->ctx_state != old_state) {
4790 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4791 goto recheck;
4792 }
4793 }
4794 return 0;
4795}
4796
/*
 * system-call entry point (must return long)
 */
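
/*
 * Example (user level, illustrative sketch only; error handling elided):
 * creating a context through the perfmonctl() wrapper of this system call.
 * pfarg_context_t and PFM_CREATE_CONTEXT come from <perfmon.h>.
 *
 *	pfarg_context_t req;
 *	memset(&req, 0, sizeof(req));
 *	if (perfmonctl(0, PFM_CREATE_CONTEXT, &req, 1) == 0)
 *		fd = req.ctx_fd;	(the kernel installs the context fd here)
 */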
4800asmlinkage long
4801sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4802{
4803 struct file *file = NULL;
4804 pfm_context_t *ctx = NULL;
4805 unsigned long flags = 0UL;
4806 void *args_k = NULL;
4807 long ret;
4808 size_t base_sz, sz, xtra_sz = 0;
4809 int narg, completed_args = 0, call_made = 0, cmd_flags;
4810 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4811 int (*getsize)(void *arg, size_t *sz);
4812#define PFM_MAX_ARGSIZE 4096
4813
	/*
	 * reject any call if perfmon was disabled at initialization
	 */
4817 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4818
4819 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4820 DPRINT(("invalid cmd=%d\n", cmd));
4821 return -EINVAL;
4822 }
4823
4824 func = pfm_cmd_tab[cmd].cmd_func;
4825 narg = pfm_cmd_tab[cmd].cmd_narg;
4826 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4827 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4828 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4829
4830 if (unlikely(func == NULL)) {
4831 DPRINT(("invalid cmd=%d\n", cmd));
4832 return -EINVAL;
4833 }
4834
4835 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4836 PFM_CMD_NAME(cmd),
4837 cmd,
4838 narg,
4839 base_sz,
4840 count));
4841
	/*
	 * check if number of arguments matches what the command expects
	 */
4845 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4846 return -EINVAL;
4847
4848restart_args:
4849 sz = xtra_sz + base_sz*count;

	/*
	 * limit abuse to min page size
	 */
4853 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4854 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4855 return -E2BIG;
4856 }
4857
	/*
	 * allocate any memory needed. Avoid repeated allocation.
	 */
4861 if (likely(count && args_k == NULL)) {
4862 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4863 if (args_k == NULL) return -ENOMEM;
4864 }
4865
4866 ret = -EFAULT;
4867
	/*
	 * copy arguments
	 *
	 * assume sz = 0 for command without parameters
	 */
4873 if (sz && copy_from_user(args_k, arg, sz)) {
4874 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4875 goto error_args;
4876 }
4877
	/*
	 * check if command supports extra parameters
	 */
4881 if (completed_args == 0 && getsize) {
		/*
		 * get extra parameters size (based on main argument)
		 */
4885 ret = (*getsize)(args_k, &xtra_sz);
4886 if (ret) goto error_args;
4887
4888 completed_args = 1;
4889
4890 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4891
4892
4893 if (likely(xtra_sz)) goto restart_args;
4894 }
4895
4896 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4897
4898 ret = -EBADF;
4899
4900 file = fget(fd);
4901 if (unlikely(file == NULL)) {
4902 DPRINT(("invalid fd %d\n", fd));
4903 goto error_args;
4904 }
4905 if (unlikely(PFM_IS_FILE(file) == 0)) {
4906 DPRINT(("fd %d not related to perfmon\n", fd));
4907 goto error_args;
4908 }
4909
4910 ctx = file->private_data;
4911 if (unlikely(ctx == NULL)) {
4912 DPRINT(("no context for fd %d\n", fd));
4913 goto error_args;
4914 }
4915 prefetch(&ctx->ctx_state);
4916
4917 PROTECT_CTX(ctx, flags);
4918
	/*
	 * check task state, if necessary
	 */
4922 ret = pfm_check_task_state(ctx, cmd, flags);
4923 if (unlikely(ret)) goto abort_locked;
4924
4925skip_fd:
4926 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4927
4928 call_made = 1;
4929
4930abort_locked:
4931 if (likely(ctx)) {
4932 DPRINT(("context unlocked\n"));
4933 UNPROTECT_CTX(ctx, flags);
4934 }

	/* copy argument back to user, if needed */
4937 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4938
4939error_args:
4940 if (file)
4941 fput(file);
4942
4943 kfree(args_k);
4944
4945 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4946
4947 return ret;
4948}
4949
4950static void
4951pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4952{
4953 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4954 pfm_ovfl_ctrl_t rst_ctrl;
4955 int state;
4956 int ret = 0;
4957
4958 state = ctx->ctx_state;
4959
	/*
	 * Unlock sampling buffer and reset index atomically
	 * XXX: not really needed when blocking
	 */
4963 if (CTX_HAS_SMPL(ctx)) {
4964
4965 rst_ctrl.bits.mask_monitoring = 0;
4966 rst_ctrl.bits.reset_ovfl_pmds = 0;
4967
4968 if (state == PFM_CTX_LOADED)
4969 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4970 else
4971 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4972 } else {
4973 rst_ctrl.bits.mask_monitoring = 0;
4974 rst_ctrl.bits.reset_ovfl_pmds = 1;
4975 }
4976
4977 if (ret == 0) {
4978 if (rst_ctrl.bits.reset_ovfl_pmds) {
4979 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4980 }
4981 if (rst_ctrl.bits.mask_monitoring == 0) {
4982 DPRINT(("resuming monitoring\n"));
4983 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4984 } else {
4985 DPRINT(("stopping monitoring\n"));
4986
4987 }
4988 ctx->ctx_state = PFM_CTX_LOADED;
4989 }
4990}
4991
/*
 * context MUST BE LOCKED when calling
 * can only be called for current
 */
4996static void
4997pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4998{
4999 int ret;
5000
5001 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
5002
5003 ret = pfm_context_unload(ctx, NULL, 0, regs);
5004 if (ret) {
		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
5006 }
5007
	/*
	 * and wakeup any possible waiter on the zombie queue
	 */
5011 wake_up_interruptible(&ctx->ctx_zombieq);
5012
5018}
5019
5020static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5021

/*
 * pfm_handle_work() can be called with interrupts enabled
 * (TIF_NEED_RESCHED) or disabled. The wait on the restart
 * completion may sleep, therefore we must re-enable interrupts
 * to avoid deadlocks. It is safe to do so because this function
 * is called ONLY when returning to user level (pUStk=1), in which case
 * there is no risk of kernel stack overflow due to deep
 * interrupt nesting.
 */
5031void
5032pfm_handle_work(void)
5033{
5034 pfm_context_t *ctx;
5035 struct pt_regs *regs;
5036 unsigned long flags, dummy_flags;
5037 unsigned long ovfl_regs;
5038 unsigned int reason;
5039 int ret;
5040
5041 ctx = PFM_GET_CTX(current);
5042 if (ctx == NULL) {
5043 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
5044 task_pid_nr(current));
5045 return;
5046 }
5047
5048 PROTECT_CTX(ctx, flags);
5049
5050 PFM_SET_WORK_PENDING(current, 0);
5051
5052 regs = task_pt_regs(current);
5053
	/*
	 * extract reason for being here and clear
	 */
5057 reason = ctx->ctx_fl_trap_reason;
5058 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5059 ovfl_regs = ctx->ctx_ovfl_regs[0];
5060
5061 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5062
5063
5064
5065
5066 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
5067 goto do_zombie;
5068
5069
5070 if (reason == PFM_TRAP_REASON_RESET)
5071 goto skip_blocking;
5072
	/*
	 * restore interrupt mask to what it was on entry.
	 * Could be enabled/disabled.
	 */
5077 UNPROTECT_CTX(ctx, flags);
5078
	/*
	 * force interrupt enable because of the interruptible wait below
	 */
5082 local_irq_enable();
5083
5084 DPRINT(("before block sleeping\n"));
5085
	/*
	 * may go through without blocking on SMP systems
	 * if the restart has been received already by the time we block
	 */
5090 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5091
5092 DPRINT(("after block sleeping ret=%d\n", ret));
5093
	/*
	 * lock context and mask interrupts again.
	 * We save flags into a dummy because we may have
	 * altered the interrupt mask compared to entry in this
	 * function.
	 */
5100 PROTECT_CTX(ctx, dummy_flags);
5101
	/*
	 * we need to read ovfl_regs only after wake-up
	 * because we may have had pfm_write_pmds() in between
	 * and that can change the PMD values and therefore
	 * ovfl_regs is reset for these new PMD values.
	 */
5108 ovfl_regs = ctx->ctx_ovfl_regs[0];
5109
5110 if (ctx->ctx_fl_going_zombie) {
5111do_zombie:
5112 DPRINT(("context is zombie, bailing out\n"));
5113 pfm_context_force_terminate(ctx, regs);
5114 goto nothing_to_do;
5115 }
5116

	/*
	 * in case of interruption of the wait we don't restart anything
	 */
5119 if (ret < 0)
5120 goto nothing_to_do;
5121
5122skip_blocking:
5123 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5124 ctx->ctx_ovfl_regs[0] = 0UL;
5125
5126nothing_to_do:
5127
5128
5129
5130 UNPROTECT_CTX(ctx, flags);
5131}
5132
5133static int
5134pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5135{
5136 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5137 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5138 return 0;
5139 }
5140
5141 DPRINT(("waking up somebody\n"));
5142
5143 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5144
	/*
	 * safe, we are not in intr handler, nor in ctxsw when
	 * we come here
	 */
5149 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5150
5151 return 0;
5152}
5153
5154static int
5155pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5156{
5157 pfm_msg_t *msg = NULL;
5158
5159 if (ctx->ctx_fl_no_msg == 0) {
5160 msg = pfm_get_new_msg(ctx);
5161 if (msg == NULL) {
5162 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5163 return -1;
5164 }
5165
5166 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5167 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5168 msg->pfm_ovfl_msg.msg_active_set = 0;
5169 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5170 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5171 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5172 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5173 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5174 }
5175
5176 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5177 msg,
5178 ctx->ctx_fl_no_msg,
5179 ctx->ctx_fd,
5180 ovfl_pmds));
5181
5182 return pfm_notify_user(ctx, msg);
5183}
5184
5185static int
5186pfm_end_notify_user(pfm_context_t *ctx)
5187{
5188 pfm_msg_t *msg;
5189
5190 msg = pfm_get_new_msg(ctx);
5191 if (msg == NULL) {
5192 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5193 return -1;
5194 }
5195
5196 memset(msg, 0, sizeof(*msg));
5197
5198 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5199 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5200 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5201
5202 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5203 msg,
5204 ctx->ctx_fl_no_msg,
5205 ctx->ctx_fd));
5206
5207 return pfm_notify_user(ctx, msg);
5208}
5209

/*
 * main overflow processing routine.
 * it can be called from the interrupt path or explicitly during the context switch code
 */
5214static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5215 unsigned long pmc0, struct pt_regs *regs)
5216{
5217 pfm_ovfl_arg_t *ovfl_arg;
5218 unsigned long mask;
5219 unsigned long old_val, ovfl_val, new_val;
5220 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5221 unsigned long tstamp;
5222 pfm_ovfl_ctrl_t ovfl_ctrl;
5223 unsigned int i, has_smpl;
5224 int must_notify = 0;
5225
5226 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5227
	/*
	 * sanity test. Should never happen
	 */
5231 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5232
5233 tstamp = ia64_get_itc();
5234 mask = pmc0 >> PMU_FIRST_COUNTER;
5235 ovfl_val = pmu_conf->ovfl_val;
5236 has_smpl = CTX_HAS_SMPL(ctx);
5237
5238 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5239 "used_pmds=0x%lx\n",
5240 pmc0,
5241 task ? task_pid_nr(task): -1,
5242 (regs ? regs->cr_iip : 0),
5243 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5244 ctx->ctx_used_pmds[0]));
5245

	/*
	 * first we update the virtual counters
	 * assume there was a prior ia64_srlz_d() issued
	 */
5251 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {

		/* skip pmd which did not overflow */
		if ((mask & 0x1) == 0) continue;
5255

		/*
		 * Note that the pmd is not necessarily 0 at this point as qualified events
		 * may have happened before the PMU was frozen. The residual count is not
		 * taken into consideration here but will be with any read of the pmd
		 * via pfm_read_pmds().
		 */
5262 old_val = new_val = ctx->ctx_pmds[i].val;
5263 new_val += 1 + ovfl_val;
5264 ctx->ctx_pmds[i].val = new_val;
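		/*
		 * the hardware counter wrapped its ovfl_val-wide range, so one
		 * full period (ovfl_val + 1) is added to the 64-bit software
		 * value; a wrap of the software value itself (old_val > new_val)
		 * is what constitutes a 64-bit overflow below
		 */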
5265
		/*
		 * check for 64-bit overflow condition
		 */
5269 if (likely(old_val > new_val)) {
5270 ovfl_pmds |= 1UL << i;
5271 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5272 }
5273
5274 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5275 i,
5276 new_val,
5277 old_val,
5278 ia64_get_pmd(i) & ovfl_val,
5279 ovfl_pmds,
5280 ovfl_notify));
5281 }
5282

	/*
	 * there was no 64-bit overflow, nothing else to do
	 */
5286 if (ovfl_pmds == 0UL) return;
5287

	/*
	 * reset all control bits
	 */
5291 ovfl_ctrl.val = 0;
5292 reset_pmds = 0UL;
5293
	/*
	 * if a sampling format module exists, then we "cache" the overflow by
	 * calling the module's handler() routine.
	 */
5298 if (has_smpl) {
5299 unsigned long start_cycles, end_cycles;
5300 unsigned long pmd_mask;
5301 int j, k, ret = 0;
5302 int this_cpu = smp_processor_id();
5303
5304 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5305 ovfl_arg = &ctx->ctx_ovfl_arg;
5306
5307 prefetch(ctx->ctx_smpl_hdr);
5308
5309 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5310
5311 mask = 1UL << i;
5312
5313 if ((pmd_mask & 0x1) == 0) continue;
5314
5315 ovfl_arg->ovfl_pmd = (unsigned char )i;
5316 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5317 ovfl_arg->active_set = 0;
5318 ovfl_arg->ovfl_ctrl.val = 0;
5319 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5320
5321 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5322 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5323 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5324
			/*
			 * copy values of pmds of interest. Sampling format may copy them
			 * into sampling buffer.
			 */
5329 if (smpl_pmds) {
5330 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5331 if ((smpl_pmds & 0x1) == 0) continue;
5332 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5333 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5334 }
5335 }
5336
5337 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5338
5339 start_cycles = ia64_get_itc();
5340
			/*
			 * call custom buffer format record (handler) routine
			 */
5344 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5345
5346 end_cycles = ia64_get_itc();
5347

			/*
			 * For those controls, we take the union because they have
			 * an all or nothing behavior.
			 */
5352 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5353 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5354 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5355
5356
5357
5358 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5359
5360 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5361 }
5362
5363
5364
5365 if (ret && pmd_mask) {
5366 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5367 pmd_mask<<PMU_FIRST_COUNTER));
5368 }
5369
5370
5371
5372 ovfl_pmds &= ~reset_pmds;
5373 } else {

		/*
		 * when no sampling module is used, then the default
		 * is to notify on overflow if requested by user
		 */
5378 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5379 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5380 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5381 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5382
5383
5384
5385 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5386 }
5387
5388 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5389

	/*
	 * reset the requested PMD registers using the short reset values
	 */
5393 if (reset_pmds) {
5394 unsigned long bm = reset_pmds;
5395 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5396 }
5397
5398 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5399
5400
5401
5402 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5403
5404
5405
5406
5407 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5408
5409 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5410
5411
5412
5413
5414 PFM_SET_WORK_PENDING(task, 1);
5415
			/*
			 * when coming from ctxsw, current still points to the
			 * previous task, therefore we must work with task and not current.
			 */
5420 set_notify_resume(task);
5421 }
5422
		/*
		 * defer notification until after monitoring is masked below
		 */
5426 must_notify = 1;
5427 }
5428
5429 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5430 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5431 PFM_GET_WORK_PENDING(task),
5432 ctx->ctx_fl_trap_reason,
5433 ovfl_pmds,
5434 ovfl_notify,
5435 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5436
5437
5438
5439 if (ovfl_ctrl.bits.mask_monitoring) {
5440 pfm_mask_monitoring(task);
5441 ctx->ctx_state = PFM_CTX_MASKED;
5442 ctx->ctx_fl_can_restart = 1;
5443 }
5444
5445
5446
5447
5448 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5449
5450 return;
5451
5452sanity_check:
5453 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5454 smp_processor_id(),
5455 task ? task_pid_nr(task) : -1,
5456 pmc0);
5457 return;
5458
5459stop_monitoring:
5460
5461
5462
5463
5464
5465
5466
5467
5468
5469
5470
5471
5472
5473
5474
5475
5476
5477
5478
5479
5480
5481
5482
5483
5484
5485
5486
5487
5488 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5489 pfm_clear_psr_up();
5490 ia64_psr(regs)->up = 0;
5491 ia64_psr(regs)->sp = 1;
5492 return;
5493}
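
/*
 * Illustrative sketch (not part of this file) of the contract a custom
 * sampling-format handler must honor when invoked from the overflow path
 * above. The names demo_fmt_handler and pfm_demo_hdr_t are hypothetical;
 * only the argument list mirrors the fmt_handler call made by
 * pfm_overflow_handler():
 *
 *	static int demo_fmt_handler(struct task_struct *task, void *buf,
 *				    pfm_ovfl_arg_t *arg, struct pt_regs *regs,
 *				    unsigned long stamp)
 *	{
 *		pfm_demo_hdr_t *hdr = buf;
 *
 *		hdr->last_pmd   = arg->ovfl_pmd;
 *		hdr->last_value = arg->pmd_value;
 *
 *		arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
 *		return 0;
 *	}
 *
 * A zero return lets the loop above move on to the next overflowed PMD;
 * a non-zero return aborts processing of the remaining ones, as the
 * "handler aborts" branch shows.
 */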

static int
pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending overflow bits set and the owner is known,
	 * process the interrupt, otherwise report it as spurious
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}

	/*
	 * keep the PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
}

static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;
	struct pt_regs *regs = get_irq_regs();

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	} else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu();
	return IRQ_HANDLED;
}

/*
 * /proc/perfmon interface, for debug only
 */

#define PFM_PROC_SHOW_HEADER	((void *)(long)nr_cpu_ids+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= nr_cpu_ids) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}
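
/*
 * Note on the iterator above: seq_file drives it as start(pos) -> show ->
 * next -> show ... -> stop. Position 0 maps to the PFM_PROC_SHOW_HEADER
 * cookie; every later position N (1-based) maps to online CPU N-1, handed
 * to pfm_proc_show() encoded as (void *)(cpu + 1).
 */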

static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head *pos;
	pfm_buffer_fmt_t *entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version : %u.%u\n"
		"model : %s\n"
		"fastctxsw : %s\n"
		"expert mode : %s\n"
		"ovfl_mask : 0x%lx\n"
		"PMU flags : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes" : "No",
		pfm_sysctl.expert_mode > 0 ? "Yes" : "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions : %u\n"
		"sys_sessions : %u\n"
		"sys_use_dbregs : %u\n"
		"ptrace_use_dbregs : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
			entry->fmt_uuid[0],
			entry->fmt_uuid[1],
			entry->fmt_uuid[2],
			entry->fmt_uuid[3],
			entry->fmt_uuid[4],
			entry->fmt_uuid[5],
			entry->fmt_uuid[6],
			entry->fmt_uuid[7],
			entry->fmt_uuid[8],
			entry->fmt_uuid[9],
			entry->fmt_uuid[10],
			entry->fmt_uuid[11],
			entry->fmt_uuid[12],
			entry->fmt_uuid[13],
			entry->fmt_uuid[14],
			entry->fmt_uuid[15],
			entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}

static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show information for one CPU */
	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs : %lu\n"
		"CPU%-2d overflow cycles : %lu\n"
		"CPU%-2d overflow min : %lu\n"
		"CPU%-2d overflow max : %lu\n"
		"CPU%-2d smpl handler calls : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs : %lu\n"
		"CPU%-2d replay intrs : %lu\n"
		"CPU%-2d syst_wide : %d\n"
		"CPU%-2d dcr_pp : %d\n"
		"CPU%-2d exclude idle : %d\n"
		"CPU%-2d owner : %d\n"
		"CPU%-2d context : %p\n"
		"CPU%-2d activations : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid : -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
		/*
		 * with a single online CPU, the registers read below are
		 * guaranteed to be those of the CPU being reported on
		 */
		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr : 0x%lx\n"
			"CPU%-2d pmc0 : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i = 0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u : 0x%lx\n"
				"CPU%-2d pmd%u : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}

const struct seq_operations pfm_seq_ops = {
	.start = pfm_proc_start,
	.next  = pfm_proc_next,
	.stop  = pfm_proc_stop,
	.show  = pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}
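
/*
 * Illustrative usage (assumed, not from the original source): once
 * pfm_init() below has created the /proc entry, the statistics maintained
 * in pfm_stats[] can be read with a plain:
 *
 *	$ cat /proc/perfmon
 *
 * which emits the header from pfm_proc_show_header() followed by one block
 * of per-CPU counters per online CPU.
 */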

/*
 * we come here as soon as pfm_syst_info is non-zero for the local CPU,
 * i.e. whenever a system-wide session exists on this CPU. Called from the
 * context switch path with interrupts disabled.
 */
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task
	 * with pid 0 on each CPU, so we can rely on the pid to identify the
	 * idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}

	/*
	 * the task is the idle task and idle must be excluded: if monitoring
	 * has started, toggle it globally
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out: restore monitoring for the next
		 * (non-idle) task
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}
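
/*
 * Summary of the idle-exclusion logic above: for an ordinary task the
 * system-wide session is applied by toggling the task's saved psr.pp on
 * switch-in/switch-out. For the idle task (pid 0) with
 * PFM_CPUINFO_EXCL_IDLE set, monitoring is instead gated globally: dcr.pp
 * and psr.pp are cleared when idle switches in and re-established when it
 * switches out.
 */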

#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n",
			task_pid_nr(ctx->ctx_task)));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}

/*
 * called from the context switch path to save the PMU state of the
 * outgoing task
 */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * we always come here with interrupts ALREADY disabled by the caller
	 * (__switch_to()), so we must use the ctxsw variants of the context
	 * protection macros
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);
		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * this is the last instruction which may generate an overflow
	 *
	 * we do not need to set psr.sp because it is irrelevant in kernel;
	 * it will be restored from ipsr on return to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMDs: we have no guarantee we will be
	 * scheduled on this same CPU again
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0: ia64_srlz_d() was done in pfm_save_pmds(). we will need
	 * it on the restore path to check for pending overflow
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
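
/*
 * State carried across a context switch by pfm_save_regs() above and
 * consumed by pfm_load_regs() below: ctx->th_pmds[] (the used PMDs),
 * ctx->th_pmcs[0] (overflow status at save time, replayed on reload) and
 * ctx->ctx_saved_psr_up (whether monitoring was active). Ownership is
 * dropped here so that a late overflow interrupt is treated as spurious.
 */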

#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * this is the last instruction which may generate an overflow
	 *
	 * we do not need to set psr.sp because it is irrelevant in kernel;
	 * it will be restored from ipsr on return to user level
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}

static void
pfm_lazy_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;

	{
		u64 psr = pfm_get_psr();
		BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);

	/*
	 * we need to mask PMU overflow interrupts here to make sure that we
	 * maintain pmc0 until we save it. overflow interrupts are treated as
	 * spurious when there is no owner.
	 */
	PROTECT_CTX(ctx, flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers: after this call any
	 * PMU interrupt is treated as spurious
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0: ia64_srlz_d() was done in pfm_save_pmds(). it is needed
	 * to check for pending overflow on the restore path
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * we can now unmask PMU interrupts: they will be treated as purely
	 * spurious and we will not lose any information
	 */
	UNPROTECT_CTX(ctx, flags);
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
/*
 * called from the context switch path to restore the PMU state of the
 * incoming task
 */
void
pfm_load_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	/*
	 * possible on unload
	 */
	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by the caller
	 * (__switch_to()), so we must use the ctxsw variants of the context
	 * protection macros
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up stale state
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on this CPU, the state is
	 * still intact and a partial reload is sufficient
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * to avoid leaking information to user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we do
		 * not use), unless fastctxsw is enabled
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused
		 * registers get their default (from pfm_reset_pmu_state())
		 * values to avoid picking up stale configuration.
		 *
		 * PMC0 is never in the mask; it is always restored separately
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}

	/*
	 * when the context is MASKED, we restore PMCs with plm=0 and PMDs
	 * with stale information; that is ok, nothing will be captured
	 */
	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state was saved
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information;
		 * on McKinley PMUs this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();
		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * bump the activation generation for this CPU
	 */
	INC_ACTIVATION();

	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit: measurement is active again.
	 * no PMU interrupt can happen at this point because interrupts are
	 * still disabled
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to the context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
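
/*
 * Sketch of the lazy-restore decision in pfm_load_regs() above: the PMU
 * state left on a CPU is reusable only when both tests pass, otherwise a
 * full reload is performed:
 *
 *	if (GET_LAST_CPU(ctx) == smp_processor_id() &&
 *	    ctx->ctx_last_activation == GET_ACTIVATION())
 *		reload only ctx_reload_pmcs[0] / ctx_reload_pmds[0];
 *	else
 *		reload all PMCs and all (or, with fastctxsw, used) PMDs;
 *
 * Every reload bumps the per-CPU activation generation, which is what
 * invalidates state left behind by other contexts.
 */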
#else /* !CONFIG_SMP */
/*
 * UP version of pfm_load_regs(): the PMU state may still belong to the
 * incoming task, in which case only psr.up needs to be restored
 */
void
pfm_load_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up stale
	 * state.
	 *
	 * this must be done even when the task is still the owner, because
	 * the registers may have been modified via ptrace() (not perfmon)
	 * by the previous task
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path: our state is still there, we just need to restore psr
	 * and we go.
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched by the
	 * overflow handler, so we are safe w.r.t. interrupt concurrency
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU: push its state out first.
	 * upon return, there will be no owner for the current PMU
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * to avoid leaking information to user level when psr.sp=0, we must
	 * reload ALL implemented pmds (even the ones we do not use), unless
	 * fastctxsw is enabled
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid
	 * picking up stale configuration.
	 *
	 * PMC0 is never in the mask; it is always restored separately
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state was saved
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information;
		 * on McKinley PMUs this will trigger a PMU interrupt
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();

		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit: measurement is active again.
	 * no PMU interrupt can happen at this point because interrupts are
	 * still disabled
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */

/*
 * this function assumes monitoring has been stopped; it flushes the
 * current hardware register values into the software state of the context
 */
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or the task which
	 * initiated the session for system-wide measurements)?
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * we can access the PMU if the task is the owner of the PMU state on
	 * the current CPU, or if we are running on the CPU bound to the
	 * context in system-wide mode (which is not necessarily the task the
	 * context is attached to in that mode)
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * mark the PMU as not owned: this causes the interrupt
		 * handler to do nothing in case an overflow interrupt is in
		 * flight, and guarantees that pmc0 will contain the final
		 * state
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read the current overflow status: we are guaranteed to
		 * read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0);

		/*
		 * reset freeze bit, overflow status information destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = ctx->th_pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		ctx->th_pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;

	/*
	 * we save all the used pmds and take care of overflows for counting
	 * PMDs
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2 >>= 1) {

		/* skip non-used pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can_access_pmu is always true in system-wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task_pid_nr(task),
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * rebuild the full 64-bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * everything is now in ctx_pmds[], so clear the
			 * per-thread copy saved by pfm_save_regs() such that
			 * pfm_read_pmds() picks up the right value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));

		if (is_self) ctx->th_pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}
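
/*
 * Worked example of the counter reconstruction in pfm_flush_pmds() above,
 * assuming a hypothetical 47-bit hardware counter, i.e.
 * ovfl_val = (1UL << 47) - 1:
 *
 *	ctx_pmds[i].val = 0x0000800000000000	(software upper bits)
 *	hw pmd[i]       = 0x0000000000001234	(live lower 47 bits)
 *
 *	val = 0x0000800000000000 + (0x1234 & ovfl_val)
 *	    = 0x0000800000001234
 *
 * If pmc0 also flags a pending overflow for PMD i, one carry that the
 * interrupt handler never processed is folded in:
 *
 *	val += 1 + ovfl_val;		(i.e. += 1UL << 47)
 */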

static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.flags   = IRQF_DISABLED,
	.name    = "perfmon"
};

static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary but let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * this call is required; it may cause a spurious interrupt on some
	 * processors
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}

void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * put the PMU back in the state expected by perfmon: monitoring
	 * stopped...
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * ...and unfrozen
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}

int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve a system-wide session on each CPU */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system-wide PMU state on every CPU */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
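
/*
 * Illustrative sketch (assumed caller code, not part of this file) of how
 * a kernel subsystem would take over the PMU interrupt via the pair of
 * entry points exported above; my_pmu_handler is hypothetical:
 *
 *	static pfm_intr_handler_desc_t desc = {
 *		.handler = my_pmu_handler,
 *	};
 *
 *	if (pfm_install_alt_pmu_interrupt(&desc) == 0) {
 *		... drive the PMU directly ...
 *		pfm_remove_alt_pmu_interrupt(&desc);
 *	}
 *
 * Installation fails with -EBUSY when another alternate handler is
 * already registered, and with the error from pfm_reserve_session() when
 * a per-CPU session reservation cannot be obtained.
 */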

/*
 * perfmon initialization routines, called from the initcall table
 */
static int init_pfm_fs(void);

static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while (*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

static const struct file_operations pfm_proc_fops = {
	.open    = pfm_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
			local_cpu_data->family);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMC/PMD registers from the PMU
	 * description tables
	 */
	n = 0;
	for (i = 0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i = 0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds     = n;
	pmu_conf->num_counters = n_counters;

	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}

	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
		pmu_conf->pmu_name,
		pmu_conf->num_pmcs,
		pmu_conf->num_pmds,
		pmu_conf->num_counters,
		ffz(pmu_conf->ovfl_val));

	/* sanity check: the PMU tables must fit our fixed-size arrays */
	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for (i = 0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}
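
/*
 * Worked example of the implementation-bitmap arithmetic in pfm_init()
 * above: register index i is split into a word index (i >> 6) and a bit
 * index (i & 63) within pmu_conf->impl_pmcs[]/impl_pmds[]. For a
 * hypothetical PMC 70:
 *
 *	impl_pmcs[70 >> 6] |= 1UL << (70 & 63);		word 1, bit 6
 */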

__initcall(pfm_init);

/*
 * per-CPU PMU initialization, called from cpu_init()
 */
void
pfm_init_percpu(void)
{
	static int first_time = 1;

	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI)
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time = 0;
	}

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}

/*
 * used for debug purposes only
 */
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = task_pt_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		task_pid_nr(current),
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	for (i = 1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx ? ctx->th_pmcs[i] : 0UL);
	}

	for (i = 1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx ? ctx->th_pmds[i] : 0UL);
	}

	if (ctx) {
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_msgq_head,
			ctx->ctx_msgq_tail,
			ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}

/*
 * called from copy_thread(): clear the inherited perfmon state of the
 * newly created task
 */
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_thread()
	 */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl(int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */