/*
 * This file implements the perfmon subsystem, which is used to program
 * the IA-64 Performance Monitoring Unit (PMU).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <linux/uaccess.h>
#include <asm/delay.h>
#ifdef CONFIG_PERFMON

/*
 * perfmon context states
 */
#define PFM_CTX_UNLOADED	1	/* context is not loaded onto any task */
#define PFM_CTX_LOADED		2	/* context is loaded onto a task */
#define PFM_CTX_MASKED		3	/* context is loaded, monitoring stopped after an overflow */
#define PFM_CTX_ZOMBIE		4	/* owner of the context closed it while it was still loaded */

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64	/* PMC save area for context switch */
#define PFM_NUM_PMD_REGS	64	/* PMD save area for context switch */

/*
 * depth of the per-context message queue
 */
#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

/*
 * Type of a PMU register (bitmask), as described by the pmc_desc/pmd_desc
 * tables of the active pmu_config_t:
 *	bit 0   : register is implemented
 *	bit 1   : end-of-table marker
 *	bits 4-7: register class; every class implies PFM_REG_IMPL
 */
#define PFM_REG_NOTIMPL		0x0	/* not implemented at all */
#define PFM_REG_IMPL		0x1	/* register implemented */
#define PFM_REG_END		0x2	/* end marker */
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)	/* monitor PMC */
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR) /* monitor whose PMD is used as a counter */
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)	/* PMU control register */
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)	/* configuration register */
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)	/* PMD used as buffer */
98
99#define PMC_IS_LAST(i) (pmu_conf->pmc_desc[i].type & PFM_REG_END)
100#define PMD_IS_LAST(i) (pmu_conf->pmd_desc[i].type & PFM_REG_END)
101
102#define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
103
104
105#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
106#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))
107
108
109#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
110#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
111#define PMC_IS_MONITOR(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
112#define PMC_IS_CONTROL(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)
113
114#define PMC_DFL_VAL(i) pmu_conf->pmc_desc[i].default_value
115#define PMC_RSVD_MASK(i) pmu_conf->pmc_desc[i].reserved_mask
116#define PMD_PMD_DEP(i) pmu_conf->pmd_desc[i].dep_pmd[0]
117#define PMC_PMD_DEP(i) pmu_conf->pmc_desc[i].dep_pmd[0]
118
119#define PFM_NUM_IBRS IA64_NUM_DBG_REGS
120#define PFM_NUM_DBRS IA64_NUM_DBG_REGS
121
122#define CTX_OVFL_NOBLOCK(c) ((c)->ctx_fl_block == 0)
123#define CTX_HAS_SMPL(c) ((c)->ctx_fl_is_sampling)
124#define PFM_CTX_TASK(h) (h)->ctx_task
125
126#define PMU_PMC_OI 5
127
128
129#define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
130#define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
131
132#define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
133
134#define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
135#define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
136#define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
137#define PFM_CODE_RR 0
138#define PFM_DATA_RR 1
139
140#define PFM_CPUINFO_CLEAR(v) pfm_get_cpu_var(pfm_syst_info) &= ~(v)
141#define PFM_CPUINFO_SET(v) pfm_get_cpu_var(pfm_syst_info) |= (v)
142#define PFM_CPUINFO_GET() pfm_get_cpu_var(pfm_syst_info)
143
144#define RDEP(x) (1UL<<(x))
145
/*
 * Context protection macros. All accesses to a pfm_context_t are serialized
 * through ctx_lock; PROTECT_CTX()/UNPROTECT_CTX() take the lock with local
 * interrupts disabled, while the *_NOIRQ variants and the ctxsw helpers
 * further below only take the spinlock (callers are expected to already run
 * with interrupts off).
 */
164#define PROTECT_CTX(c, f) \
165 do { \
166 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
167 spin_lock_irqsave(&(c)->ctx_lock, f); \
168 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
169 } while(0)
170
171#define UNPROTECT_CTX(c, f) \
172 do { \
173 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
174 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
175 } while(0)
176
177#define PROTECT_CTX_NOPRINT(c, f) \
178 do { \
179 spin_lock_irqsave(&(c)->ctx_lock, f); \
180 } while(0)
181
182
183#define UNPROTECT_CTX_NOPRINT(c, f) \
184 do { \
185 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
186 } while(0)
187
188
189#define PROTECT_CTX_NOIRQ(c) \
190 do { \
191 spin_lock(&(c)->ctx_lock); \
192 } while(0)
193
194#define UNPROTECT_CTX_NOIRQ(c) \
195 do { \
196 spin_unlock(&(c)->ctx_lock); \
197 } while(0)
198
199
200#ifdef CONFIG_SMP
201
202#define GET_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)
203#define INC_ACTIVATION() pfm_get_cpu_var(pmu_activation_number)++
204#define SET_ACTIVATION(c) (c)->ctx_last_activation = GET_ACTIVATION()
205
206#else
207#define SET_ACTIVATION(t) do {} while(0)
208#define GET_ACTIVATION(t) do {} while(0)
209#define INC_ACTIVATION(t) do {} while(0)
210#endif
211
212#define SET_PMU_OWNER(t, c) do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
213#define GET_PMU_OWNER() pfm_get_cpu_var(pmu_owner)
214#define GET_PMU_CTX() pfm_get_cpu_var(pmu_ctx)
215
216#define LOCK_PFS(g) spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
217#define UNLOCK_PFS(g) spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)
218
219#define PFM_REG_RETFLAG_SET(flags, val) do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)
220
221
222
223
224#define PMC0_HAS_OVFL(cmp0) (cmp0 & ~0x1UL)
225
226#define PFMFS_MAGIC 0xa0b4d889
227
228
229
230
231#define PFM_DEBUGGING 1
232#ifdef PFM_DEBUGGING
233#define DPRINT(a) \
234 do { \
235 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
236 } while (0)
237
238#define DPRINT_ovfl(a) \
239 do { \
240 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
241 } while (0)
242#endif

/*
 * 64-bit software-maintained state of a counting PMD. The hardware register
 * only implements pmu_conf->ovfl_val bits; the remaining bits are emulated
 * in 'val' (see pfm_read_soft_counter()/pfm_write_soft_counter()).
 */
typedef struct {
	unsigned long	val;		/* virtual 64-bit counter value */
	unsigned long	lval;		/* last reset value */
	unsigned long	long_reset;	/* reset value on sampling overflow (long form) */
	unsigned long	short_reset;	/* reset value on overflow (short form) */
	unsigned long	reset_pmds[4];	/* which other PMDs to reset on overflow */
	unsigned long	smpl_pmds[4];	/* which PMDs are recorded on overflow */
	unsigned long	seed;		/* seed for random-number generator */
	unsigned long	mask;		/* mask for random-number generator */
	unsigned int	flags;		/* notify/do not notify */
	unsigned long	eventid;	/* overflow event identifier */
} pfm_counter_t;

/*
 * perfmon context flags (ctx_flags)
 */
typedef struct {
	unsigned int block:1;		/* when 1, task will block on user notifications */
	unsigned int system:1;		/* do system-wide monitoring */
	unsigned int using_dbreg:1;	/* using range restrictions (debug registers) */
	unsigned int is_sampling:1;	/* true if using a custom sampling format */
	unsigned int excl_idle:1;	/* exclude idle task in system-wide session */
	unsigned int going_zombie:1;	/* context being torn down while still loaded */
	unsigned int trap_reason:2;	/* reason for entering the deferred overflow path */
	unsigned int no_msg:1;		/* no message sent on overflow */
	unsigned int can_restart:1;	/* allowed to issue a PFM_RESTART */
	unsigned int reserved:22;
} pfm_context_flags_t;
277
278#define PFM_TRAP_REASON_NONE 0x0
279#define PFM_TRAP_REASON_BLOCK 0x1
280#define PFM_TRAP_REASON_RESET 0x2
281
282
283
284
285
286
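/*
 * Per-context state: one pfm_context_t per perfmon file descriptor.
 * All fields are protected by ctx_lock (see PROTECT_CTX()).
 */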
287typedef struct pfm_context {
288 spinlock_t ctx_lock;
289
290 pfm_context_flags_t ctx_flags;
291 unsigned int ctx_state;
292
293 struct task_struct *ctx_task;
294
295 unsigned long ctx_ovfl_regs[4];
296
297 struct completion ctx_restart_done;
298
299 unsigned long ctx_used_pmds[4];
300 unsigned long ctx_all_pmds[4];
301 unsigned long ctx_reload_pmds[4];
302
303 unsigned long ctx_all_pmcs[4];
304 unsigned long ctx_reload_pmcs[4];
305 unsigned long ctx_used_monitors[4];
306
307 unsigned long ctx_pmcs[PFM_NUM_PMC_REGS];
308
309 unsigned int ctx_used_ibrs[1];
310 unsigned int ctx_used_dbrs[1];
311 unsigned long ctx_dbrs[IA64_NUM_DBG_REGS];
312 unsigned long ctx_ibrs[IA64_NUM_DBG_REGS];
313
314 pfm_counter_t ctx_pmds[PFM_NUM_PMD_REGS];
315
316 unsigned long th_pmcs[PFM_NUM_PMC_REGS];
317 unsigned long th_pmds[PFM_NUM_PMD_REGS];
318
319 unsigned long ctx_saved_psr_up;
320
321 unsigned long ctx_last_activation;
322 unsigned int ctx_last_cpu;
323 unsigned int ctx_cpu;
324
325 int ctx_fd;
326 pfm_ovfl_arg_t ctx_ovfl_arg;
327
328 pfm_buffer_fmt_t *ctx_buf_fmt;
329 void *ctx_smpl_hdr;
330 unsigned long ctx_smpl_size;
331 void *ctx_smpl_vaddr;
332
333 wait_queue_head_t ctx_msgq_wait;
334 pfm_msg_t ctx_msgq[PFM_MAX_MSGS];
335 int ctx_msgq_head;
336 int ctx_msgq_tail;
337 struct fasync_struct *ctx_async_queue;
338
339 wait_queue_head_t ctx_zombieq;
340} pfm_context_t;
341
342
343
344
345
346#define PFM_IS_FILE(f) ((f)->f_op == &pfm_file_ops)
347
348#define PFM_GET_CTX(t) ((pfm_context_t *)(t)->thread.pfm_context)
349
350#ifdef CONFIG_SMP
351#define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
352#define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
353#else
354#define SET_LAST_CPU(ctx, v) do {} while(0)
355#define GET_LAST_CPU(ctx) do {} while(0)
356#endif
357
358
359#define ctx_fl_block ctx_flags.block
360#define ctx_fl_system ctx_flags.system
361#define ctx_fl_using_dbreg ctx_flags.using_dbreg
362#define ctx_fl_is_sampling ctx_flags.is_sampling
363#define ctx_fl_excl_idle ctx_flags.excl_idle
364#define ctx_fl_going_zombie ctx_flags.going_zombie
365#define ctx_fl_trap_reason ctx_flags.trap_reason
366#define ctx_fl_no_msg ctx_flags.no_msg
367#define ctx_fl_can_restart ctx_flags.can_restart
368
#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
370#define PFM_GET_WORK_PENDING(t) (t)->thread.pfm_needs_checking
371
372
373
374
375
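/*
 * Global bookkeeping of active perfmon sessions. System-wide and per-task
 * sessions are mutually exclusive; pfs_lock protects the counters and the
 * per-CPU system-wide owner table.
 */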
376typedef struct {
377 spinlock_t pfs_lock;
378
379 unsigned int pfs_task_sessions;
380 unsigned int pfs_sys_sessions;
381 unsigned int pfs_sys_use_dbregs;
382 unsigned int pfs_ptrace_use_dbregs;
383 struct task_struct *pfs_sys_session[NR_CPUS];
384} pfm_session_t;
385
386
387
388
389
390
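/*
 * Description of one PMC/PMD register as provided by the PMU-specific
 * tables (perfmon_itanium.h and friends): type flags, default and reserved
 * bit values, optional read/write check callbacks, and the bitmasks of
 * dependent PMDs/PMCs.
 */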
391typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
392typedef struct {
393 unsigned int type;
394 int pm_pos;
395 unsigned long default_value;
396 unsigned long reserved_mask;
397 pfm_reg_check_t read_check;
398 pfm_reg_check_t write_check;
399 unsigned long dep_pmd[4];
400 unsigned long dep_pmc[4];
401} pfm_reg_desc_t;
402
403
404#define PMC_PM(cnum, val) (((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)

/*
 * One pmu_config_t per supported PMU family (see pmu_confs[] below). It
 * describes which PMC/PMD registers are implemented and their properties,
 * the counter overflow value, and a probe() routine used at init time to
 * select the configuration matching the running CPU.
 */
418typedef struct {
419 unsigned long ovfl_val;
420
421 pfm_reg_desc_t *pmc_desc;
422 pfm_reg_desc_t *pmd_desc;
423
424 unsigned int num_pmcs;
425 unsigned int num_pmds;
426 unsigned long impl_pmcs[4];
427 unsigned long impl_pmds[4];
428
429 char *pmu_name;
430 unsigned int pmu_family;
431 unsigned int flags;
432 unsigned int num_ibrs;
433 unsigned int num_dbrs;
434 unsigned int num_counters;
435 int (*probe)(void);
436 unsigned int use_rr_dbregs:1;
437} pmu_config_t;
438
439
440
441#define PFM_PMU_IRQ_RESEND 1
442
443
444
445
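/*
 * Bit layouts of the IA-64 instruction (IBR) and data (DBR) debug
 * registers, used when code/data range restrictions are programmed for a
 * context.
 */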
446typedef struct {
447 unsigned long ibr_mask:56;
448 unsigned long ibr_plm:4;
449 unsigned long ibr_ig:3;
450 unsigned long ibr_x:1;
451} ibr_mask_reg_t;
452
453typedef struct {
454 unsigned long dbr_mask:56;
455 unsigned long dbr_plm:4;
456 unsigned long dbr_ig:2;
457 unsigned long dbr_w:1;
458 unsigned long dbr_r:1;
459} dbr_mask_reg_t;
460
461typedef union {
462 unsigned long val;
463 ibr_mask_reg_t ibr;
464 dbr_mask_reg_t dbr;
465} dbreg_t;
466
467
468
469
470
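/*
 * One entry of the perfmon command table (pfm_cmd_tab[]): handler, command
 * name, expected argument count/size, and flags (PFM_CMD_FD,
 * PFM_CMD_ARG_READ, PFM_CMD_ARG_RW, PFM_CMD_STOP) describing how the
 * command and its argument are handled.
 */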
471typedef struct {
472 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
473 char *cmd_name;
474 int cmd_flags;
475 unsigned int cmd_narg;
476 size_t cmd_argsize;
477 int (*cmd_getsize)(void *arg, size_t *sz);
478} pfm_cmd_desc_t;
479
480#define PFM_CMD_FD 0x01
481#define PFM_CMD_ARG_READ 0x02
482#define PFM_CMD_ARG_RW 0x04
483#define PFM_CMD_STOP 0x08
484
485
486#define PFM_CMD_NAME(cmd) pfm_cmd_tab[(cmd)].cmd_name
487#define PFM_CMD_READ_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
488#define PFM_CMD_RW_ARG(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
489#define PFM_CMD_USE_FD(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
490#define PFM_CMD_STOPPED(cmd) (pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)
491
492#define PFM_CMD_ARG_MANY -1
493
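/*
 * Per-CPU statistics on overflow-interrupt and sampling-handler activity,
 * padded to a cache line to avoid false sharing (see pfm_stats[] below).
 */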
494typedef struct {
495 unsigned long pfm_spurious_ovfl_intr_count;
496 unsigned long pfm_replay_ovfl_intr_count;
497 unsigned long pfm_ovfl_intr_count;
498 unsigned long pfm_ovfl_intr_cycles;
499 unsigned long pfm_ovfl_intr_cycles_min;
500 unsigned long pfm_ovfl_intr_cycles_max;
501 unsigned long pfm_smpl_handler_calls;
502 unsigned long pfm_smpl_handler_cycles;
503 char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
504} pfm_stats_t;
505
506
507
508
509static pfm_stats_t pfm_stats[NR_CPUS];
510static pfm_session_t pfm_sessions;
511
512static DEFINE_SPINLOCK(pfm_alt_install_check);
513static pfm_intr_handler_desc_t *pfm_alt_intr_handler;
514
515static struct proc_dir_entry *perfmon_dir;
516static pfm_uuid_t pfm_null_uuid = {0,};
517
518static spinlock_t pfm_buffer_fmt_lock;
519static LIST_HEAD(pfm_buffer_fmt_list);
520
521static pmu_config_t *pmu_conf;
522
523
524pfm_sysctl_t pfm_sysctl;
525EXPORT_SYMBOL(pfm_sysctl);
526
527static struct ctl_table pfm_ctl_table[] = {
528 {
529 .procname = "debug",
530 .data = &pfm_sysctl.debug,
531 .maxlen = sizeof(int),
532 .mode = 0666,
533 .proc_handler = proc_dointvec,
534 },
535 {
536 .procname = "debug_ovfl",
537 .data = &pfm_sysctl.debug_ovfl,
538 .maxlen = sizeof(int),
539 .mode = 0666,
540 .proc_handler = proc_dointvec,
541 },
542 {
543 .procname = "fastctxsw",
544 .data = &pfm_sysctl.fastctxsw,
545 .maxlen = sizeof(int),
546 .mode = 0600,
547 .proc_handler = proc_dointvec,
548 },
549 {
550 .procname = "expert_mode",
551 .data = &pfm_sysctl.expert_mode,
552 .maxlen = sizeof(int),
553 .mode = 0600,
554 .proc_handler = proc_dointvec,
555 },
556 {}
557};
558static struct ctl_table pfm_sysctl_dir[] = {
559 {
560 .procname = "perfmon",
561 .mode = 0555,
562 .child = pfm_ctl_table,
563 },
564 {}
565};
566static struct ctl_table pfm_sysctl_root[] = {
567 {
568 .procname = "kernel",
569 .mode = 0555,
570 .child = pfm_sysctl_dir,
571 },
572 {}
573};
574static struct ctl_table_header *pfm_sysctl_header;
575
576static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
577
578#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
579#define pfm_get_cpu_data(a,b) per_cpu(a, b)
580
581static inline void
582pfm_put_task(struct task_struct *task)
583{
584 if (task != current) put_task_struct(task);
585}
586
587static inline unsigned long
588pfm_protect_ctx_ctxsw(pfm_context_t *x)
589{
590 spin_lock(&(x)->ctx_lock);
591 return 0UL;
592}
593
594static inline void
595pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
596{
597 spin_unlock(&(x)->ctx_lock);
598}
599
600
601static const struct dentry_operations pfmfs_dentry_operations;
602
603static struct dentry *
604pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
605{
606 return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
607 PFMFS_MAGIC);
608}
609
610static struct file_system_type pfm_fs_type = {
611 .name = "pfmfs",
612 .mount = pfmfs_mount,
613 .kill_sb = kill_anon_super,
614};
615MODULE_ALIAS_FS("pfmfs");
616
617DEFINE_PER_CPU(unsigned long, pfm_syst_info);
618DEFINE_PER_CPU(struct task_struct *, pmu_owner);
619DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
620DEFINE_PER_CPU(unsigned long, pmu_activation_number);
621EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);
622
623
624
625static const struct file_operations pfm_file_ops;
626
627
628
629
630#ifndef CONFIG_SMP
631static void pfm_lazy_save_regs (struct task_struct *ta);
632#endif
633
634void dump_pmu_state(const char *);
635static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
636
637#include "perfmon_itanium.h"
638#include "perfmon_mckinley.h"
639#include "perfmon_montecito.h"
640#include "perfmon_generic.h"
641
642static pmu_config_t *pmu_confs[]={
643 &pmu_conf_mont,
644 &pmu_conf_mck,
645 &pmu_conf_ita,
646 &pmu_conf_gen,
647 NULL
648};
649
650
651static int pfm_end_notify_user(pfm_context_t *ctx);
652
653static inline void
654pfm_clear_psr_pp(void)
655{
656 ia64_rsm(IA64_PSR_PP);
657 ia64_srlz_i();
658}
659
660static inline void
661pfm_set_psr_pp(void)
662{
663 ia64_ssm(IA64_PSR_PP);
664 ia64_srlz_i();
665}
666
667static inline void
668pfm_clear_psr_up(void)
669{
670 ia64_rsm(IA64_PSR_UP);
671 ia64_srlz_i();
672}
673
674static inline void
675pfm_set_psr_up(void)
676{
677 ia64_ssm(IA64_PSR_UP);
678 ia64_srlz_i();
679}
680
681static inline unsigned long
682pfm_get_psr(void)
683{
684 unsigned long tmp;
685 tmp = ia64_getreg(_IA64_REG_PSR);
686 ia64_srlz_i();
687 return tmp;
688}
689
690static inline void
691pfm_set_psr_l(unsigned long val)
692{
693 ia64_setreg(_IA64_REG_PSR_L, val);
694 ia64_srlz_i();
695}
696
697static inline void
698pfm_freeze_pmu(void)
699{
700 ia64_set_pmc(0,1UL);
701 ia64_srlz_d();
702}
703
704static inline void
705pfm_unfreeze_pmu(void)
706{
707 ia64_set_pmc(0,0UL);
708 ia64_srlz_d();
709}
710
711static inline void
712pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
713{
714 int i;
715
716 for (i=0; i < nibrs; i++) {
717 ia64_set_ibr(i, ibrs[i]);
718 ia64_dv_serialize_instruction();
719 }
720 ia64_srlz_i();
721}
722
723static inline void
724pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
725{
726 int i;
727
728 for (i=0; i < ndbrs; i++) {
729 ia64_set_dbr(i, dbrs[i]);
730 ia64_dv_serialize_data();
731 }
732 ia64_srlz_d();
733}
734
735
736
737
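/*
 * A counting PMD only implements pmu_conf->ovfl_val bits in hardware; the
 * upper bits live in ctx_pmds[i].val. Reading a soft counter therefore adds
 * the two halves, and writing splits the value accordingly.
 */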
738static inline unsigned long
739pfm_read_soft_counter(pfm_context_t *ctx, int i)
740{
741 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
742}
743
744
745
746
747static inline void
748pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
749{
750 unsigned long ovfl_val = pmu_conf->ovfl_val;
751
752 ctx->ctx_pmds[i].val = val & ~ovfl_val;
753
754
755
756
757 ia64_set_pmd(i, val & ovfl_val);
758}
759
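/*
 * Reserve the next free slot in the per-context message queue; returns NULL
 * when the queue is full (one slot before the head is always kept unused).
 */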
760static pfm_msg_t *
761pfm_get_new_msg(pfm_context_t *ctx)
762{
763 int idx, next;
764
765 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;
766
	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
768 if (next == ctx->ctx_msgq_head) return NULL;
769
770 idx = ctx->ctx_msgq_tail;
771 ctx->ctx_msgq_tail = next;
772
773 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));
774
775 return ctx->ctx_msgq+idx;
776}
777
778static pfm_msg_t *
779pfm_get_next_msg(pfm_context_t *ctx)
780{
781 pfm_msg_t *msg;
782
783 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
784
785 if (PFM_CTXQ_EMPTY(ctx)) return NULL;
786
787
788
789
790 msg = ctx->ctx_msgq+ctx->ctx_msgq_head;
791
792
793
794
795 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;
796
797 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));
798
799 return msg;
800}
801
802static void
803pfm_reset_msgq(pfm_context_t *ctx)
804{
805 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
806 DPRINT(("ctx=%p msgq reset\n", ctx));
807}
808
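/*
 * Allocate and minimally initialize a new context. Called from
 * pfm_context_create(); nobody else can see the context yet, so no locking
 * is needed here.
 */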
809static pfm_context_t *
810pfm_context_alloc(int ctx_flags)
811{
812 pfm_context_t *ctx;
813
814
815
816
817
818 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
819 if (ctx) {
820 DPRINT(("alloc ctx @%p\n", ctx));
821
822
823
824
825 spin_lock_init(&ctx->ctx_lock);
826
827
828
829
830 ctx->ctx_state = PFM_CTX_UNLOADED;
831
832
833
834
835 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
836 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
837 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
838
839
840
841
842
843
844
845
846 init_completion(&ctx->ctx_restart_done);
847
848
849
850
851 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
852 SET_LAST_CPU(ctx, -1);
853
854
855
856
857 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
858 init_waitqueue_head(&ctx->ctx_msgq_wait);
859 init_waitqueue_head(&ctx->ctx_zombieq);
860
861 }
862 return ctx;
863}
864
865static void
866pfm_context_free(pfm_context_t *ctx)
867{
868 if (ctx) {
869 DPRINT(("free ctx @%p\n", ctx));
870 kfree(ctx);
871 }
872}
873
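/*
 * Stop monitoring for a task whose context just got masked: the current PMD
 * values are folded into the software counters, and the privilege-level
 * field (bits 0-3) of every used monitoring PMC is cleared so no further
 * counting occurs until pfm_restore_monitoring() is called.
 */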
874static void
875pfm_mask_monitoring(struct task_struct *task)
876{
877 pfm_context_t *ctx = PFM_GET_CTX(task);
878 unsigned long mask, val, ovfl_mask;
879 int i;
880
881 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
882
883 ovfl_mask = pmu_conf->ovfl_val;
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903 mask = ctx->ctx_used_pmds[0];
904 for (i = 0; mask; i++, mask>>=1) {
905
906 if ((mask & 0x1) == 0) continue;
907 val = ia64_get_pmd(i);
908
909 if (PMD_IS_COUNTING(i)) {
910
911
912
913 ctx->ctx_pmds[i].val += (val & ovfl_mask);
914 } else {
915 ctx->ctx_pmds[i].val = val;
916 }
917 DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
918 i,
919 ctx->ctx_pmds[i].val,
920 val & ovfl_mask));
921 }
922
923
924
925
926
927
928
929
930 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
931 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
932 if ((mask & 0x1) == 0UL) continue;
933 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
934 ctx->th_pmcs[i] &= ~0xfUL;
935 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
936 }
937
938
939
940 ia64_srlz_d();
941}
942
943
944
945
946
947
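/*
 * Undo pfm_mask_monitoring(): reinstall the PMD and PMC values kept in the
 * context. Must be called with task == current and with the context in the
 * PFM_CTX_MASKED state (both conditions are checked below).
 */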
948static void
949pfm_restore_monitoring(struct task_struct *task)
950{
951 pfm_context_t *ctx = PFM_GET_CTX(task);
952 unsigned long mask, ovfl_mask;
953 unsigned long psr, val;
954 int i, is_system;
955
956 is_system = ctx->ctx_fl_system;
957 ovfl_mask = pmu_conf->ovfl_val;
958
959 if (task != current) {
960 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
961 return;
962 }
963 if (ctx->ctx_state != PFM_CTX_MASKED) {
964 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
965 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
966 return;
967 }
968 psr = pfm_get_psr();
969
970
971
972
973
974
975
976
977
978
979 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
980
981 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
982 pfm_clear_psr_pp();
983 } else {
984 pfm_clear_psr_up();
985 }
986
987
988
989 mask = ctx->ctx_used_pmds[0];
990 for (i = 0; mask; i++, mask>>=1) {
991
992 if ((mask & 0x1) == 0) continue;
993
994 if (PMD_IS_COUNTING(i)) {
995
996
997
998
999 val = ctx->ctx_pmds[i].val & ovfl_mask;
1000 ctx->ctx_pmds[i].val &= ~ovfl_mask;
1001 } else {
1002 val = ctx->ctx_pmds[i].val;
1003 }
1004 ia64_set_pmd(i, val);
1005
1006 DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
1007 i,
1008 ctx->ctx_pmds[i].val,
1009 val));
1010 }
1011
1012
1013
1014 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
1015 for(i= PMU_FIRST_COUNTER; mask; i++, mask>>=1) {
1016 if ((mask & 0x1) == 0UL) continue;
1017 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1018 ia64_set_pmc(i, ctx->th_pmcs[i]);
1019 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1020 task_pid_nr(task), i, ctx->th_pmcs[i]));
1021 }
1022 ia64_srlz_d();
1023
1024
1025
1026
1027
1028 if (ctx->ctx_fl_using_dbreg) {
1029 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
1030 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
1031 }
1032
1033
1034
1035
1036 if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
1037
1038 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
1039 ia64_srlz_i();
1040 }
1041 pfm_set_psr_l(psr);
1042}
1043
1044static inline void
1045pfm_save_pmds(unsigned long *pmds, unsigned long mask)
1046{
1047 int i;
1048
1049 ia64_srlz_d();
1050
1051 for (i=0; mask; i++, mask>>=1) {
1052 if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
1053 }
1054}
1055
1056
1057
1058
1059static inline void
1060pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
1061{
1062 int i;
1063 unsigned long val, ovfl_val = pmu_conf->ovfl_val;
1064
1065 for (i=0; mask; i++, mask>>=1) {
1066 if ((mask & 0x1) == 0) continue;
1067 val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
1068 ia64_set_pmd(i, val);
1069 }
1070 ia64_srlz_d();
1071}
1072
1073
1074
1075
1076static inline void
1077pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
1078{
1079 unsigned long ovfl_val = pmu_conf->ovfl_val;
1080 unsigned long mask = ctx->ctx_all_pmds[0];
1081 unsigned long val;
1082 int i;
1083
1084 DPRINT(("mask=0x%lx\n", mask));
1085
1086 for (i=0; mask; i++, mask>>=1) {
1087
1088 val = ctx->ctx_pmds[i].val;
1089
1090
1091
1092
1093
1094
1095
1096 if (PMD_IS_COUNTING(i)) {
1097 ctx->ctx_pmds[i].val = val & ~ovfl_val;
1098 val &= ovfl_val;
1099 }
1100 ctx->th_pmds[i] = val;
1101
1102 DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
1103 i,
1104 ctx->th_pmds[i],
1105 ctx->ctx_pmds[i].val));
1106 }
1107}
1108
1109
1110
1111
1112static inline void
1113pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
1114{
1115 unsigned long mask = ctx->ctx_all_pmcs[0];
1116 int i;
1117
1118 DPRINT(("mask=0x%lx\n", mask));
1119
1120 for (i=0; mask; i++, mask>>=1) {
1121
1122 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1123 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
1124 }
1125}
1126
1127
1128
1129static inline void
1130pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
1131{
1132 int i;
1133
1134 for (i=0; mask; i++, mask>>=1) {
1135 if ((mask & 0x1) == 0) continue;
1136 ia64_set_pmc(i, pmcs[i]);
1137 }
1138 ia64_srlz_d();
1139}
1140
1141static inline int
1142pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
1143{
1144 return memcmp(a, b, sizeof(pfm_uuid_t));
1145}
1146
1147static inline int
1148pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
1149{
1150 int ret = 0;
1151 if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
1152 return ret;
1153}
1154
1155static inline int
1156pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
1157{
1158 int ret = 0;
1159 if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
1160 return ret;
1161}
1162
1163
1164static inline int
1165pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
1166 int cpu, void *arg)
1167{
1168 int ret = 0;
1169 if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
1170 return ret;
1171}
1172
1173static inline int
1174pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
1175 int cpu, void *arg)
1176{
1177 int ret = 0;
1178 if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
1179 return ret;
1180}
1181
1182static inline int
1183pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1184{
1185 int ret = 0;
1186 if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
1187 return ret;
1188}
1189
1190static inline int
1191pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
1192{
1193 int ret = 0;
1194 if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
1195 return ret;
1196}
1197
1198static pfm_buffer_fmt_t *
1199__pfm_find_buffer_fmt(pfm_uuid_t uuid)
1200{
1201 struct list_head * pos;
1202 pfm_buffer_fmt_t * entry;
1203
1204 list_for_each(pos, &pfm_buffer_fmt_list) {
1205 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
1206 if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
1207 return entry;
1208 }
1209 return NULL;
1210}
1211
1212
1213
1214
1215static pfm_buffer_fmt_t *
1216pfm_find_buffer_fmt(pfm_uuid_t uuid)
1217{
1218 pfm_buffer_fmt_t * fmt;
1219 spin_lock(&pfm_buffer_fmt_lock);
1220 fmt = __pfm_find_buffer_fmt(uuid);
1221 spin_unlock(&pfm_buffer_fmt_lock);
1222 return fmt;
1223}
1224
1225int
1226pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
1227{
1228 int ret = 0;
1229
1230
1231 if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;
1232
1233
1234 if (fmt->fmt_handler == NULL) return -EINVAL;
1235
1236
1237
1238
1239
1240 spin_lock(&pfm_buffer_fmt_lock);
1241
1242 if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
1243 printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
1244 ret = -EBUSY;
1245 goto out;
1246 }
1247 list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
1248 printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);
1249
1250out:
1251 spin_unlock(&pfm_buffer_fmt_lock);
1252 return ret;
1253}
1254EXPORT_SYMBOL(pfm_register_buffer_fmt);
1255
1256int
1257pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
1258{
1259 pfm_buffer_fmt_t *fmt;
1260 int ret = 0;
1261
1262 spin_lock(&pfm_buffer_fmt_lock);
1263
1264 fmt = __pfm_find_buffer_fmt(uuid);
1265 if (!fmt) {
1266 printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
1267 ret = -EINVAL;
1268 goto out;
1269 }
1270 list_del_init(&fmt->fmt_list);
1271 printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);
1272
1273out:
1274 spin_unlock(&pfm_buffer_fmt_lock);
1275 return ret;
1276
1277}
1278EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
1279
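/*
 * Reserve a monitoring session. A system-wide session claims one CPU
 * exclusively and cannot coexist with per-task sessions (and vice versa);
 * returns -EBUSY on any conflict.
 */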
1280static int
1281pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1282{
1283 unsigned long flags;
1284
1285
1286
1287 LOCK_PFS(flags);
1288
1289 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1290 pfm_sessions.pfs_sys_sessions,
1291 pfm_sessions.pfs_task_sessions,
1292 pfm_sessions.pfs_sys_use_dbregs,
1293 is_syswide,
1294 cpu));
1295
1296 if (is_syswide) {
1297
1298
1299
1300 if (pfm_sessions.pfs_task_sessions > 0UL) {
1301 DPRINT(("system wide not possible, %u conflicting task_sessions\n",
1302 pfm_sessions.pfs_task_sessions));
1303 goto abort;
1304 }
1305
1306 if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;
1307
1308 DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));
1309
1310 pfm_sessions.pfs_sys_session[cpu] = task;
1311
1312 pfm_sessions.pfs_sys_sessions++ ;
1313
1314 } else {
1315 if (pfm_sessions.pfs_sys_sessions) goto abort;
1316 pfm_sessions.pfs_task_sessions++;
1317 }
1318
1319 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1320 pfm_sessions.pfs_sys_sessions,
1321 pfm_sessions.pfs_task_sessions,
1322 pfm_sessions.pfs_sys_use_dbregs,
1323 is_syswide,
1324 cpu));
1325
1326
1327
1328
1329 cpu_idle_poll_ctrl(true);
1330
1331 UNLOCK_PFS(flags);
1332
1333 return 0;
1334
1335error_conflict:
1336 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1337 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1338 cpu));
1339abort:
1340 UNLOCK_PFS(flags);
1341
1342 return -EBUSY;
1343
1344}
1345
1346static int
1347pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
1348{
1349 unsigned long flags;
1350
1351
1352
1353 LOCK_PFS(flags);
1354
1355 DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1356 pfm_sessions.pfs_sys_sessions,
1357 pfm_sessions.pfs_task_sessions,
1358 pfm_sessions.pfs_sys_use_dbregs,
1359 is_syswide,
1360 cpu));
1361
1362
1363 if (is_syswide) {
1364 pfm_sessions.pfs_sys_session[cpu] = NULL;
1365
1366
1367
1368 if (ctx && ctx->ctx_fl_using_dbreg) {
1369 if (pfm_sessions.pfs_sys_use_dbregs == 0) {
1370 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
1371 } else {
1372 pfm_sessions.pfs_sys_use_dbregs--;
1373 }
1374 }
1375 pfm_sessions.pfs_sys_sessions--;
1376 } else {
1377 pfm_sessions.pfs_task_sessions--;
1378 }
1379 DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
1380 pfm_sessions.pfs_sys_sessions,
1381 pfm_sessions.pfs_task_sessions,
1382 pfm_sessions.pfs_sys_use_dbregs,
1383 is_syswide,
1384 cpu));
1385
1386
1387 cpu_idle_poll_ctrl(false);
1388
1389 UNLOCK_PFS(flags);
1390
1391 return 0;
1392}
1393
1394
1395
1396
1397
1398
1399static int
1400pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
1401{
1402 struct task_struct *task = current;
1403 int r;
1404
1405
1406 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1407 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1408 return -EINVAL;
1409 }
1410
1411 DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));
1412
1413
1414
1415
1416 r = vm_munmap((unsigned long)vaddr, size);
1417
1418 if (r !=0) {
1419 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1420 }
1421
1422 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
1423
1424 return 0;
1425}
1426
1427
1428
1429
1430#if 0
1431static int
1432pfm_free_smpl_buffer(pfm_context_t *ctx)
1433{
1434 pfm_buffer_fmt_t *fmt;
1435
1436 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1437
1438
1439
1440
1441 fmt = ctx->ctx_buf_fmt;
1442
1443 DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
1444 ctx->ctx_smpl_hdr,
1445 ctx->ctx_smpl_size,
1446 ctx->ctx_smpl_vaddr));
1447
1448 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1449
1450
1451
1452
1453 vfree(ctx->ctx_smpl_hdr);
1454
1455 ctx->ctx_smpl_hdr = NULL;
1456 ctx->ctx_smpl_size = 0UL;
1457
1458 return 0;
1459
1460invalid_free:
1461 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1462 return -EINVAL;
1463}
1464#endif
1465
1466static inline void
1467pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
1468{
1469 if (fmt == NULL) return;
1470
1471 pfm_buf_fmt_exit(fmt, current, NULL, NULL);
1472
1473}
1474
1475
1476
1477
1478
1479
1480
1481static struct vfsmount *pfmfs_mnt __read_mostly;
1482
1483static int __init
1484init_pfm_fs(void)
1485{
1486 int err = register_filesystem(&pfm_fs_type);
1487 if (!err) {
1488 pfmfs_mnt = kern_mount(&pfm_fs_type);
1489 err = PTR_ERR(pfmfs_mnt);
1490 if (IS_ERR(pfmfs_mnt))
1491 unregister_filesystem(&pfm_fs_type);
1492 else
1493 err = 0;
1494 }
1495 return err;
1496}
1497
1498static ssize_t
1499pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1500{
1501 pfm_context_t *ctx;
1502 pfm_msg_t *msg;
1503 ssize_t ret;
1504 unsigned long flags;
1505 DECLARE_WAITQUEUE(wait, current);
1506 if (PFM_IS_FILE(filp) == 0) {
1507 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1508 return -EINVAL;
1509 }
1510
1511 ctx = filp->private_data;
1512 if (ctx == NULL) {
1513 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1514 return -EINVAL;
1515 }
1516
1517
1518
1519
1520 if (size < sizeof(pfm_msg_t)) {
1521 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
1522 return -EINVAL;
1523 }
1524
1525 PROTECT_CTX(ctx, flags);
1526
1527
1528
1529
1530 add_wait_queue(&ctx->ctx_msgq_wait, &wait);
1531
1532
1533 for(;;) {
1534
1535
1536
1537
1538 set_current_state(TASK_INTERRUPTIBLE);
1539
1540 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
1541
1542 ret = 0;
1543 if(PFM_CTXQ_EMPTY(ctx) == 0) break;
1544
1545 UNPROTECT_CTX(ctx, flags);
1546
1547
1548
1549
1550 ret = -EAGAIN;
1551 if(filp->f_flags & O_NONBLOCK) break;
1552
1553
1554
1555
1556 if(signal_pending(current)) {
1557 ret = -EINTR;
1558 break;
1559 }
1560
1561
1562
1563 schedule();
1564
1565 PROTECT_CTX(ctx, flags);
1566 }
1567 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1568 set_current_state(TASK_RUNNING);
1569 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1570
1571 if (ret < 0) goto abort;
1572
1573 ret = -EINVAL;
1574 msg = pfm_get_next_msg(ctx);
1575 if (msg == NULL) {
1576 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1577 goto abort_locked;
1578 }
1579
1580 DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
1581
1582 ret = -EFAULT;
1583 if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
1584
1585abort_locked:
1586 UNPROTECT_CTX(ctx, flags);
1587abort:
1588 return ret;
1589}
1590
1591static ssize_t
1592pfm_write(struct file *file, const char __user *ubuf,
1593 size_t size, loff_t *ppos)
1594{
1595 DPRINT(("pfm_write called\n"));
1596 return -EINVAL;
1597}
1598
1599static __poll_t
1600pfm_poll(struct file *filp, poll_table * wait)
1601{
1602 pfm_context_t *ctx;
1603 unsigned long flags;
1604 __poll_t mask = 0;
1605
1606 if (PFM_IS_FILE(filp) == 0) {
1607 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1608 return 0;
1609 }
1610
1611 ctx = filp->private_data;
1612 if (ctx == NULL) {
1613 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1614 return 0;
1615 }
1616
1617
1618 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));
1619
1620 poll_wait(filp, &ctx->ctx_msgq_wait, wait);
1621
1622 PROTECT_CTX(ctx, flags);
1623
1624 if (PFM_CTXQ_EMPTY(ctx) == 0)
1625 mask = EPOLLIN | EPOLLRDNORM;
1626
1627 UNPROTECT_CTX(ctx, flags);
1628
1629 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));
1630
1631 return mask;
1632}
1633
1634static long
1635pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1636{
1637 DPRINT(("pfm_ioctl called\n"));
1638 return -EINVAL;
1639}
1640
1641
1642
1643
1644static inline int
1645pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1646{
1647 int ret;
1648
1649 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1650
1651 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1652 task_pid_nr(current),
1653 fd,
1654 on,
1655 ctx->ctx_async_queue, ret));
1656
1657 return ret;
1658}
1659
1660static int
1661pfm_fasync(int fd, struct file *filp, int on)
1662{
1663 pfm_context_t *ctx;
1664 int ret;
1665
1666 if (PFM_IS_FILE(filp) == 0) {
1667 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1668 return -EBADF;
1669 }
1670
1671 ctx = filp->private_data;
1672 if (ctx == NULL) {
1673 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1674 return -EBADF;
1675 }
1676
1677
1678
1679
1680
1681
1682
1683 ret = pfm_do_fasync(fd, filp, ctx, on);
1684
1685
1686 DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1687 fd,
1688 on,
1689 ctx->ctx_async_queue, ret));
1690
1691 return ret;
1692}
1693
1694#ifdef CONFIG_SMP
1695
1696
1697
1698
1699
1700static void
1701pfm_syswide_force_stop(void *info)
1702{
1703 pfm_context_t *ctx = (pfm_context_t *)info;
1704 struct pt_regs *regs = task_pt_regs(current);
1705 struct task_struct *owner;
1706 unsigned long flags;
1707 int ret;
1708
1709 if (ctx->ctx_cpu != smp_processor_id()) {
1710 printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
1711 ctx->ctx_cpu,
1712 smp_processor_id());
1713 return;
1714 }
1715 owner = GET_PMU_OWNER();
1716 if (owner != ctx->ctx_task) {
1717 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1718 smp_processor_id(),
1719 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1720 return;
1721 }
1722 if (GET_PMU_CTX() != ctx) {
1723 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
1724 smp_processor_id(),
1725 GET_PMU_CTX(), ctx);
1726 return;
1727 }
1728
1729 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1730
1731
1732
1733
1734
1735 local_irq_save(flags);
1736
1737 ret = pfm_context_unload(ctx, NULL, 0, regs);
1738 if (ret) {
1739 DPRINT(("context_unload returned %d\n", ret));
1740 }
1741
1742
1743
1744
1745 local_irq_restore(flags);
1746}
1747
1748static void
1749pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
1750{
1751 int ret;
1752
1753 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
1754 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
1755 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
1756}
1757#endif
1758
1759
1760
1761
1762
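/*
 * flush() handler, invoked on every close() of a perfmon file descriptor.
 * If the context is attached to the calling task it is unloaded here
 * (possibly via an IPI to the CPU owning a system-wide session); any
 * sampling-buffer mapping in the caller's address space is removed after
 * the context lock is dropped.
 */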
1763static int
1764pfm_flush(struct file *filp, fl_owner_t id)
1765{
1766 pfm_context_t *ctx;
1767 struct task_struct *task;
1768 struct pt_regs *regs;
1769 unsigned long flags;
1770 unsigned long smpl_buf_size = 0UL;
1771 void *smpl_buf_vaddr = NULL;
1772 int state, is_system;
1773
1774 if (PFM_IS_FILE(filp) == 0) {
1775 DPRINT(("bad magic for\n"));
1776 return -EBADF;
1777 }
1778
1779 ctx = filp->private_data;
1780 if (ctx == NULL) {
1781 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1782 return -EBADF;
1783 }
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798 PROTECT_CTX(ctx, flags);
1799
1800 state = ctx->ctx_state;
1801 is_system = ctx->ctx_fl_system;
1802
1803 task = PFM_CTX_TASK(ctx);
1804 regs = task_pt_regs(task);
1805
1806 DPRINT(("ctx_state=%d is_current=%d\n",
1807 state,
1808 task == current ? 1 : 0));
1809
1810
1811
1812
1813
1814
1815
1816
1817 if (task == current) {
1818#ifdef CONFIG_SMP
1819
1820
1821
1822
1823
1824
1825
1826 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
1827
1828 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
1829
1830
1831
1832 local_irq_restore(flags);
1833
1834 pfm_syswide_cleanup_other_cpu(ctx);
1835
1836
1837
1838
1839 local_irq_save(flags);
1840
1841
1842
1843
1844 } else
1845#endif
1846 {
1847
1848 DPRINT(("forcing unload\n"));
1849
1850
1851
1852
1853 pfm_context_unload(ctx, NULL, 0, regs);
1854
1855 DPRINT(("ctx_state=%d\n", ctx->ctx_state));
1856 }
1857 }
1858
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870 if (ctx->ctx_smpl_vaddr && current->mm) {
1871 smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
1872 smpl_buf_size = ctx->ctx_smpl_size;
1873 }
1874
1875 UNPROTECT_CTX(ctx, flags);
1876
1877
1878
1879
1880
1881
1882
1883 if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);
1884
1885 return 0;
1886}
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
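/*
 * release() handler, called when the last reference to the file is dropped.
 * Completes the teardown started by pfm_flush(): wakes up or zombifies the
 * monitored task if needed, releases the session, frees the sampling buffer
 * and finally the context itself.
 */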
1902static int
1903pfm_close(struct inode *inode, struct file *filp)
1904{
1905 pfm_context_t *ctx;
1906 struct task_struct *task;
1907 struct pt_regs *regs;
1908 DECLARE_WAITQUEUE(wait, current);
1909 unsigned long flags;
1910 unsigned long smpl_buf_size = 0UL;
1911 void *smpl_buf_addr = NULL;
1912 int free_possible = 1;
1913 int state, is_system;
1914
1915 DPRINT(("pfm_close called private=%p\n", filp->private_data));
1916
1917 if (PFM_IS_FILE(filp) == 0) {
1918 DPRINT(("bad magic\n"));
1919 return -EBADF;
1920 }
1921
1922 ctx = filp->private_data;
1923 if (ctx == NULL) {
1924 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1925 return -EBADF;
1926 }
1927
1928 PROTECT_CTX(ctx, flags);
1929
1930 state = ctx->ctx_state;
1931 is_system = ctx->ctx_fl_system;
1932
1933 task = PFM_CTX_TASK(ctx);
1934 regs = task_pt_regs(task);
1935
1936 DPRINT(("ctx_state=%d is_current=%d\n",
1937 state,
1938 task == current ? 1 : 0));
1939
1940
1941
1942
1943 if (state == PFM_CTX_UNLOADED) goto doit;
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {
1958
1959
1960
1961
1962
1963
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973 ctx->ctx_fl_going_zombie = 1;
1974
1975
1976
1977
1978 complete(&ctx->ctx_restart_done);
1979
1980 DPRINT(("waking up ctx_state=%d\n", state));
1981
1982
1983
1984
1985
1986
1987
1988
1989
1990 set_current_state(TASK_INTERRUPTIBLE);
1991 add_wait_queue(&ctx->ctx_zombieq, &wait);
1992
1993 UNPROTECT_CTX(ctx, flags);
1994
1995
1996
1997
1998
1999
2000 schedule();
2001
2002
2003 PROTECT_CTX(ctx, flags);
2004
2005
2006 remove_wait_queue(&ctx->ctx_zombieq, &wait);
2007 set_current_state(TASK_RUNNING);
2008
2009
2010
2011
2012 DPRINT(("after zombie wakeup ctx_state=%d for\n", state));
2013 }
2014 else if (task != current) {
2015#ifdef CONFIG_SMP
2016
2017
2018
2019 ctx->ctx_state = PFM_CTX_ZOMBIE;
2020
2021 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2022
2023
2024
2025
2026 free_possible = 0;
2027#else
2028 pfm_context_unload(ctx, NULL, 0, regs);
2029#endif
2030 }
2031
2032doit:
2033
2034 state = ctx->ctx_state;
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
2048
2049
2050 if (ctx->ctx_smpl_hdr) {
2051 smpl_buf_addr = ctx->ctx_smpl_hdr;
2052 smpl_buf_size = ctx->ctx_smpl_size;
2053
2054 ctx->ctx_smpl_hdr = NULL;
2055 ctx->ctx_fl_is_sampling = 0;
2056 }
2057
2058 DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
2059 state,
2060 free_possible,
2061 smpl_buf_addr,
2062 smpl_buf_size));
2063
2064 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);
2065
2066
2067
2068
2069 if (state == PFM_CTX_ZOMBIE) {
2070 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu);
2071 }
2072
2073
2074
2075
2076
2077 filp->private_data = NULL;
2078
2079
2080
2081
2082
2083
2084
2085
2086 UNPROTECT_CTX(ctx, flags);
2087
2088
2089
2090
2091
2092 vfree(smpl_buf_addr);
2093
2094
2095
2096
2097 if (free_possible) pfm_context_free(ctx);
2098
2099 return 0;
2100}
2101
2102static const struct file_operations pfm_file_ops = {
2103 .llseek = no_llseek,
2104 .read = pfm_read,
2105 .write = pfm_write,
2106 .poll = pfm_poll,
2107 .unlocked_ioctl = pfm_ioctl,
2108 .fasync = pfm_fasync,
2109 .release = pfm_close,
2110 .flush = pfm_flush
2111};
2112
2113static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
2114{
2115 return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
2116 d_inode(dentry)->i_ino);
2117}
2118
2119static const struct dentry_operations pfmfs_dentry_operations = {
2120 .d_delete = always_delete_dentry,
2121 .d_dname = pfmfs_dname,
2122};
2123
2124
2125static struct file *
2126pfm_alloc_file(pfm_context_t *ctx)
2127{
2128 struct file *file;
2129 struct inode *inode;
2130 struct path path;
2131 struct qstr this = { .name = "" };
2132
2133
2134
2135
2136 inode = new_inode(pfmfs_mnt->mnt_sb);
2137 if (!inode)
2138 return ERR_PTR(-ENOMEM);
2139
2140 DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));
2141
2142 inode->i_mode = S_IFCHR|S_IRUGO;
2143 inode->i_uid = current_fsuid();
2144 inode->i_gid = current_fsgid();
2145
2146
2147
2148
2149 path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
2150 if (!path.dentry) {
2151 iput(inode);
2152 return ERR_PTR(-ENOMEM);
2153 }
2154 path.mnt = mntget(pfmfs_mnt);
2155
2156 d_add(path.dentry, inode);
2157
2158 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
2159 if (IS_ERR(file)) {
2160 path_put(&path);
2161 return file;
2162 }
2163
2164 file->f_flags = O_RDONLY;
2165 file->private_data = ctx;
2166
2167 return file;
2168}
2169
2170static int
2171pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
2172{
2173 DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));
2174
2175 while (size > 0) {
2176 unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;
2177
2178
2179 if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
2180 return -ENOMEM;
2181
2182 addr += PAGE_SIZE;
2183 buf += PAGE_SIZE;
2184 size -= PAGE_SIZE;
2185 }
2186 return 0;
2187}
2188
2189
2190
2191
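/*
 * Allocate the kernel-level sampling buffer with vzalloc() and remap it
 * read-only into the task's address space. The requested size is checked
 * against RLIMIT_MEMLOCK, and the resulting user address is returned
 * through user_vaddr.
 */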
2192static int
2193pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
2194{
2195 struct mm_struct *mm = task->mm;
2196 struct vm_area_struct *vma = NULL;
2197 unsigned long size;
2198 void *smpl_buf;
2199
2200
2201
2202
2203
2204 size = PAGE_ALIGN(rsize);
2205
2206 DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));
2207
2208
2209
2210
2211
2212
2213
2214
2215
2216 if (size > task_rlimit(task, RLIMIT_MEMLOCK))
2217 return -ENOMEM;
2218
2219
2220
2221
2222 smpl_buf = vzalloc(size);
2223 if (smpl_buf == NULL) {
2224 DPRINT(("Can't allocate sampling buffer\n"));
2225 return -ENOMEM;
2226 }
2227
2228 DPRINT(("smpl_buf @%p\n", smpl_buf));
2229
2230
2231 vma = vm_area_alloc(mm);
2232 if (!vma) {
2233 DPRINT(("Cannot allocate vma\n"));
2234 goto error_kmem;
2235 }
2236
2237
2238
2239
2240 vma->vm_file = get_file(filp);
2241 vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
2242 vma->vm_page_prot = PAGE_READONLY;
2243
2244
2245
2246
2247
2248
2249 ctx->ctx_smpl_hdr = smpl_buf;
2250 ctx->ctx_smpl_size = size;
2251
2252
2253
2254
2255
2256
2257
2258 down_write(&task->mm->mmap_sem);
2259
2260
2261 vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
2262 if (IS_ERR_VALUE(vma->vm_start)) {
2263 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2264 up_write(&task->mm->mmap_sem);
2265 goto error;
2266 }
2267 vma->vm_end = vma->vm_start + size;
2268 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2269
2270 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));
2271
2272
2273 if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
2274 DPRINT(("Can't remap buffer\n"));
2275 up_write(&task->mm->mmap_sem);
2276 goto error;
2277 }
2278
2279
2280
2281
2282
2283 insert_vm_struct(mm, vma);
2284
2285 vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
2286 up_write(&task->mm->mmap_sem);
2287
2288
2289
2290
2291 ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
2292 *(unsigned long *)user_vaddr = vma->vm_start;
2293
2294 return 0;
2295
2296error:
2297 vm_area_free(vma);
2298error_kmem:
2299 vfree(smpl_buf);
2300
2301 return -ENOMEM;
2302}
2303
2304
2305
2306
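/*
 * ptrace-like permission check: the caller must match the target task's
 * real, effective and saved uids/gids, or hold CAP_SYS_PTRACE. Returns
 * non-zero when the attach must be refused.
 */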
2307static int
2308pfm_bad_permissions(struct task_struct *task)
2309{
2310 const struct cred *tcred;
2311 kuid_t uid = current_uid();
2312 kgid_t gid = current_gid();
2313 int ret;
2314
2315 rcu_read_lock();
2316 tcred = __task_cred(task);
2317
2318
2319 DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
2320 from_kuid(&init_user_ns, uid),
2321 from_kgid(&init_user_ns, gid),
2322 from_kuid(&init_user_ns, tcred->euid),
2323 from_kuid(&init_user_ns, tcred->suid),
2324 from_kuid(&init_user_ns, tcred->uid),
2325 from_kgid(&init_user_ns, tcred->egid),
2326 from_kgid(&init_user_ns, tcred->sgid)));
2327
2328 ret = ((!uid_eq(uid, tcred->euid))
2329 || (!uid_eq(uid, tcred->suid))
2330 || (!uid_eq(uid, tcred->uid))
2331 || (!gid_eq(gid, tcred->egid))
2332 || (!gid_eq(gid, tcred->sgid))
2333 || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);
2334
2335 rcu_read_unlock();
2336 return ret;
2337}
2338
2339static int
2340pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
2341{
2342 int ctx_flags;
2343
2344
2345
2346 ctx_flags = pfx->ctx_flags;
2347
2348 if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
2349
2350
2351
2352
2353 if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
2354 DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
2355 return -EINVAL;
2356 }
	}
2359
2360
2361 return 0;
2362}
2363
2364static int
2365pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
2366 unsigned int cpu, pfarg_context_t *arg)
2367{
2368 pfm_buffer_fmt_t *fmt = NULL;
2369 unsigned long size = 0UL;
2370 void *uaddr = NULL;
2371 void *fmt_arg = NULL;
2372 int ret = 0;
2373#define PFM_CTXARG_BUF_ARG(a) (pfm_buffer_fmt_t *)(a+1)
2374
2375
2376 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2377 if (fmt == NULL) {
2378 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2379 return -EINVAL;
2380 }
2381
2382
2383
2384
2385 if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);
2386
2387 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2388
2389 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2390
2391 if (ret) goto error;
2392
2393
2394 ctx->ctx_buf_fmt = fmt;
2395 ctx->ctx_fl_is_sampling = 1;
2396
2397
2398
2399
2400 ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
2401 if (ret) goto error;
2402
2403 if (size) {
2404
2405
2406
2407 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
2408 if (ret) goto error;
2409
2410
2411 arg->ctx_smpl_vaddr = uaddr;
2412 }
2413 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);
2414
2415error:
2416 return ret;
2417}
2418
2419static void
2420pfm_reset_pmu_state(pfm_context_t *ctx)
2421{
2422 int i;
2423
2424
2425
2426
2427 for (i=1; PMC_IS_LAST(i) == 0; i++) {
2428 if (PMC_IS_IMPL(i) == 0) continue;
2429 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
2430 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
2431 }
2432
2433
2434
2435
2436
2437
2438
2439
2440
2441
2442
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;
2460
2461
2462
2463
2464 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];
2465
2466 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0]));
2467
2468
2469
2470
2471 ctx->ctx_used_ibrs[0] = 0UL;
2472 ctx->ctx_used_dbrs[0] = 0UL;
2473}
2474
2475static int
2476pfm_ctx_getsize(void *arg, size_t *sz)
2477{
2478 pfarg_context_t *req = (pfarg_context_t *)arg;
2479 pfm_buffer_fmt_t *fmt;
2480
2481 *sz = 0;
2482
2483 if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;
2484
2485 fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
2486 if (fmt == NULL) {
2487 DPRINT(("cannot find buffer format\n"));
2488 return -EINVAL;
2489 }
2490
2491 *sz = fmt->fmt_arg_size;
2492 DPRINT(("arg_size=%lu\n", *sz));
2493
2494 return 0;
2495}
2496
2497
2498
2499
2500
2501
2502
2503
2504
2505static int
2506pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2507{
2508
2509
2510
2511 if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
2513 return -EPERM;
2514 }
2515 if (pfm_bad_permissions(task)) {
2516 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2517 return -EPERM;
2518 }
2519
2520
2521
2522 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2523 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2524 return -EINVAL;
2525 }
2526
2527 if (task->exit_state == EXIT_ZOMBIE) {
2528 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2529 return -EBUSY;
2530 }
2531
2532
2533
2534
2535 if (task == current) return 0;
2536
2537 if (!task_is_stopped_or_traced(task)) {
2538 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2539 return -EBUSY;
2540 }
2541
2542
2543
2544 wait_task_inactive(task, 0);
2545
2546
2547
2548 return 0;
2549}
2550
2551static int
2552pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
2553{
2554 struct task_struct *p = current;
2555 int ret;
2556
2557
2558 if (pid < 2) return -EPERM;
2559
2560 if (pid != task_pid_vnr(current)) {
2561
2562 p = find_get_task_by_vpid(pid);
2563 if (!p)
2564 return -ESRCH;
2565 }
2566
2567 ret = pfm_task_incompatible(ctx, p);
2568 if (ret == 0) {
2569 *task = p;
2570 } else if (p != current) {
2571 pfm_put_task(p);
2572 }
2573 return ret;
2574}
2575
2576
2577
2578static int
2579pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2580{
2581 pfarg_context_t *req = (pfarg_context_t *)arg;
2582 struct file *filp;
2583 struct path path;
2584 int ctx_flags;
2585 int fd;
2586 int ret;
2587
2588
2589 ret = pfarg_is_sane(current, req);
2590 if (ret < 0)
2591 return ret;
2592
2593 ctx_flags = req->ctx_flags;
2594
2595 ret = -ENOMEM;
2596
2597 fd = get_unused_fd_flags(0);
2598 if (fd < 0)
2599 return fd;
2600
2601 ctx = pfm_context_alloc(ctx_flags);
2602 if (!ctx)
2603 goto error;
2604
2605 filp = pfm_alloc_file(ctx);
2606 if (IS_ERR(filp)) {
2607 ret = PTR_ERR(filp);
2608 goto error_file;
2609 }
2610
2611 req->ctx_fd = ctx->ctx_fd = fd;
2612
2613
2614
2615
2616 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2617 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2618 if (ret)
2619 goto buffer_error;
2620 }
2621
2622 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2623 ctx,
2624 ctx_flags,
2625 ctx->ctx_fl_system,
2626 ctx->ctx_fl_block,
2627 ctx->ctx_fl_excl_idle,
2628 ctx->ctx_fl_no_msg,
2629 ctx->ctx_fd));
2630
2631
2632
2633
2634 pfm_reset_pmu_state(ctx);
2635
2636 fd_install(fd, filp);
2637
2638 return 0;
2639
2640buffer_error:
2641 path = filp->f_path;
2642 put_filp(filp);
2643 path_put(&path);
2644
2645 if (ctx->ctx_buf_fmt) {
2646 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2647 }
2648error_file:
2649 pfm_context_free(ctx);
2650
2651error:
2652 put_unused_fd(fd);
2653 return ret;
2654}
2655
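/*
 * Compute the value to reload into a counting PMD: the long or short reset
 * value, optionally perturbed by a pseudo-random offset (carta_random32)
 * when PFM_REGFL_RANDOM is set. The value used is recorded in reg->lval.
 */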
2656static inline unsigned long
2657pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2658{
2659 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2660 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2661 extern unsigned long carta_random32 (unsigned long seed);
2662
2663 if (reg->flags & PFM_REGFL_RANDOM) {
2664 new_seed = carta_random32(old_seed);
2665 val -= (old_seed & mask);
2666 if ((mask >> 32) != 0)
2667
2668 new_seed |= carta_random32(old_seed >> 32) << 32;
2669 reg->seed = new_seed;
2670 }
2671 reg->lval = val;
2672 return val;
2673}
2674
2675static void
2676pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2677{
2678 unsigned long mask = ovfl_regs[0];
2679 unsigned long reset_others = 0UL;
2680 unsigned long val;
2681 int i;
2682
2683
2684
2685
2686 mask >>= PMU_FIRST_COUNTER;
2687 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2688
2689 if ((mask & 0x1UL) == 0UL) continue;
2690
2691 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2692 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2693
2694 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2695 }
2696
2697
2698
2699
2700 for(i = 0; reset_others; i++, reset_others >>= 1) {
2701
2702 if ((reset_others & 0x1) == 0) continue;
2703
2704 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2705
2706 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2707 is_long_reset ? "long" : "short", i, val));
2708 }
2709}
2710
2711static void
2712pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2713{
2714 unsigned long mask = ovfl_regs[0];
2715 unsigned long reset_others = 0UL;
2716 unsigned long val;
2717 int i;
2718
2719 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2720
2721 if (ctx->ctx_state == PFM_CTX_MASKED) {
2722 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2723 return;
2724 }
2725
2726
2727
2728
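	/*
	 * overflowed counters start at PMU_FIRST_COUNTER: reload each one
	 * with a fresh short or long reset value
	 */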
2729 mask >>= PMU_FIRST_COUNTER;
2730 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2731
2732 if ((mask & 0x1UL) == 0UL) continue;
2733
2734 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset);
2735 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2736
2737 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2738
2739 pfm_write_soft_counter(ctx, i, val);
2740 }
2741
2742
2743
2744
2745 for(i = 0; reset_others; i++, reset_others >>= 1) {
2746
2747 if ((reset_others & 0x1) == 0) continue;
2748
2749 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2750
2751 if (PMD_IS_COUNTING(i)) {
2752 pfm_write_soft_counter(ctx, i, val);
2753 } else {
2754 ia64_set_pmd(i, val);
2755 }
2756 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2757 is_long_reset ? "long" : "short", i, val));
2758 }
2759 ia64_srlz_d();
2760}
2761
2762static int
2763pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2764{
2765 struct task_struct *task;
2766 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2767 unsigned long value, pmc_pm;
2768 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2769 unsigned int cnum, reg_flags, flags, pmc_type;
2770 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2771 int is_monitor, is_counting, state;
2772 int ret = -EINVAL;
2773 pfm_reg_check_t wr_func;
2774#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2775
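	/*
	 * Rough usage sketch (not from this file, names of the event encoding
	 * are placeholders): a monitoring tool programs the PMCs through the
	 * perfmonctl() system call, e.g.
	 *
	 *	pfarg_reg_t pc;
	 *	memset(&pc, 0, sizeof(pc));
	 *	pc.reg_num   = 4;		(hypothetical counting PMC)
	 *	pc.reg_value = event_encoding;	(placeholder value)
	 *	perfmonctl(ctx_fd, PFM_WRITE_PMCS, &pc, 1);
	 *
	 * which ends up here with count copied-in requests in arg.
	 */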
2776 state = ctx->ctx_state;
2777 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2778 is_system = ctx->ctx_fl_system;
2779 task = ctx->ctx_task;
2780 impl_pmds = pmu_conf->impl_pmds[0];
2781
2782 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2783
2784 if (is_loaded) {
2785
2786
2787
2788
2789
2790 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2791 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2792 return -EBUSY;
2793 }
2794 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2795 }
2796 expert_mode = pfm_sysctl.expert_mode;
2797
2798 for (i = 0; i < count; i++, req++) {
2799
2800 cnum = req->reg_num;
2801 reg_flags = req->reg_flags;
2802 value = req->reg_value;
2803 smpl_pmds = req->reg_smpl_pmds[0];
2804 reset_pmds = req->reg_reset_pmds[0];
2805 flags = 0;
2806
2807
2808 if (cnum >= PMU_MAX_PMCS) {
2809 DPRINT(("pmc%u is invalid\n", cnum));
2810 goto error;
2811 }
2812
2813 pmc_type = pmu_conf->pmc_desc[cnum].type;
2814 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2815 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2816 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2817
2818
2819
2820
2821
2822
2823 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2824 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2825 goto error;
2826 }
2827 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2828
2829
2830
2831
2832
2833 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2834 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2835 cnum,
2836 pmc_pm,
2837 is_system));
2838 goto error;
2839 }
2840
2841 if (is_counting) {
2842
2843
2844
2845
2846 value |= 1 << PMU_PMC_OI;
2847
2848 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2849 flags |= PFM_REGFL_OVFL_NOTIFY;
2850 }
2851
2852 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2853
2854
2855 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2856 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2857 goto error;
2858 }
2859
2860
2861 if ((reset_pmds & impl_pmds) != reset_pmds) {
2862 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2863 goto error;
2864 }
2865 } else {
2866 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2867 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2868 goto error;
2869 }
2870
2871 }
2872
2873
2874
2875
2876 if (likely(expert_mode == 0 && wr_func)) {
2877 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2878 if (ret) goto error;
2879 ret = -EINVAL;
2880 }
2881
2882
2883
2884
2885 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2886
2887
2888
2889
2890
2891
2892
2893
2894 if (is_counting) {
2895
2896
2897
2898 ctx->ctx_pmds[cnum].flags = flags;
2899
2900 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2901 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2902 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2903
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913
2914
2915 CTX_USED_PMD(ctx, reset_pmds);
2916 CTX_USED_PMD(ctx, smpl_pmds);
2917
2918
2919
2920
2921			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
2922 }
2923
2924
2925
2926
2927
2928 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
2929
2930
2931
2932
2933
2934
2935
2936
2937
2938
2939
2940
2941
2942 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
2943
2944
2945
2946
2947 ctx->ctx_pmcs[cnum] = value;
2948
2949 if (is_loaded) {
2950
2951
2952
2953 if (is_system == 0) ctx->th_pmcs[cnum] = value;
2954
2955
2956
2957
2958 if (can_access_pmu) {
2959 ia64_set_pmc(cnum, value);
2960 }
2961#ifdef CONFIG_SMP
2962 else {
2963
2964
2965
2966
2967
2968
2969
2970 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
2971 }
2972#endif
2973 }
2974
2975 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
2976 cnum,
2977 value,
2978 is_loaded,
2979 can_access_pmu,
2980 flags,
2981 ctx->ctx_all_pmcs[0],
2982 ctx->ctx_used_pmds[0],
2983 ctx->ctx_pmds[cnum].eventid,
2984 smpl_pmds,
2985 reset_pmds,
2986 ctx->ctx_reload_pmcs[0],
2987 ctx->ctx_used_monitors[0],
2988 ctx->ctx_ovfl_regs[0]));
2989 }
2990
2991
2992
2993
2994 if (can_access_pmu) ia64_srlz_d();
2995
2996 return 0;
2997error:
2998 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
2999 return ret;
3000}
3001
3002static int
3003pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3004{
3005 struct task_struct *task;
3006 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3007 unsigned long value, hw_value, ovfl_mask;
3008 unsigned int cnum;
3009 int i, can_access_pmu = 0, state;
3010 int is_counting, is_loaded, is_system, expert_mode;
3011 int ret = -EINVAL;
3012 pfm_reg_check_t wr_func;
3013
3014
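	/*
	 * PFM_WRITE_PMDS initializes the data registers: for counting PMDs
	 * only the low ovfl_mask bits go to the hardware register, the upper
	 * bits are kept in the software copy (ctx_pmds[].val)
	 */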
3015 state = ctx->ctx_state;
3016 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3017 is_system = ctx->ctx_fl_system;
3018 ovfl_mask = pmu_conf->ovfl_val;
3019 task = ctx->ctx_task;
3020
3021 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3022
3023
3024
3025
3026
3027 if (likely(is_loaded)) {
3028
3029
3030
3031
3032
3033 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3034 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3035 return -EBUSY;
3036 }
3037 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3038 }
3039 expert_mode = pfm_sysctl.expert_mode;
3040
3041 for (i = 0; i < count; i++, req++) {
3042
3043 cnum = req->reg_num;
3044 value = req->reg_value;
3045
3046 if (!PMD_IS_IMPL(cnum)) {
3047 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3048 goto abort_mission;
3049 }
3050 is_counting = PMD_IS_COUNTING(cnum);
3051 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3052
3053
3054
3055
3056 if (unlikely(expert_mode == 0 && wr_func)) {
3057 unsigned long v = value;
3058
3059 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3060 if (ret) goto abort_mission;
3061
3062 value = v;
3063 ret = -EINVAL;
3064 }
3065
3066
3067
3068
3069 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3070
3071
3072
3073
3074 hw_value = value;
3075
3076
3077
3078
3079 if (is_counting) {
3080
3081
3082
3083 ctx->ctx_pmds[cnum].lval = value;
3084
3085
3086
3087
3088 if (is_loaded) {
3089 hw_value = value & ovfl_mask;
3090 value = value & ~ovfl_mask;
3091 }
3092 }
3093
3094
3095
3096 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3097 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3098
3099
3100
3101
3102 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3103 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3104
3105
3106
3107
3108 ctx->ctx_pmds[cnum].val = value;
3109
3110
3111
3112
3113
3114
3115
3116 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3117
3118
3119
3120
3121 CTX_USED_PMD(ctx, RDEP(cnum));
3122
3123
3124
3125
3126
3127 if (is_counting && state == PFM_CTX_MASKED) {
3128			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3129 }
3130
3131 if (is_loaded) {
3132
3133
3134
3135 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3136
3137
3138
3139
3140 if (can_access_pmu) {
3141 ia64_set_pmd(cnum, hw_value);
3142 } else {
3143#ifdef CONFIG_SMP
3144
3145
3146
3147
3148
3149 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3150#endif
3151 }
3152 }
3153
3154 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3155 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3156 cnum,
3157 value,
3158 is_loaded,
3159 can_access_pmu,
3160 hw_value,
3161 ctx->ctx_pmds[cnum].val,
3162 ctx->ctx_pmds[cnum].short_reset,
3163 ctx->ctx_pmds[cnum].long_reset,
3164 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3165 ctx->ctx_pmds[cnum].seed,
3166 ctx->ctx_pmds[cnum].mask,
3167 ctx->ctx_used_pmds[0],
3168 ctx->ctx_pmds[cnum].reset_pmds[0],
3169 ctx->ctx_reload_pmds[0],
3170 ctx->ctx_all_pmds[0],
3171 ctx->ctx_ovfl_regs[0]));
3172 }
3173
3174
3175
3176
3177 if (can_access_pmu) ia64_srlz_d();
3178
3179 return 0;
3180
3181abort_mission:
3182
3183
3184
3185 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3186 return ret;
3187}
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198static int
3199pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3200{
3201 struct task_struct *task;
3202 unsigned long val = 0UL, lval, ovfl_mask, sval;
3203 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3204 unsigned int cnum, reg_flags = 0;
3205 int i, can_access_pmu = 0, state;
3206 int is_loaded, is_system, is_counting, expert_mode;
3207 int ret = -EINVAL;
3208 pfm_reg_check_t rd_func;
3209
3210
3211
3212
3213
3214
3215 state = ctx->ctx_state;
3216 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3217 is_system = ctx->ctx_fl_system;
3218 ovfl_mask = pmu_conf->ovfl_val;
3219 task = ctx->ctx_task;
3220
3221 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3222
3223 if (likely(is_loaded)) {
3224
3225
3226
3227
3228
3229 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3230 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3231 return -EBUSY;
3232 }
3233
3234
3235
3236 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3237
3238 if (can_access_pmu) ia64_srlz_d();
3239 }
3240 expert_mode = pfm_sysctl.expert_mode;
3241
3242 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3243 is_loaded,
3244 can_access_pmu,
3245 state));
3246
3247
3248
3249
3250
3251
3252 for (i = 0; i < count; i++, req++) {
3253
3254 cnum = req->reg_num;
3255 reg_flags = req->reg_flags;
3256
3257 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3258
3259
3260
3261
3262
3263
3264
3265
3266 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3267
3268 sval = ctx->ctx_pmds[cnum].val;
3269 lval = ctx->ctx_pmds[cnum].lval;
3270 is_counting = PMD_IS_COUNTING(cnum);
3271
3272
3273
3274
3275
3276
3277 if (can_access_pmu){
3278 val = ia64_get_pmd(cnum);
3279 } else {
3280
3281
3282
3283
3284
3285 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3286 }
3287 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3288
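		/*
		 * a counting PMD is virtualized to 64 bits: the hardware
		 * register holds only the low ovfl_mask bits, the rest lives
		 * in the software copy, so add the two parts back together
		 */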
3289 if (is_counting) {
3290
3291
3292
3293 val &= ovfl_mask;
3294 val += sval;
3295 }
3296
3297
3298
3299
3300 if (unlikely(expert_mode == 0 && rd_func)) {
3301 unsigned long v = val;
3302 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3303 if (ret) goto error;
3304 val = v;
3305 ret = -EINVAL;
3306 }
3307
3308 PFM_REG_RETFLAG_SET(reg_flags, 0);
3309
3310 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3311
3312
3313
3314
3315
3316
3317 req->reg_value = val;
3318 req->reg_flags = reg_flags;
3319 req->reg_last_reset_val = lval;
3320 }
3321
3322 return 0;
3323
3324error:
3325 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3326 return ret;
3327}
3328
3329int
3330pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3331{
3332 pfm_context_t *ctx;
3333
3334 if (req == NULL) return -EINVAL;
3335
3336 ctx = GET_PMU_CTX();
3337
3338 if (ctx == NULL) return -EINVAL;
3339
3340
3341
3342
3343
3344 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3345
3346 return pfm_write_pmcs(ctx, req, nreq, regs);
3347}
3348EXPORT_SYMBOL(pfm_mod_write_pmcs);
3349
3350int
3351pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3352{
3353 pfm_context_t *ctx;
3354
3355 if (req == NULL) return -EINVAL;
3356
3357 ctx = GET_PMU_CTX();
3358
3359 if (ctx == NULL) return -EINVAL;
3360
3361
3362
3363
3364
3365 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3366
3367 return pfm_read_pmds(ctx, req, nreq, regs);
3368}
3369EXPORT_SYMBOL(pfm_mod_read_pmds);
3370
3371
3372
3373
3374
3375int
3376pfm_use_debug_registers(struct task_struct *task)
3377{
3378 pfm_context_t *ctx = task->thread.pfm_context;
3379 unsigned long flags;
3380 int ret = 0;
3381
3382 if (pmu_conf->use_rr_dbregs == 0) return 0;
3383
3384 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3385
3386
3387
3388
3389 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3400
3401 LOCK_PFS(flags);
3402
3403
3404
3405
3406
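	/*
	 * if a system-wide perfmon session is using the debug registers for
	 * range restrictions, ptrace cannot have them
	 */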
3407 if (pfm_sessions.pfs_sys_use_dbregs> 0)
3408 ret = -1;
3409 else
3410 pfm_sessions.pfs_ptrace_use_dbregs++;
3411
3412 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3413 pfm_sessions.pfs_ptrace_use_dbregs,
3414 pfm_sessions.pfs_sys_use_dbregs,
3415 task_pid_nr(task), ret));
3416
3417 UNLOCK_PFS(flags);
3418
3419 return ret;
3420}
3421
3422
3423
3424
3425
3426
3427
3428
3429
3430int
3431pfm_release_debug_registers(struct task_struct *task)
3432{
3433 unsigned long flags;
3434 int ret;
3435
3436 if (pmu_conf->use_rr_dbregs == 0) return 0;
3437
3438 LOCK_PFS(flags);
3439 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3440 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3441 ret = -1;
3442 } else {
3443 pfm_sessions.pfs_ptrace_use_dbregs--;
3444 ret = 0;
3445 }
3446 UNLOCK_PFS(flags);
3447
3448 return ret;
3449}
3450
3451static int
3452pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3453{
3454 struct task_struct *task;
3455 pfm_buffer_fmt_t *fmt;
3456 pfm_ovfl_ctrl_t rst_ctrl;
3457 int state, is_system;
3458 int ret = 0;
3459
3460 state = ctx->ctx_state;
3461 fmt = ctx->ctx_buf_fmt;
3462 is_system = ctx->ctx_fl_system;
3463 task = PFM_CTX_TASK(ctx);
3464
3465 switch(state) {
3466 case PFM_CTX_MASKED:
3467 break;
3468 case PFM_CTX_LOADED:
3469 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
3470
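		/* fall through */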
3471 case PFM_CTX_UNLOADED:
3472 case PFM_CTX_ZOMBIE:
3473 DPRINT(("invalid state=%d\n", state));
3474 return -EBUSY;
3475 default:
3476 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3477 return -EINVAL;
3478 }
3479
3480
3481
3482
3483
3484
3485 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3486 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3487 return -EBUSY;
3488 }
3489
3490
3491 if (unlikely(task == NULL)) {
3492 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3493 return -EINVAL;
3494 }
3495
3496 if (task == current || is_system) {
3497
3498 fmt = ctx->ctx_buf_fmt;
3499
3500 DPRINT(("restarting self %d ovfl=0x%lx\n",
3501 task_pid_nr(task),
3502 ctx->ctx_ovfl_regs[0]));
3503
3504 if (CTX_HAS_SMPL(ctx)) {
3505
3506 prefetch(ctx->ctx_smpl_hdr);
3507
3508 rst_ctrl.bits.mask_monitoring = 0;
3509 rst_ctrl.bits.reset_ovfl_pmds = 0;
3510
3511 if (state == PFM_CTX_LOADED)
3512 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3513 else
3514 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3515 } else {
3516 rst_ctrl.bits.mask_monitoring = 0;
3517 rst_ctrl.bits.reset_ovfl_pmds = 1;
3518 }
3519
3520 if (ret == 0) {
3521 if (rst_ctrl.bits.reset_ovfl_pmds)
3522 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3523
3524 if (rst_ctrl.bits.mask_monitoring == 0) {
3525 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3526
3527 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3528 } else {
3529 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3530
3531
3532 }
3533 }
3534
3535
3536
3537 ctx->ctx_ovfl_regs[0] = 0UL;
3538
3539
3540
3541
3542 ctx->ctx_state = PFM_CTX_LOADED;
3543
3544
3545
3546
3547 ctx->ctx_fl_can_restart = 0;
3548
3549 return 0;
3550 }
3551
3552
3553
3554
3555
3556
3557
3558
3559
3560 if (state == PFM_CTX_MASKED) {
3561 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3562
3563
3564
3565
3566 ctx->ctx_fl_can_restart = 0;
3567 }
3568
3569
3570
3571
3572
3573
3574
3575
3576
3577
3578
3579
3580
3581
3582
3583
3584
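	/*
	 * the monitored task is not the caller: if it is blocked waiting for
	 * a restart, wake it up now; otherwise arm a reset that will be
	 * performed when the task next returns to user mode
	 */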
3585 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3586 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3587 complete(&ctx->ctx_restart_done);
3588 } else {
3589 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3590
3591 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3592
3593 PFM_SET_WORK_PENDING(task, 1);
3594
3595 set_notify_resume(task);
3596
3597
3598
3599
3600 }
3601 return 0;
3602}
3603
3604static int
3605pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3606{
3607 unsigned int m = *(unsigned int *)arg;
3608
3609 pfm_sysctl.debug = m == 0 ? 0 : 1;
3610
3611 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3612
3613 if (m == 0) {
3614 memset(pfm_stats, 0, sizeof(pfm_stats));
3615 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3616 }
3617 return 0;
3618}
3619
3620
3621
3622
3623static int
3624pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3625{
3626 struct thread_struct *thread = NULL;
3627 struct task_struct *task;
3628 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3629 unsigned long flags;
3630 dbreg_t dbreg;
3631 unsigned int rnum;
3632 int first_time;
3633 int ret = 0, state;
3634 int i, can_access_pmu = 0;
3635 int is_system, is_loaded;
3636
3637 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3638
3639 state = ctx->ctx_state;
3640 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3641 is_system = ctx->ctx_fl_system;
3642 task = ctx->ctx_task;
3643
3644 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3645
3646
3647
3648
3649
3650 if (is_loaded) {
3651 thread = &task->thread;
3652
3653
3654
3655
3656
3657 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3658 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3659 return -EBUSY;
3660 }
3661 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3662 }
3663
3664
3665
3666
3667
3668
3669
3670
3671 first_time = ctx->ctx_fl_using_dbreg == 0;
3672
3673
3674
3675
3676 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3677 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3678 return -EBUSY;
3679 }
3680
3681
3682
3683
3684
3685
3686
3687
3688 if (is_loaded) {
3689 LOCK_PFS(flags);
3690
3691 if (first_time && is_system) {
3692 if (pfm_sessions.pfs_ptrace_use_dbregs)
3693 ret = -EBUSY;
3694 else
3695 pfm_sessions.pfs_sys_use_dbregs++;
3696 }
3697 UNLOCK_PFS(flags);
3698 }
3699
3700 if (ret != 0) return ret;
3701
3702
3703
3704
3705
3706 ctx->ctx_fl_using_dbreg = 1;
3707
3708
3709
3710
3711
3712
3713
3714
3715
3716
3717 if (first_time && can_access_pmu) {
3718 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3719 for (i=0; i < pmu_conf->num_ibrs; i++) {
3720 ia64_set_ibr(i, 0UL);
3721 ia64_dv_serialize_instruction();
3722 }
3723 ia64_srlz_i();
3724 for (i=0; i < pmu_conf->num_dbrs; i++) {
3725 ia64_set_dbr(i, 0UL);
3726 ia64_dv_serialize_data();
3727 }
3728 ia64_srlz_d();
3729 }
3730
3731
3732
3733
3734 for (i = 0; i < count; i++, req++) {
3735
3736 rnum = req->dbreg_num;
3737 dbreg.val = req->dbreg_value;
3738
3739 ret = -EINVAL;
3740
3741 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3742 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3743 rnum, dbreg.val, mode, i, count));
3744
3745 goto abort_mission;
3746 }
3747
3748
3749
3750
3751 if (rnum & 0x1) {
3752 if (mode == PFM_CODE_RR)
3753 dbreg.ibr.ibr_x = 0;
3754 else
3755 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3756 }
3757
3758 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3759
3760
3761
3762
3763
3764
3765
3766
3767
3768
3769
3770 if (mode == PFM_CODE_RR) {
3771 CTX_USED_IBR(ctx, rnum);
3772
3773 if (can_access_pmu) {
3774 ia64_set_ibr(rnum, dbreg.val);
3775 ia64_dv_serialize_instruction();
3776 }
3777
3778 ctx->ctx_ibrs[rnum] = dbreg.val;
3779
3780 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3781 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3782 } else {
3783 CTX_USED_DBR(ctx, rnum);
3784
3785 if (can_access_pmu) {
3786 ia64_set_dbr(rnum, dbreg.val);
3787 ia64_dv_serialize_data();
3788 }
3789 ctx->ctx_dbrs[rnum] = dbreg.val;
3790
3791 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3792 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3793 }
3794 }
3795
3796 return 0;
3797
3798abort_mission:
3799
3800
3801
3802 if (first_time) {
3803 LOCK_PFS(flags);
3804 if (ctx->ctx_fl_system) {
3805 pfm_sessions.pfs_sys_use_dbregs--;
3806 }
3807 UNLOCK_PFS(flags);
3808 ctx->ctx_fl_using_dbreg = 0;
3809 }
3810
3811
3812
3813 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3814
3815 return ret;
3816}
3817
3818static int
3819pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3820{
3821 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3822}
3823
3824static int
3825pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3826{
3827 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3828}
3829
3830int
3831pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3832{
3833 pfm_context_t *ctx;
3834
3835 if (req == NULL) return -EINVAL;
3836
3837 ctx = GET_PMU_CTX();
3838
3839 if (ctx == NULL) return -EINVAL;
3840
3841
3842
3843
3844
3845 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3846
3847 return pfm_write_ibrs(ctx, req, nreq, regs);
3848}
3849EXPORT_SYMBOL(pfm_mod_write_ibrs);
3850
3851int
3852pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3853{
3854 pfm_context_t *ctx;
3855
3856 if (req == NULL) return -EINVAL;
3857
3858 ctx = GET_PMU_CTX();
3859
3860 if (ctx == NULL) return -EINVAL;
3861
3862
3863
3864
3865
3866 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3867
3868 return pfm_write_dbrs(ctx, req, nreq, regs);
3869}
3870EXPORT_SYMBOL(pfm_mod_write_dbrs);
3871
3872
3873static int
3874pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3875{
3876 pfarg_features_t *req = (pfarg_features_t *)arg;
3877
3878 req->ft_version = PFM_VERSION;
3879 return 0;
3880}
3881
3882static int
3883pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3884{
3885 struct pt_regs *tregs;
3886 struct task_struct *task = PFM_CTX_TASK(ctx);
3887 int state, is_system;
3888
3889 state = ctx->ctx_state;
3890 is_system = ctx->ctx_fl_system;
3891
3892
3893
3894
3895 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3896
3897
3898
3899
3900
3901
3902 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3903 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3904 return -EBUSY;
3905 }
3906 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3907 task_pid_nr(PFM_CTX_TASK(ctx)),
3908 state,
3909 is_system));
3910
3911
3912
3913
3914
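	/*
	 * system-wide session: monitoring on this CPU is controlled by
	 * psr.pp and the pp bit of the default control register (DCR)
	 */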
3915 if (is_system) {
3916
3917
3918
3919
3920
3921 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
3922 ia64_srlz_i();
3923
3924
3925
3926
3927 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
3928
3929
3930
3931
3932 pfm_clear_psr_pp();
3933
3934
3935
3936
3937 ia64_psr(regs)->pp = 0;
3938
3939 return 0;
3940 }
3941
3942
3943
3944
3945 if (task == current) {
3946
3947 pfm_clear_psr_up();
3948
3949
3950
3951
3952 ia64_psr(regs)->up = 0;
3953 } else {
3954 tregs = task_pt_regs(task);
3955
3956
3957
3958
3959 ia64_psr(tregs)->up = 0;
3960
3961
3962
3963
3964 ctx->ctx_saved_psr_up = 0;
3965 DPRINT(("task=[%d]\n", task_pid_nr(task)));
3966 }
3967 return 0;
3968}
3969
3970
3971static int
3972pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3973{
3974 struct pt_regs *tregs;
3975 int state, is_system;
3976
3977 state = ctx->ctx_state;
3978 is_system = ctx->ctx_fl_system;
3979
3980 if (state != PFM_CTX_LOADED) return -EINVAL;
3981
3982
3983
3984
3985
3986
3987 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3988 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3989 return -EBUSY;
3990 }
3991
3992
3993
3994
3995
3996
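	/*
	 * system-wide: re-enable psr.pp and DCR.pp on this CPU;
	 * per-task: re-enable psr.up for the monitored task
	 */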
3997 if (is_system) {
3998
3999
4000
4001
4002 ia64_psr(regs)->pp = 1;
4003
4004
4005
4006
4007 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4008
4009
4010
4011
4012 pfm_set_psr_pp();
4013
4014
4015 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4016 ia64_srlz_i();
4017
4018 return 0;
4019 }
4020
4021
4022
4023
4024
4025 if (ctx->ctx_task == current) {
4026
4027
4028 pfm_set_psr_up();
4029
4030
4031
4032
4033 ia64_psr(regs)->up = 1;
4034
4035 } else {
4036 tregs = task_pt_regs(ctx->ctx_task);
4037
4038
4039
4040
4041
4042 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4043
4044
4045
4046
4047 ia64_psr(tregs)->up = 1;
4048 }
4049 return 0;
4050}
4051
4052static int
4053pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4054{
4055 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4056 unsigned int cnum;
4057 int i;
4058 int ret = -EINVAL;
4059
4060 for (i = 0; i < count; i++, req++) {
4061
4062 cnum = req->reg_num;
4063
4064 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4065
4066 req->reg_value = PMC_DFL_VAL(cnum);
4067
4068 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4069
4070 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4071 }
4072 return 0;
4073
4074abort_mission:
4075 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4076 return ret;
4077}
4078
4079static int
4080pfm_check_task_exist(pfm_context_t *ctx)
4081{
4082 struct task_struct *g, *t;
4083 int ret = -ESRCH;
4084
4085 read_lock(&tasklist_lock);
4086
4087 do_each_thread (g, t) {
4088 if (t->thread.pfm_context == ctx) {
4089 ret = 0;
4090 goto out;
4091 }
4092 } while_each_thread (g, t);
4093out:
4094 read_unlock(&tasklist_lock);
4095
4096 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4097
4098 return ret;
4099}
4100
4101static int
4102pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4103{
4104 struct task_struct *task;
4105 struct thread_struct *thread;
4106 struct pfm_context_t *old;
4107 unsigned long flags;
4108#ifndef CONFIG_SMP
4109 struct task_struct *owner_task = NULL;
4110#endif
4111 pfarg_load_t *req = (pfarg_load_t *)arg;
4112 unsigned long *pmcs_source, *pmds_source;
4113 int the_cpu;
4114 int ret = 0;
4115 int state, is_system, set_dbregs = 0;
4116
4117 state = ctx->ctx_state;
4118 is_system = ctx->ctx_fl_system;
4119
4120
4121
4122 if (state != PFM_CTX_UNLOADED) {
4123 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4124 req->load_pid,
4125 ctx->ctx_state));
4126 return -EBUSY;
4127 }
4128
4129 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4130
4131 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4132 DPRINT(("cannot use blocking mode on self\n"));
4133 return -EINVAL;
4134 }
4135
4136 ret = pfm_get_task(ctx, req->load_pid, &task);
4137 if (ret) {
4138 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4139 return ret;
4140 }
4141
4142 ret = -EINVAL;
4143
4144
4145
4146
4147 if (is_system && task != current) {
4148 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4149 req->load_pid));
4150 goto error;
4151 }
4152
4153 thread = &task->thread;
4154
4155 ret = 0;
4156
4157
4158
4159
4160 if (ctx->ctx_fl_using_dbreg) {
4161 if (thread->flags & IA64_THREAD_DBG_VALID) {
4162 ret = -EBUSY;
4163 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4164 goto error;
4165 }
4166 LOCK_PFS(flags);
4167
4168 if (is_system) {
4169 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4170 DPRINT(("cannot load [%d] dbregs in use\n",
4171 task_pid_nr(task)));
4172 ret = -EBUSY;
4173 } else {
4174 pfm_sessions.pfs_sys_use_dbregs++;
4175 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4176 set_dbregs = 1;
4177 }
4178 }
4179
4180 UNLOCK_PFS(flags);
4181
4182 if (ret) goto error;
4183 }
4184
4185
4186
4187
4188
4189
4190
4191
4192
4193
4194
4195
4196
4197
4198
4199
4200 the_cpu = ctx->ctx_cpu = smp_processor_id();
4201
4202 ret = -EBUSY;
4203
4204
4205
4206 ret = pfm_reserve_session(current, is_system, the_cpu);
4207 if (ret) goto error;
4208
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4219 thread->pfm_context, ctx));
4220
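	/*
	 * link the context to the target thread atomically; if the task
	 * already has a context attached, back out
	 */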
4221 ret = -EBUSY;
4222 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4223 if (old != NULL) {
4224 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4225 goto error_unres;
4226 }
4227
4228 pfm_reset_msgq(ctx);
4229
4230 ctx->ctx_state = PFM_CTX_LOADED;
4231
4232
4233
4234
4235 ctx->ctx_task = task;
4236
4237 if (is_system) {
4238
4239
4240
4241 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4242 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4243
4244 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4245 } else {
4246 thread->flags |= IA64_THREAD_PM_VALID;
4247 }
4248
4249
4250
4251
4252 pfm_copy_pmds(task, ctx);
4253 pfm_copy_pmcs(task, ctx);
4254
4255 pmcs_source = ctx->th_pmcs;
4256 pmds_source = ctx->th_pmds;
4257
4258
4259
4260
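	/*
	 * when loading onto the current task the PMU can be programmed right
	 * away; for another (stopped) task the registers are only staged in
	 * the thread state and picked up at the next context switch
	 */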
4261 if (task == current) {
4262
4263 if (is_system == 0) {
4264
4265
4266 ia64_psr(regs)->sp = 0;
4267 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4268
4269 SET_LAST_CPU(ctx, smp_processor_id());
4270 INC_ACTIVATION();
4271 SET_ACTIVATION(ctx);
4272#ifndef CONFIG_SMP
4273
4274
4275
4276 owner_task = GET_PMU_OWNER();
4277 if (owner_task) pfm_lazy_save_regs(owner_task);
4278#endif
4279 }
4280
4281
4282
4283
4284 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4285 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4286
4287 ctx->ctx_reload_pmcs[0] = 0UL;
4288 ctx->ctx_reload_pmds[0] = 0UL;
4289
4290
4291
4292
4293 if (ctx->ctx_fl_using_dbreg) {
4294 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4295 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4296 }
4297
4298
4299
4300 SET_PMU_OWNER(task, ctx);
4301
4302 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4303 } else {
4304
4305
4306
4307 regs = task_pt_regs(task);
4308
4309
4310 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4311 SET_LAST_CPU(ctx, -1);
4312
4313
4314 ctx->ctx_saved_psr_up = 0UL;
4315 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4316 }
4317
4318 ret = 0;
4319
4320error_unres:
4321 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4322error:
4323
4324
4325
4326 if (ret && set_dbregs) {
4327 LOCK_PFS(flags);
4328 pfm_sessions.pfs_sys_use_dbregs--;
4329 UNLOCK_PFS(flags);
4330 }
4331
4332
4333
4334 if (is_system == 0 && task != current) {
4335 pfm_put_task(task);
4336
4337 if (ret == 0) {
4338 ret = pfm_check_task_exist(ctx);
4339 if (ret) {
4340 ctx->ctx_state = PFM_CTX_UNLOADED;
4341 ctx->ctx_task = NULL;
4342 }
4343 }
4344 }
4345 return ret;
4346}
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4357
4358static int
4359pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4360{
4361 struct task_struct *task = PFM_CTX_TASK(ctx);
4362 struct pt_regs *tregs;
4363 int prev_state, is_system;
4364 int ret;
4365
4366 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4367
4368 prev_state = ctx->ctx_state;
4369 is_system = ctx->ctx_fl_system;
4370
4371
4372
4373
4374 if (prev_state == PFM_CTX_UNLOADED) {
4375 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4376 return 0;
4377 }
4378
4379
4380
4381
4382 ret = pfm_stop(ctx, NULL, 0, regs);
4383 if (ret) return ret;
4384
4385 ctx->ctx_state = PFM_CTX_UNLOADED;
4386
4387
4388
4389
4390
4391
4392 if (is_system) {
4393
4394
4395
4396
4397
4398
4399 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4400 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4401
4402
4403
4404
4405
4406 pfm_flush_pmds(current, ctx);
4407
4408
4409
4410
4411
4412 if (prev_state != PFM_CTX_ZOMBIE)
4413 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu);
4414
4415
4416
4417
4418 task->thread.pfm_context = NULL;
4419
4420
4421
4422 ctx->ctx_task = NULL;
4423
4424
4425
4426
4427 return 0;
4428 }
4429
4430
4431
4432
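	/*
	 * per-task session: flush the PMU state into the context, release
	 * the session and detach the context from the task
	 */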
4433 tregs = task == current ? regs : task_pt_regs(task);
4434
4435 if (task == current) {
4436
4437
4438
4439 ia64_psr(regs)->sp = 1;
4440
4441 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4442 }
4443
4444
4445
4446
4447 pfm_flush_pmds(task, ctx);
4448
4449
4450
4451
4452
4453
4454
4455 if (prev_state != PFM_CTX_ZOMBIE)
4456 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu);
4457
4458
4459
4460
4461 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4462 SET_LAST_CPU(ctx, -1);
4463
4464
4465
4466
4467 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4468
4469
4470
4471
4472 task->thread.pfm_context = NULL;
4473 ctx->ctx_task = NULL;
4474
4475 PFM_SET_WORK_PENDING(task, 0);
4476
4477 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4478 ctx->ctx_fl_can_restart = 0;
4479 ctx->ctx_fl_going_zombie = 0;
4480
4481 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4482
4483 return 0;
4484}
4485
4486
4487
4488
4489
4490
4491void
4492pfm_exit_thread(struct task_struct *task)
4493{
4494 pfm_context_t *ctx;
4495 unsigned long flags;
4496 struct pt_regs *regs = task_pt_regs(task);
4497 int ret, state;
4498 int free_ok = 0;
4499
4500 ctx = PFM_GET_CTX(task);
4501
4502 PROTECT_CTX(ctx, flags);
4503
4504 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4505
4506 state = ctx->ctx_state;
4507 switch(state) {
4508 case PFM_CTX_UNLOADED:
4509
4510
4511
4512
4513 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4514 break;
4515 case PFM_CTX_LOADED:
4516 case PFM_CTX_MASKED:
4517 ret = pfm_context_unload(ctx, NULL, 0, regs);
4518 if (ret) {
4519 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4520 }
4521 DPRINT(("ctx unloaded for current state was %d\n", state));
4522
4523 pfm_end_notify_user(ctx);
4524 break;
4525 case PFM_CTX_ZOMBIE:
4526 ret = pfm_context_unload(ctx, NULL, 0, regs);
4527 if (ret) {
4528 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4529 }
4530 free_ok = 1;
4531 break;
4532 default:
4533 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4534 break;
4535 }
4536 UNPROTECT_CTX(ctx, flags);
4537
4538 { u64 psr = pfm_get_psr();
4539 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4540 BUG_ON(GET_PMU_OWNER());
4541 BUG_ON(ia64_psr(regs)->up);
4542 BUG_ON(ia64_psr(regs)->pp);
4543 }
4544
4545
4546
4547
4548
4549 if (free_ok) pfm_context_free(ctx);
4550}
4551
4552
4553
4554
4555#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4556#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4557#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4558#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4559#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4560
4561static pfm_cmd_desc_t pfm_cmd_tab[]={
4562PFM_CMD_NONE,
4563PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4564PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4565PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4566PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4567PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4568PFM_CMD_NONE,
4569PFM_CMD_NONE,
4570PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4571PFM_CMD_NONE,
4572PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4573PFM_CMD_NONE,
4574PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4575PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4576PFM_CMD_NONE,
4577PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4578PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4579PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4580PFM_CMD_NONE,
4581PFM_CMD_NONE,
4582PFM_CMD_NONE,
4583PFM_CMD_NONE,
4584PFM_CMD_NONE,
4585PFM_CMD_NONE,
4586PFM_CMD_NONE,
4587PFM_CMD_NONE,
4588PFM_CMD_NONE,
4589PFM_CMD_NONE,
4590PFM_CMD_NONE,
4591PFM_CMD_NONE,
4592PFM_CMD_NONE,
4593PFM_CMD_NONE,
4594PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4595PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4596};
4597#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4598
4599static int
4600pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4601{
4602 struct task_struct *task;
4603 int state, old_state;
4604
4605recheck:
4606 state = ctx->ctx_state;
4607 task = ctx->ctx_task;
4608
4609 if (task == NULL) {
4610 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4611 return 0;
4612 }
4613
4614 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4615 ctx->ctx_fd,
4616 state,
4617 task_pid_nr(task),
4618 task->state, PFM_CMD_STOPPED(cmd)));
4619
4620
4621
4622
4623
4624
4625
4626
4627 if (task == current || ctx->ctx_fl_system) return 0;
4628
4629
4630
4631
4632 switch(state) {
4633 case PFM_CTX_UNLOADED:
4634
4635
4636
4637 return 0;
4638 case PFM_CTX_ZOMBIE:
4639
4640
4641
4642 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4643 return -EINVAL;
4644 case PFM_CTX_MASKED:
4645
4646
4647
4648
4649 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4650 }
4651
4652
4653
4654
4655
4656
4657
4658
4659
4660
4661
4662 if (PFM_CMD_STOPPED(cmd)) {
4663 if (!task_is_stopped_or_traced(task)) {
4664 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4665 return -EBUSY;
4666 }
4667
4668
4669
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679
4680
4681 old_state = state;
4682
4683 UNPROTECT_CTX(ctx, flags);
4684
4685 wait_task_inactive(task, 0);
4686
4687 PROTECT_CTX(ctx, flags);
4688
4689
4690
4691
4692 if (ctx->ctx_state != old_state) {
4693 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4694 goto recheck;
4695 }
4696 }
4697 return 0;
4698}
4699
4700
4701
4702
4703asmlinkage long
4704sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4705{
4706 struct fd f = {NULL, 0};
4707 pfm_context_t *ctx = NULL;
4708 unsigned long flags = 0UL;
4709 void *args_k = NULL;
4710 long ret;
4711 size_t base_sz, sz, xtra_sz = 0;
4712 int narg, completed_args = 0, call_made = 0, cmd_flags;
4713 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4714 int (*getsize)(void *arg, size_t *sz);
4715#define PFM_MAX_ARGSIZE 4096
4716
4717
4718
4719
4720 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4721
4722 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4723 DPRINT(("invalid cmd=%d\n", cmd));
4724 return -EINVAL;
4725 }
4726
4727 func = pfm_cmd_tab[cmd].cmd_func;
4728 narg = pfm_cmd_tab[cmd].cmd_narg;
4729 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4730 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4731 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4732
4733 if (unlikely(func == NULL)) {
4734 DPRINT(("invalid cmd=%d\n", cmd));
4735 return -EINVAL;
4736 }
4737
4738 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4739 PFM_CMD_NAME(cmd),
4740 cmd,
4741 narg,
4742 base_sz,
4743 count));
4744
4745
4746
4747
4748 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4749 return -EINVAL;
4750
4751restart_args:
4752 sz = xtra_sz + base_sz*count;
4753
4754
4755
4756 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4757 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4758 return -E2BIG;
4759 }
4760
4761
4762
4763
4764 if (likely(count && args_k == NULL)) {
4765 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4766 if (args_k == NULL) return -ENOMEM;
4767 }
4768
4769 ret = -EFAULT;
4770
4771
4772
4773
4774
4775
4776 if (sz && copy_from_user(args_k, arg, sz)) {
4777 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4778 goto error_args;
4779 }
4780
4781
4782
4783
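	/*
	 * some commands carry a variable-size payload: ask the command's
	 * getsize() callback how much extra to copy, then restart the
	 * copy-in once with the full size
	 */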
4784 if (completed_args == 0 && getsize) {
4785
4786
4787
4788 ret = (*getsize)(args_k, &xtra_sz);
4789 if (ret) goto error_args;
4790
4791 completed_args = 1;
4792
4793 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4794
4795
4796 if (likely(xtra_sz)) goto restart_args;
4797 }
4798
4799 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4800
4801 ret = -EBADF;
4802
4803 f = fdget(fd);
4804 if (unlikely(f.file == NULL)) {
4805 DPRINT(("invalid fd %d\n", fd));
4806 goto error_args;
4807 }
4808 if (unlikely(PFM_IS_FILE(f.file) == 0)) {
4809 DPRINT(("fd %d not related to perfmon\n", fd));
4810 goto error_args;
4811 }
4812
4813 ctx = f.file->private_data;
4814 if (unlikely(ctx == NULL)) {
4815 DPRINT(("no context for fd %d\n", fd));
4816 goto error_args;
4817 }
4818 prefetch(&ctx->ctx_state);
4819
4820 PROTECT_CTX(ctx, flags);
4821
4822
4823
4824
4825 ret = pfm_check_task_state(ctx, cmd, flags);
4826 if (unlikely(ret)) goto abort_locked;
4827
4828skip_fd:
4829 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4830
4831 call_made = 1;
4832
4833abort_locked:
4834 if (likely(ctx)) {
4835 DPRINT(("context unlocked\n"));
4836 UNPROTECT_CTX(ctx, flags);
4837 }
4838
4839
4840 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4841
4842error_args:
4843 if (f.file)
4844 fdput(f);
4845
4846 kfree(args_k);
4847
4848 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4849
4850 return ret;
4851}
4852
4853static void
4854pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4855{
4856 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4857 pfm_ovfl_ctrl_t rst_ctrl;
4858 int state;
4859 int ret = 0;
4860
4861 state = ctx->ctx_state;
4862
4863
4864
4865
4866 if (CTX_HAS_SMPL(ctx)) {
4867
4868 rst_ctrl.bits.mask_monitoring = 0;
4869 rst_ctrl.bits.reset_ovfl_pmds = 0;
4870
4871 if (state == PFM_CTX_LOADED)
4872 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4873 else
4874 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4875 } else {
4876 rst_ctrl.bits.mask_monitoring = 0;
4877 rst_ctrl.bits.reset_ovfl_pmds = 1;
4878 }
4879
4880 if (ret == 0) {
4881 if (rst_ctrl.bits.reset_ovfl_pmds) {
4882 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4883 }
4884 if (rst_ctrl.bits.mask_monitoring == 0) {
4885 DPRINT(("resuming monitoring\n"));
4886 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4887 } else {
4888 DPRINT(("stopping monitoring\n"));
4889
4890 }
4891 ctx->ctx_state = PFM_CTX_LOADED;
4892 }
4893}
4894
4895
4896
4897
4898
4899static void
4900pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4901{
4902 int ret;
4903
4904 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
4905
4906 ret = pfm_context_unload(ctx, NULL, 0, regs);
4907 if (ret) {
4908		printk(KERN_ERR "pfm_context_force_terminate: [%d] unload failed with %d\n", task_pid_nr(current), ret);
4909 }
4910
4911
4912
4913
4914 wake_up_interruptible(&ctx->ctx_zombieq);
4915
4916
4917
4918
4919
4920
4921}
4922
4923static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4924
4925
4926
4927
4928
4929
4930
4931
4932
4933
4934void
4935pfm_handle_work(void)
4936{
4937 pfm_context_t *ctx;
4938 struct pt_regs *regs;
4939 unsigned long flags, dummy_flags;
4940 unsigned long ovfl_regs;
4941 unsigned int reason;
4942 int ret;
4943
4944 ctx = PFM_GET_CTX(current);
4945 if (ctx == NULL) {
4946 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
4947 task_pid_nr(current));
4948 return;
4949 }
4950
4951 PROTECT_CTX(ctx, flags);
4952
4953 PFM_SET_WORK_PENDING(current, 0);
4954
4955 regs = task_pt_regs(current);
4956
4957
4958
4959
4960 reason = ctx->ctx_fl_trap_reason;
4961 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4962 ovfl_regs = ctx->ctx_ovfl_regs[0];
4963
4964 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
4965
4966
4967
4968
4969 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
4970 goto do_zombie;
4971
4972
4973 if (reason == PFM_TRAP_REASON_RESET)
4974 goto skip_blocking;
4975
4976
4977
4978
4979
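	/*
	 * blocking path: drop the context lock, re-enable interrupts and
	 * sleep until the controlling process issues PFM_RESTART
	 */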
4980 UNPROTECT_CTX(ctx, flags);
4981
4982
4983
4984
4985 local_irq_enable();
4986
4987 DPRINT(("before block sleeping\n"));
4988
4989
4990
4991
4992
4993 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
4994
4995 DPRINT(("after block sleeping ret=%d\n", ret));
4996
4997
4998
4999
5000
5001
5002
5003 PROTECT_CTX(ctx, dummy_flags);
5004
5005
5006
5007
5008
5009
5010
5011 ovfl_regs = ctx->ctx_ovfl_regs[0];
5012
5013 if (ctx->ctx_fl_going_zombie) {
5014do_zombie:
5015 DPRINT(("context is zombie, bailing out\n"));
5016 pfm_context_force_terminate(ctx, regs);
5017 goto nothing_to_do;
5018 }
5019
5020
5021
5022 if (ret < 0)
5023 goto nothing_to_do;
5024
5025skip_blocking:
5026 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5027 ctx->ctx_ovfl_regs[0] = 0UL;
5028
5029nothing_to_do:
5030
5031
5032
5033 UNPROTECT_CTX(ctx, flags);
5034}
5035
5036static int
5037pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5038{
5039 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5040 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5041 return 0;
5042 }
5043
5044 DPRINT(("waking up somebody\n"));
5045
5046 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5047
5048
5049
5050
5051
5052 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5053
5054 return 0;
5055}
5056
5057static int
5058pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5059{
5060 pfm_msg_t *msg = NULL;
5061
5062 if (ctx->ctx_fl_no_msg == 0) {
5063 msg = pfm_get_new_msg(ctx);
5064 if (msg == NULL) {
5065 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5066 return -1;
5067 }
5068
5069 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5070 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5071 msg->pfm_ovfl_msg.msg_active_set = 0;
5072 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5073 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5074 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5075 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5076 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5077 }
5078
5079 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5080 msg,
5081 ctx->ctx_fl_no_msg,
5082 ctx->ctx_fd,
5083 ovfl_pmds));
5084
5085 return pfm_notify_user(ctx, msg);
5086}
5087
5088static int
5089pfm_end_notify_user(pfm_context_t *ctx)
5090{
5091 pfm_msg_t *msg;
5092
5093 msg = pfm_get_new_msg(ctx);
5094 if (msg == NULL) {
5095 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5096 return -1;
5097 }
5098
5099 memset(msg, 0, sizeof(*msg));
5100
5101 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5102 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5103 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5104
5105 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5106 msg,
5107 ctx->ctx_fl_no_msg,
5108 ctx->ctx_fd));
5109
5110 return pfm_notify_user(ctx, msg);
5111}
5112
5113
5114
5115
5116
5117static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5118 unsigned long pmc0, struct pt_regs *regs)
5119{
5120 pfm_ovfl_arg_t *ovfl_arg;
5121 unsigned long mask;
5122 unsigned long old_val, ovfl_val, new_val;
5123 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5124 unsigned long tstamp;
5125 pfm_ovfl_ctrl_t ovfl_ctrl;
5126 unsigned int i, has_smpl;
5127 int must_notify = 0;
5128
5129 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5130
5131
5132
5133
5134 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5135
5136 tstamp = ia64_get_itc();
5137 mask = pmc0 >> PMU_FIRST_COUNTER;
5138 ovfl_val = pmu_conf->ovfl_val;
5139 has_smpl = CTX_HAS_SMPL(ctx);
5140
5141 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5142 "used_pmds=0x%lx\n",
5143 pmc0,
5144 task ? task_pid_nr(task): -1,
5145 (regs ? regs->cr_iip : 0),
5146 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5147 ctx->ctx_used_pmds[0]));
5148
5149
5150
5151
5152
5153
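	/*
	 * first pass: fold the overflow into the 64-bit software counters
	 * and record which counters overflowed and which of them request a
	 * user notification
	 */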
5154 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5155
5156
5157 if ((mask & 0x1) == 0) continue;
5158
5159
5160
5161
5162
5163
5164
5165 old_val = new_val = ctx->ctx_pmds[i].val;
5166 new_val += 1 + ovfl_val;
5167 ctx->ctx_pmds[i].val = new_val;
5168
5169
5170
5171
5172 if (likely(old_val > new_val)) {
5173 ovfl_pmds |= 1UL << i;
5174 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5175 }
5176
5177 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5178 i,
5179 new_val,
5180 old_val,
5181 ia64_get_pmd(i) & ovfl_val,
5182 ovfl_pmds,
5183 ovfl_notify));
5184 }
5185
5186
5187
5188
5189 if (ovfl_pmds == 0UL) return;
5190
5191
5192
5193
5194 ovfl_ctrl.val = 0;
5195 reset_pmds = 0UL;
5196
5197
5198
5199
5200
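	/*
	 * with a sampling buffer attached, hand each overflowed counter to
	 * the buffer format handler which decides about notification,
	 * blocking, masking and resets; without one, apply the defaults
	 */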
5201 if (has_smpl) {
5202 unsigned long start_cycles, end_cycles;
5203 unsigned long pmd_mask;
5204 int j, k, ret = 0;
5205 int this_cpu = smp_processor_id();
5206
5207 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5208 ovfl_arg = &ctx->ctx_ovfl_arg;
5209
5210 prefetch(ctx->ctx_smpl_hdr);
5211
5212 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5213
5214 mask = 1UL << i;
5215
5216 if ((pmd_mask & 0x1) == 0) continue;
5217
5218 ovfl_arg->ovfl_pmd = (unsigned char )i;
5219 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5220 ovfl_arg->active_set = 0;
5221 ovfl_arg->ovfl_ctrl.val = 0;
5222 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5223
5224 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5225 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5226 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5227
5228
5229
5230
5231
5232 if (smpl_pmds) {
5233 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5234 if ((smpl_pmds & 0x1) == 0) continue;
5235 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5236 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5237 }
5238 }
5239
5240 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5241
5242 start_cycles = ia64_get_itc();
5243
5244
5245
5246
5247 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5248
5249 end_cycles = ia64_get_itc();
5250
5251
5252
5253
5254
5255 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5256 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5257 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5258
5259
5260
5261 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5262
5263 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5264 }
5265
5266
5267
5268 if (ret && pmd_mask) {
5269 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5270 pmd_mask<<PMU_FIRST_COUNTER));
5271 }
5272
5273
5274
5275 ovfl_pmds &= ~reset_pmds;
5276 } else {
5277
5278
5279
5280
5281 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5282 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5283 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5284 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5285
5286
5287
5288 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5289 }
5290
5291 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5292
5293
5294
5295
5296 if (reset_pmds) {
5297 unsigned long bm = reset_pmds;
5298 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5299 }
5300
5301 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5302
5303
5304
5305 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5306
5307
5308
5309
5310 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5311
5312 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5313
5314
5315
5316
5317 PFM_SET_WORK_PENDING(task, 1);
5318
5319
5320
5321
5322
5323 set_notify_resume(task);
5324 }
5325
5326
5327
5328
5329 must_notify = 1;
5330 }
5331
5332 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5333 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5334 PFM_GET_WORK_PENDING(task),
5335 ctx->ctx_fl_trap_reason,
5336 ovfl_pmds,
5337 ovfl_notify,
5338 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5339
5340
5341
5342 if (ovfl_ctrl.bits.mask_monitoring) {
5343 pfm_mask_monitoring(task);
5344 ctx->ctx_state = PFM_CTX_MASKED;
5345 ctx->ctx_fl_can_restart = 1;
5346 }
5347
5348
5349
5350
5351 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5352
5353 return;
5354
5355sanity_check:
5356 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5357 smp_processor_id(),
5358 task ? task_pid_nr(task) : -1,
5359 pmc0);
5360 return;
5361
5362stop_monitoring:
5363
5364
5365
5366
5367
5368
5369
5370
5371
5372
5373
5374
5375
5376
5377
5378
5379
5380
5381
5382
5383
5384
5385
5386
5387
5388
5389
5390
5391 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5392 pfm_clear_psr_up();
5393 ia64_psr(regs)->up = 0;
5394 ia64_psr(regs)->sp = 1;
5395 return;
5396}
5397
5398static int
5399pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
5400{
5401 struct task_struct *task;
5402 pfm_context_t *ctx;
5403 unsigned long flags;
5404 u64 pmc0;
5405 int this_cpu = smp_processor_id();
5406 int retval = 0;
5407
5408 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5409
5410
5411
5412
5413 pmc0 = ia64_get_pmc(0);
5414
5415 task = GET_PMU_OWNER();
5416 ctx = GET_PMU_CTX();
5417
5418
5419
5420
5421
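	/*
	 * the interrupt is genuine only if PMC0 reports an overflow and a
	 * monitored task (or system-wide session) owns the PMU on this CPU;
	 * anything else is accounted as spurious
	 */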
5422 if (PMC0_HAS_OVFL(pmc0) && task) {
5423
5424
5425
5426
5427
5428 if (!ctx) goto report_spurious1;
5429
5430 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5431 goto report_spurious2;
5432
5433 PROTECT_CTX_NOPRINT(ctx, flags);
5434
5435 pfm_overflow_handler(task, ctx, pmc0, regs);
5436
5437 UNPROTECT_CTX_NOPRINT(ctx, flags);
5438
5439 } else {
5440 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5441 retval = -1;
5442 }
5443
5444
5445
5446 pfm_unfreeze_pmu();
5447
5448 return retval;
5449
5450report_spurious1:
5451 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5452 this_cpu, task_pid_nr(task));
5453 pfm_unfreeze_pmu();
5454 return -1;
5455report_spurious2:
5456 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5457 this_cpu,
5458 task_pid_nr(task));
5459 pfm_unfreeze_pmu();
5460 return -1;
5461}
5462
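/*
 * top-level perfmon interrupt handler: collects per-CPU cycle statistics
 * around pfm_do_interrupt_handler(), or forwards the interrupt to the
 * alternate handler when one is installed
 */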
5463static irqreturn_t
5464pfm_interrupt_handler(int irq, void *arg)
5465{
5466 unsigned long start_cycles, total_cycles;
5467 unsigned long min, max;
5468 int this_cpu;
5469 int ret;
5470 struct pt_regs *regs = get_irq_regs();
5471
5472 this_cpu = get_cpu();
5473 if (likely(!pfm_alt_intr_handler)) {
5474 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5475 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5476
5477 start_cycles = ia64_get_itc();
5478
5479 ret = pfm_do_interrupt_handler(arg, regs);
5480
5481 total_cycles = ia64_get_itc();
5482
5483
5484
5485
5486 if (likely(ret == 0)) {
5487 total_cycles -= start_cycles;
5488
5489 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5490 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5491
5492 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5493 }
5494 }
5495 else {
5496 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
5497 }
5498
5499 put_cpu();
5500 return IRQ_HANDLED;
5501}
5502
5503
5504
5505
5506
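/*
 * /proc/perfmon support: the seq_file iterator returns this sentinel first
 * so that the header is emitted before the per-CPU statistics
 */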
5507#define PFM_PROC_SHOW_HEADER ((void *)(long)nr_cpu_ids+1)
5508
5509static void *
5510pfm_proc_start(struct seq_file *m, loff_t *pos)
5511{
5512 if (*pos == 0) {
5513 return PFM_PROC_SHOW_HEADER;
5514 }
5515
5516 while (*pos <= nr_cpu_ids) {
5517 if (cpu_online(*pos - 1)) {
5518 return (void *)*pos;
5519 }
5520 ++*pos;
5521 }
5522 return NULL;
5523}
5524
5525static void *
5526pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5527{
5528 ++*pos;
5529 return pfm_proc_start(m, pos);
5530}
5531
5532static void
5533pfm_proc_stop(struct seq_file *m, void *v)
5534{
5535}
5536
5537static void
5538pfm_proc_show_header(struct seq_file *m)
5539{
5540 struct list_head * pos;
5541 pfm_buffer_fmt_t * entry;
5542 unsigned long flags;
5543
5544 seq_printf(m,
5545 "perfmon version : %u.%u\n"
5546 "model : %s\n"
5547 "fastctxsw : %s\n"
5548 "expert mode : %s\n"
5549 "ovfl_mask : 0x%lx\n"
5550 "PMU flags : 0x%x\n",
5551 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5552 pmu_conf->pmu_name,
5553 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5554 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5555 pmu_conf->ovfl_val,
5556 pmu_conf->flags);
5557
5558 LOCK_PFS(flags);
5559
5560 seq_printf(m,
5561 "proc_sessions : %u\n"
5562 "sys_sessions : %u\n"
5563 "sys_use_dbregs : %u\n"
5564 "ptrace_use_dbregs : %u\n",
5565 pfm_sessions.pfs_task_sessions,
5566 pfm_sessions.pfs_sys_sessions,
5567 pfm_sessions.pfs_sys_use_dbregs,
5568 pfm_sessions.pfs_ptrace_use_dbregs);
5569
5570 UNLOCK_PFS(flags);
5571
5572 spin_lock(&pfm_buffer_fmt_lock);
5573
5574 list_for_each(pos, &pfm_buffer_fmt_list) {
5575 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5576 seq_printf(m, "format : %16phD %s\n",
5577 entry->fmt_uuid, entry->fmt_name);
5578 }
5579 spin_unlock(&pfm_buffer_fmt_lock);
5580
5581}
5582
5583static int
5584pfm_proc_show(struct seq_file *m, void *v)
5585{
5586 unsigned long psr;
5587 unsigned int i;
5588 int cpu;
5589
5590 if (v == PFM_PROC_SHOW_HEADER) {
5591 pfm_proc_show_header(m);
5592 return 0;
5593 }
5594
5595
5596
5597 cpu = (long)v - 1;
5598 seq_printf(m,
5599 "CPU%-2d overflow intrs : %lu\n"
5600 "CPU%-2d overflow cycles : %lu\n"
5601 "CPU%-2d overflow min : %lu\n"
5602 "CPU%-2d overflow max : %lu\n"
5603 "CPU%-2d smpl handler calls : %lu\n"
5604 "CPU%-2d smpl handler cycles : %lu\n"
5605 "CPU%-2d spurious intrs : %lu\n"
5606 "CPU%-2d replay intrs : %lu\n"
5607 "CPU%-2d syst_wide : %d\n"
5608 "CPU%-2d dcr_pp : %d\n"
5609 "CPU%-2d exclude idle : %d\n"
5610 "CPU%-2d owner : %d\n"
5611 "CPU%-2d context : %p\n"
5612 "CPU%-2d activations : %lu\n",
5613 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5614 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5615 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5616 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5617 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5618 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5619 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5620 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5621 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5622 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5623 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5624 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5625 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5626 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5627
5628 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
5629
5630 psr = pfm_get_psr();
5631
5632 ia64_srlz_d();
5633
5634 seq_printf(m,
5635 "CPU%-2d psr : 0x%lx\n"
5636 "CPU%-2d pmc0 : 0x%lx\n",
5637 cpu, psr,
5638 cpu, ia64_get_pmc(0));
5639
5640 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5641 if (PMC_IS_COUNTING(i) == 0) continue;
5642 seq_printf(m,
5643 "CPU%-2d pmc%u : 0x%lx\n"
5644 "CPU%-2d pmd%u : 0x%lx\n",
5645 cpu, i, ia64_get_pmc(i),
5646 cpu, i, ia64_get_pmd(i));
5647 }
5648 }
5649 return 0;
5650}
5651
5652const struct seq_operations pfm_seq_ops = {
5653 .start = pfm_proc_start,
5654 .next = pfm_proc_next,
5655 .stop = pfm_proc_stop,
5656 .show = pfm_proc_show
5657};
5658
5659
5660
5661
5662
5663
5664
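/*
 * called from the context-switch path when a system-wide session is active
 * on this CPU; 'info' is the per-CPU pfm_syst_info word. Unless the task
 * being switched is the idle task and idle is excluded, only psr.pp in the
 * task's saved registers is adjusted. Otherwise monitoring is turned off
 * while idle runs (dcr.pp/psr.pp cleared on switch-in, restored on
 * switch-out).
 */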
5665void
5666pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5667{
5668 struct pt_regs *regs;
5669 unsigned long dcr;
5670 unsigned long dcr_pp;
5671
5672 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5673
5674
5675
5676
5677
5678 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5679 regs = task_pt_regs(task);
5680 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5681 return;
5682 }
5683
5684
5685
5686 if (dcr_pp) {
5687 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5688
5689
5690
5691 if (is_ctxswin) {
5692
5693 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5694 pfm_clear_psr_pp();
5695 ia64_srlz_i();
5696 return;
5697 }
5698
5699
5700
5701
5702
5703
5704
5705 ia64_setreg(_IA64_REG_CR_DCR, dcr |IA64_DCR_PP);
5706 pfm_set_psr_pp();
5707 ia64_srlz_i();
5708 }
5709}
5710
5711#ifdef CONFIG_SMP
5712
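/*
 * forcibly disconnect a zombie context from its task: clear PMU ownership,
 * any pending perfmon work and the per-thread perfmon state
 */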
5713static void
5714pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5715{
5716 struct task_struct *task = ctx->ctx_task;
5717
5718 ia64_psr(regs)->up = 0;
5719 ia64_psr(regs)->sp = 1;
5720
5721 if (GET_PMU_OWNER() == task) {
5722 DPRINT(("cleared ownership for [%d]\n",
5723 task_pid_nr(ctx->ctx_task)));
5724 SET_PMU_OWNER(NULL, NULL);
5725 }
5726
5727
5728
5729
5730 PFM_SET_WORK_PENDING(task, 0);
5731
5732 task->thread.pfm_context = NULL;
5733 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5734
5735 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5736}
5737
5738
5739
5740
5741
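/*
 * context-switch-out path (SMP): called with interrupts already disabled by
 * the scheduler. Stops monitoring, saves psr.up and the PMD/PMC0 state into
 * the context and releases PMU ownership. A zombie context is cleaned up
 * and freed here.
 */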
5742void
5743pfm_save_regs(struct task_struct *task)
5744{
5745 pfm_context_t *ctx;
5746 unsigned long flags;
5747 u64 psr;
5748
5749
5750 ctx = PFM_GET_CTX(task);
5751 if (ctx == NULL) return;
5752
5753
5754
5755
5756
5757
5758 flags = pfm_protect_ctx_ctxsw(ctx);
5759
5760 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5761 struct pt_regs *regs = task_pt_regs(task);
5762
5763 pfm_clear_psr_up();
5764
5765 pfm_force_cleanup(ctx, regs);
5766
5767 BUG_ON(ctx->ctx_smpl_hdr);
5768
5769 pfm_unprotect_ctx_ctxsw(ctx, flags);
5770
5771 pfm_context_free(ctx);
5772 return;
5773 }
5774
5775
5776
5777
5778 ia64_srlz_d();
5779 psr = pfm_get_psr();
5780
5781 BUG_ON(psr & (IA64_PSR_I));
5782
5783
5784
5785
5786
5787
5788
5789
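	/*
	 * stop monitoring: after this instruction the task can no longer
	 * generate counter overflows
	 */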
5790 pfm_clear_psr_up();
5791
5792
5793
5794
5795 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5796
5797
5798
5799
5800
5801
5802 SET_PMU_OWNER(NULL, NULL);
5803
5804
5805
5806
5807
5808
5809 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5810
5811
5812
5813
5814
5815
5816 ctx->th_pmcs[0] = ia64_get_pmc(0);
5817
5818
5819
5820
5821 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5822
5823
5824
5825
5826
5827 pfm_unprotect_ctx_ctxsw(ctx, flags);
5828}
5829
5830#else
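/*
 * UP: only psr.up is touched at context switch; the PMD/PMC state is left
 * in the PMU and saved lazily by pfm_lazy_save_regs() only when another
 * context needs the hardware
 */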
5831void
5832pfm_save_regs(struct task_struct *task)
5833{
5834 pfm_context_t *ctx;
5835 u64 psr;
5836
5837 ctx = PFM_GET_CTX(task);
5838 if (ctx == NULL) return;
5839
5840
5841
5842
5843 psr = pfm_get_psr();
5844
5845 BUG_ON(psr & (IA64_PSR_I));
5846
5847
5848
5849
5850
5851
5852
5853
5854 pfm_clear_psr_up();
5855
5856
5857
5858
5859 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5860}
5861
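/*
 * UP only: save the previous owner's PMD/PMC0 state and drop its PMU
 * ownership, deferred until another context actually needs the PMU
 */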
5862static void
5863pfm_lazy_save_regs (struct task_struct *task)
5864{
5865 pfm_context_t *ctx;
5866 unsigned long flags;
5867
5868 { u64 psr = pfm_get_psr();
5869 BUG_ON(psr & IA64_PSR_UP);
5870 }
5871
5872 ctx = PFM_GET_CTX(task);
5873
5874
5875
5876
5877
5878
5879
5880
5881
5882
5883 PROTECT_CTX(ctx,flags);
5884
5885
5886
5887
5888
5889
5890
5891
5892 SET_PMU_OWNER(NULL, NULL);
5893
5894
5895
5896
5897 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5898
5899
5900
5901
5902
5903
5904 ctx->th_pmcs[0] = ia64_get_pmc(0);
5905
5906
5907
5908
5909 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5910
5911
5912
5913
5914
5915
5916 UNPROTECT_CTX(ctx,flags);
5917}
5918#endif
5919
5920#ifdef CONFIG_SMP
5921
5922
5923
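/*
 * context-switch-in path (SMP): reload the PMU state of the incoming task.
 * If this CPU still holds the context's last activation, only the registers
 * modified while the task was switched out are reloaded; otherwise the full
 * set (or, with fastctxsw, only the used registers) is restored.
 */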
5924void
5925pfm_load_regs (struct task_struct *task)
5926{
5927 pfm_context_t *ctx;
5928 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
5929 unsigned long flags;
5930 u64 psr, psr_up;
5931 int need_irq_resend;
5932
5933 ctx = PFM_GET_CTX(task);
5934 if (unlikely(ctx == NULL)) return;
5935
5936 BUG_ON(GET_PMU_OWNER());
5937
5938
5939
5940
5941 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
5942
5943
5944
5945
5946
5947
5948 flags = pfm_protect_ctx_ctxsw(ctx);
5949 psr = pfm_get_psr();
5950
5951 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
5952
5953 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
5954 BUG_ON(psr & IA64_PSR_I);
5955
5956 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
5957 struct pt_regs *regs = task_pt_regs(task);
5958
5959 BUG_ON(ctx->ctx_smpl_hdr);
5960
5961 pfm_force_cleanup(ctx, regs);
5962
5963 pfm_unprotect_ctx_ctxsw(ctx, flags);
5964
5965
5966
5967
5968 pfm_context_free(ctx);
5969
5970 return;
5971 }
5972
5973
5974
5975
5976
5977 if (ctx->ctx_fl_using_dbreg) {
5978 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
5979 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
5980 }
5981
5982
5983
5984 psr_up = ctx->ctx_saved_psr_up;
5985
5986
5987
5988
5989
5990 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
5991
5992
5993
5994
5995 pmc_mask = ctx->ctx_reload_pmcs[0];
5996 pmd_mask = ctx->ctx_reload_pmds[0];
5997
5998 } else {
5999
6000
6001
6002
6003
6004
6005 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6006
6007
6008
6009
6010
6011
6012
6013
6014 pmc_mask = ctx->ctx_all_pmcs[0];
6015 }
6016
6017
6018
6019
6020
6021
6022
6023 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6024 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6025
6026
6027
6028
6029
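	/*
	 * an overflow was pending when the state was saved: reinstall PMC0 so
	 * the interrupt is raised again (and resend it explicitly on PMUs that
	 * require it)
	 */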
6030 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6031
6032
6033
6034
6035 ia64_set_pmc(0, ctx->th_pmcs[0]);
6036 ia64_srlz_d();
6037 ctx->th_pmcs[0] = 0UL;
6038
6039
6040
6041
6042 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6043
6044 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6045 }
6046
6047
6048
6049
6050 ctx->ctx_reload_pmcs[0] = 0UL;
6051 ctx->ctx_reload_pmds[0] = 0UL;
6052
6053 SET_LAST_CPU(ctx, smp_processor_id());
6054
6055
6056
6057
6058 INC_ACTIVATION();
6059
6060
6061
6062 SET_ACTIVATION(ctx);
6063
6064
6065
6066
6067 SET_PMU_OWNER(task, ctx);
6068
6069
6070
6071
6072
6073
6074
6075 if (likely(psr_up)) pfm_set_psr_up();
6076
6077
6078
6079
6080 pfm_unprotect_ctx_ctxsw(ctx, flags);
6081}
6082#else
6083
6084
6085
6086
6087void
6088pfm_load_regs (struct task_struct *task)
6089{
6090 pfm_context_t *ctx;
6091 struct task_struct *owner;
6092 unsigned long pmd_mask, pmc_mask;
6093 u64 psr, psr_up;
6094 int need_irq_resend;
6095
6096 owner = GET_PMU_OWNER();
6097 ctx = PFM_GET_CTX(task);
6098 psr = pfm_get_psr();
6099
6100 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6101 BUG_ON(psr & IA64_PSR_I);
6102
6103
6104
6105
6106
6107
6108
6109
6110
6111 if (ctx->ctx_fl_using_dbreg) {
6112 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6113 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6114 }
6115
6116
6117
6118
6119 psr_up = ctx->ctx_saved_psr_up;
6120 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6121
6122
6123
6124
6125
6126
6127
6128
6129
6130 if (likely(owner == task)) {
6131 if (likely(psr_up)) pfm_set_psr_up();
6132 return;
6133 }
6134
6135
6136
6137
6138
6139
6140
6141 if (owner) pfm_lazy_save_regs(owner);
6142
6143
6144
6145
6146
6147
6148
6149 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6150
6151
6152
6153
6154
6155
6156
6157
6158 pmc_mask = ctx->ctx_all_pmcs[0];
6159
6160 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6161 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6162
6163
6164
6165
6166
6167 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6168
6169
6170
6171
6172 ia64_set_pmc(0, ctx->th_pmcs[0]);
6173 ia64_srlz_d();
6174
6175 ctx->th_pmcs[0] = 0UL;
6176
6177
6178
6179
6180 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6181
6182 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6183 }
6184
6185
6186
6187
6188 SET_PMU_OWNER(task, ctx);
6189
6190
6191
6192
6193
6194
6195
6196 if (likely(psr_up)) pfm_set_psr_up();
6197}
6198#endif
6199
6200
6201
6202
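/*
 * flush the hardware PMD values into the context, e.g. when the context is
 * detached or its task exits. Monitoring must already be stopped. When the
 * PMU is not accessible from this CPU, the values saved in the thread state
 * are used instead.
 */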
6203static void
6204pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6205{
6206 u64 pmc0;
6207 unsigned long mask2, val, pmd_val, ovfl_val;
6208 int i, can_access_pmu = 0;
6209 int is_self;
6210
6211
6212
6213
6214
6215 is_self = ctx->ctx_task == task ? 1 : 0;
6216
6217
6218
6219
6220
6221
6222
6223
6224 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6225 if (can_access_pmu) {
6226
6227
6228
6229
6230
6231
6232
6233
6234 SET_PMU_OWNER(NULL, NULL);
6235 DPRINT(("releasing ownership\n"));
6236
6237
6238
6239
6240
6241
6242 ia64_srlz_d();
6243 pmc0 = ia64_get_pmc(0);
6244
6245
6246
6247
6248 pfm_unfreeze_pmu();
6249 } else {
6250 pmc0 = ctx->th_pmcs[0];
6251
6252
6253
6254 ctx->th_pmcs[0] = 0;
6255 }
6256 ovfl_val = pmu_conf->ovfl_val;
6257
6258
6259
6260
6261
6262
6263 mask2 = ctx->ctx_used_pmds[0];
6264
6265 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6266
6267 for (i = 0; mask2; i++, mask2>>=1) {
6268
6269
6270 if ((mask2 & 0x1) == 0) continue;
6271
6272
6273
6274
6275 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6276
6277 if (PMD_IS_COUNTING(i)) {
6278 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6279 task_pid_nr(task),
6280 i,
6281 ctx->ctx_pmds[i].val,
6282 val & ovfl_val));
6283
6284
6285
6286
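			/*
			 * counters are virtualized to 64 bits: the upper bits
			 * live in ctx_pmds[].val, the lower ovfl_val bits in
			 * the hardware register
			 */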
6287 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6288
6289
6290
6291
6292
6293
6294 pmd_val = 0UL;
6295
6296
6297
6298
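			/*
			 * the counter also overflowed while the PMU was frozen:
			 * account for one extra wrap-around
			 */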
6299 if (pmc0 & (1UL << i)) {
6300 val += 1 + ovfl_val;
6301 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6302 }
6303 }
6304
6305 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6306
6307 if (is_self) ctx->th_pmds[i] = pmd_val;
6308
6309 ctx->ctx_pmds[i].val = val;
6310 }
6311}
6312
6313static struct irqaction perfmon_irqaction = {
6314 .handler = pfm_interrupt_handler,
6315 .name = "perfmon"
6316};
6317
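/*
 * support for an alternate PMU interrupt handler (another in-kernel user of
 * the PMU, e.g. a profiler): monitoring is stopped and the PMU frozen on
 * every CPU before the interrupt is handed over, and restored on removal
 */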
6318static void
6319pfm_alt_save_pmu_state(void *data)
6320{
6321 struct pt_regs *regs;
6322
6323 regs = task_pt_regs(current);
6324
6325 DPRINT(("called\n"));
6326
6327
6328
6329
6330
6331 pfm_clear_psr_up();
6332 pfm_clear_psr_pp();
6333 ia64_psr(regs)->pp = 0;
6334
6335
6336
6337
6338
6339 pfm_freeze_pmu();
6340
6341 ia64_srlz_d();
6342}
6343
6344void
6345pfm_alt_restore_pmu_state(void *data)
6346{
6347 struct pt_regs *regs;
6348
6349 regs = task_pt_regs(current);
6350
6351 DPRINT(("called\n"));
6352
6353
6354
6355
6356
6357 pfm_clear_psr_up();
6358 pfm_clear_psr_pp();
6359 ia64_psr(regs)->pp = 0;
6360
6361
6362
6363
6364 pfm_unfreeze_pmu();
6365
6366 ia64_srlz_d();
6367}
6368
6369int
6370pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6371{
6372 int ret, i;
6373 int reserve_cpu;
6374
6375
6376 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6377
6378
6379 if (pfm_alt_intr_handler) return -EBUSY;
6380
6381
6382 if (!spin_trylock(&pfm_alt_install_check)) {
6383 return -EBUSY;
6384 }
6385
6386
6387 for_each_online_cpu(reserve_cpu) {
6388 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6389 if (ret) goto cleanup_reserve;
6390 }
6391
6392
6393 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
6394 if (ret) {
6395 DPRINT(("on_each_cpu() failed: %d\n", ret));
6396 goto cleanup_reserve;
6397 }
6398
6399
6400 pfm_alt_intr_handler = hdl;
6401
6402 spin_unlock(&pfm_alt_install_check);
6403
6404 return 0;
6405
6406cleanup_reserve:
6407 for_each_online_cpu(i) {
6408
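		/* undo only the reservations that actually succeeded */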
6409 if (i >= reserve_cpu) break;
6410
6411 pfm_unreserve_session(NULL, 1, i);
6412 }
6413
6414 spin_unlock(&pfm_alt_install_check);
6415
6416 return ret;
6417}
6418EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6419
6420int
6421pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6422{
6423 int i;
6424 int ret;
6425
6426 if (hdl == NULL) return -EINVAL;
6427
6428
6429 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6430
6431
6432 if (!spin_trylock(&pfm_alt_install_check)) {
6433 return -EBUSY;
6434 }
6435
6436 pfm_alt_intr_handler = NULL;
6437
6438 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
6439 if (ret) {
6440 DPRINT(("on_each_cpu() failed: %d\n", ret));
6441 }
6442
6443 for_each_online_cpu(i) {
6444 pfm_unreserve_session(NULL, 1, i);
6445 }
6446
6447 spin_unlock(&pfm_alt_install_check);
6448
6449 return 0;
6450}
6451EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
6452
6453
6454
6455
6456static int init_pfm_fs(void);
6457
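/*
 * select the PMU description matching this CPU, either via an explicit
 * probe() hook or by CPU family (0xff acts as a wildcard)
 */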
6458static int __init
6459pfm_probe_pmu(void)
6460{
6461 pmu_config_t **p;
6462 int family;
6463
6464 family = local_cpu_data->family;
6465 p = pmu_confs;
6466
6467 while(*p) {
6468 if ((*p)->probe) {
6469 if ((*p)->probe() == 0) goto found;
6470 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6471 goto found;
6472 }
6473 p++;
6474 }
6475 return -1;
6476found:
6477 pmu_conf = *p;
6478 return 0;
6479}
6480
6481int __init
6482pfm_init(void)
6483{
6484 unsigned int n, n_counters, i;
6485
6486 printk("perfmon: version %u.%u IRQ %u\n",
6487 PFM_VERSION_MAJ,
6488 PFM_VERSION_MIN,
6489 IA64_PERFMON_VECTOR);
6490
6491 if (pfm_probe_pmu()) {
6492 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6493 local_cpu_data->family);
6494 return -ENODEV;
6495 }
6496
6497
6498
6499
6500
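	/*
	 * compute the implemented PMC/PMD bitmasks and register counts from
	 * the PMU description tables
	 */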
6501 n = 0;
6502 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6503 if (PMC_IS_IMPL(i) == 0) continue;
6504 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6505 n++;
6506 }
6507 pmu_conf->num_pmcs = n;
6508
6509 n = 0; n_counters = 0;
6510 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6511 if (PMD_IS_IMPL(i) == 0) continue;
6512 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6513 n++;
6514 if (PMD_IS_COUNTING(i)) n_counters++;
6515 }
6516 pmu_conf->num_pmds = n;
6517 pmu_conf->num_counters = n_counters;
6518
6519
6520
6521
6522 if (pmu_conf->use_rr_dbregs) {
6523 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6524 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6525 pmu_conf = NULL;
6526 return -1;
6527 }
6528 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6529 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
6530 pmu_conf = NULL;
6531 return -1;
6532 }
6533 }
6534
6535 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6536 pmu_conf->pmu_name,
6537 pmu_conf->num_pmcs,
6538 pmu_conf->num_pmds,
6539 pmu_conf->num_counters,
6540 ffz(pmu_conf->ovfl_val));
6541
6542
6543 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6544 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6545 pmu_conf = NULL;
6546 return -1;
6547 }
6548
6549
6550
6551
6552 perfmon_dir = proc_create_seq("perfmon", S_IRUGO, NULL, &pfm_seq_ops);
6553 if (perfmon_dir == NULL) {
6554 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6555 pmu_conf = NULL;
6556 return -1;
6557 }
6558
6559
6560
6561
6562 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6563
6564
6565
6566
6567 spin_lock_init(&pfm_sessions.pfs_lock);
6568 spin_lock_init(&pfm_buffer_fmt_lock);
6569
6570 init_pfm_fs();
6571
6572 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6573
6574 return 0;
6575}
6576
6577__initcall(pfm_init);
6578
6579
6580
6581
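/*
 * per-CPU initialization: clear psr.pp/psr.up, unfreeze the PMU, register
 * the perfmon interrupt once and program the PMU interrupt vector
 */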
6582void
6583pfm_init_percpu (void)
6584{
6585 static int first_time=1;
6586
6587
6588
6589
6590 pfm_clear_psr_pp();
6591 pfm_clear_psr_up();
6592
6593
6594
6595
6596 pfm_unfreeze_pmu();
6597
6598 if (first_time) {
6599 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6600 first_time=0;
6601 }
6602
6603 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6604 ia64_srlz_d();
6605}
6606
6607
6608
6609
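/*
 * debugging helper: dump the perfmon and PMU state of the current CPU when
 * monitoring appears to be active
 */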
6610void
6611dump_pmu_state(const char *from)
6612{
6613 struct task_struct *task;
6614 struct pt_regs *regs;
6615 pfm_context_t *ctx;
6616 unsigned long psr, dcr, info, flags;
6617 int i, this_cpu;
6618
6619 local_irq_save(flags);
6620
6621 this_cpu = smp_processor_id();
6622 regs = task_pt_regs(current);
6623 info = PFM_CPUINFO_GET();
6624 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6625
6626 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6627 local_irq_restore(flags);
6628 return;
6629 }
6630
6631 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6632 this_cpu,
6633 from,
6634 task_pid_nr(current),
6635 regs->cr_iip,
6636 current->comm);
6637
6638 task = GET_PMU_OWNER();
6639 ctx = GET_PMU_CTX();
6640
6641 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6642
6643 psr = pfm_get_psr();
6644
6645 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6646 this_cpu,
6647 ia64_get_pmc(0),
6648 psr & IA64_PSR_PP ? 1 : 0,
6649 psr & IA64_PSR_UP ? 1 : 0,
6650 dcr & IA64_DCR_PP ? 1 : 0,
6651 info,
6652 ia64_psr(regs)->up,
6653 ia64_psr(regs)->pp);
6654
6655 ia64_psr(regs)->up = 0;
6656 ia64_psr(regs)->pp = 0;
6657
6658 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6659 if (PMC_IS_IMPL(i) == 0) continue;
6660 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6661 }
6662
6663 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6664 if (PMD_IS_IMPL(i) == 0) continue;
6665 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6666 }
6667
6668 if (ctx) {
6669 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
6670 this_cpu,
6671 ctx->ctx_state,
6672 ctx->ctx_smpl_vaddr,
6673 ctx->ctx_smpl_hdr,
6674 ctx->ctx_msgq_head,
6675 ctx->ctx_msgq_tail,
6676 ctx->ctx_saved_psr_up);
6677 }
6678 local_irq_restore(flags);
6679}
6680
6681
6682
6683
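/*
 * called at fork time: a perfmon context is never inherited, so clear the
 * child's per-thread perfmon state
 */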
6684void
6685pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6686{
6687 struct thread_struct *thread;
6688
6689 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6690
6691 thread = &task->thread;
6692
6693
6694
6695
6696 thread->pfm_context = NULL;
6697
6698 PFM_SET_WORK_PENDING(task, 0);
6699
6700
6701
6702
6703}
6704#else
6705asmlinkage long
6706sys_perfmonctl (int fd, int cmd, void *arg, int count)
6707{
6708 return -ENOSYS;
6709}
6710#endif
6711