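/*
 * perfmon: support for the IA-64 Performance Monitoring Unit (PMU).
 *
 * This file implements the perfmon-2 subsystem: monitoring contexts
 * exposed as pfmfs file descriptors, programming of the PMC/PMD
 * registers, per-task and system-wide sessions, overflow notification
 * messages and the optional kernel-level sampling buffer.
 */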

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
#include <linux/tracehook.h>
#include <linux/slab.h>
#include <linux/cpu.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <linux/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON
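
/*
 * possible context states (ctx_state)
 */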
#define PFM_CTX_UNLOADED	1
#define PFM_CTX_LOADED		2
#define PFM_CTX_MASKED		3
#define PFM_CTX_ZOMBIE		4

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64
#define PFM_NUM_PMD_REGS	64

#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)
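
/*
 * type of a PMU register (bitmask): PFM_REG_IMPL marks a register as
 * implemented, PFM_REG_END terminates a pmc/pmd description table, and
 * the higher bits distinguish plain monitors, counting monitors,
 * control, configuration and buffer registers.
 */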
#define PFM_REG_NOTIMPL		0x0
#define PFM_REG_IMPL		0x1
#define PFM_REG_END		0x2
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR)
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)

#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

#define PMC_IS_IMPL(i)	  (i < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  (i < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	  IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	  IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5

#define CTX_USED_PMD(ctx, mask)	(ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c)	(((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask)	(ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx,n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx,n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg == 1)
#define PFM_CODE_RR	0
#define PFM_DATA_RR	1

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))
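
/*
 * Context locking. The ctx_lock spinlock is taken with interrupts
 * disabled (PROTECT_CTX/UNPROTECT_CTX) because the PMU overflow
 * interrupt handler also takes it; the NOIRQ variants are for code
 * paths that already run with interrupts masked. Typical usage:
 *
 *	unsigned long flags;
 *
 *	PROTECT_CTX(ctx, flags);
 *	... inspect or modify ctx state ...
 *	UNPROTECT_CTX(ctx, flags);
 */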
#define PROTECT_CTX(c, f) \
	do {  \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)

#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)	spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)	spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

#define PMC0_HAS_OVFL(cmp0)  (cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif
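
/*
 * software-maintained state of a counting PMD: 'val' carries the upper
 * bits of the virtualized 64-bit counter while the hardware register
 * holds the implemented low bits; reset values, dependent/sampled PMD
 * bitmasks and an optional randomization seed/mask are kept per counter.
 */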
typedef struct {
	unsigned long	val;
	unsigned long	lval;
	unsigned long	long_reset;
	unsigned long	short_reset;
	unsigned long	reset_pmds[4];
	unsigned long	smpl_pmds[4];
	unsigned long	seed;
	unsigned long	mask;
	unsigned int	flags;
	unsigned long	eventid;
} pfm_counter_t;

typedef struct {
	unsigned int block:1;
	unsigned int system:1;
	unsigned int using_dbreg:1;
	unsigned int is_sampling:1;
	unsigned int excl_idle:1;
	unsigned int going_zombie:1;
	unsigned int trap_reason:2;
	unsigned int no_msg:1;
	unsigned int can_restart:1;
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0
#define PFM_TRAP_REASON_BLOCK	0x1
#define PFM_TRAP_REASON_RESET	0x2
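
/*
 * perfmon context: one per monitoring session, reachable from the
 * pfmfs file descriptor's private_data and, when loaded, from the
 * monitored task's thread structure
 */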
typedef struct pfm_context {
	spinlock_t		ctx_lock;

	pfm_context_flags_t	ctx_flags;
	unsigned int		ctx_state;

	struct task_struct	*ctx_task;

	unsigned long		ctx_ovfl_regs[4];

	struct completion	ctx_restart_done;

	unsigned long		ctx_used_pmds[4];
	unsigned long		ctx_all_pmds[4];
	unsigned long		ctx_reload_pmds[4];

	unsigned long		ctx_all_pmcs[4];
	unsigned long		ctx_reload_pmcs[4];
	unsigned long		ctx_used_monitors[4];

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];

	unsigned int		ctx_used_ibrs[1];
	unsigned int		ctx_used_dbrs[1];
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];

	unsigned long		ctx_saved_psr_up;

	unsigned long		ctx_last_activation;
	unsigned int		ctx_last_cpu;
	unsigned int		ctx_cpu;

	int			ctx_fd;
	pfm_ovfl_arg_t		ctx_ovfl_arg;

	pfm_buffer_fmt_t	*ctx_buf_fmt;
	void			*ctx_smpl_hdr;
	unsigned long		ctx_smpl_size;
	void			*ctx_smpl_vaddr;

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;
} pfm_context_t;

#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = v; } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking
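
/*
 * global bookkeeping of perfmon sessions, protected by pfs_lock
 * (see LOCK_PFS/UNLOCK_PFS)
 */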
typedef struct {
	spinlock_t		pfs_lock;

	unsigned int		pfs_task_sessions;
	unsigned int		pfs_sys_sessions;
	unsigned int		pfs_sys_use_dbregs;
	unsigned int		pfs_ptrace_use_dbregs;
	struct task_struct	*pfs_sys_session[NR_CPUS];
} pfm_session_t;
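
/*
 * per-register description: type, default/reserved bits, optional
 * read/write checker callbacks and register dependencies; one table
 * each for PMCs and PMDs per PMU model
 */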
typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);

typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;
	unsigned long		reserved_mask;
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)
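
/*
 * per-PMU-model configuration: overflow mask, register description
 * tables, implemented register bitmasks and capabilities. One instance
 * per supported PMU; probe() selects the matching one at init time.
 */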
typedef struct {
	unsigned long  ovfl_val;

	pfm_reg_desc_t *pmc_desc;
	pfm_reg_desc_t *pmd_desc;

	unsigned int   num_pmcs;
	unsigned int   num_pmds;
	unsigned long  impl_pmcs[4];
	unsigned long  impl_pmds[4];

	char	       *pmu_name;
	unsigned int   pmu_family;
	unsigned int   flags;
	unsigned int   num_ibrs;
	unsigned int   num_dbrs;
	unsigned int   num_counters;
	int            (*probe)(void);
	unsigned int   use_rr_dbregs:1;
} pmu_config_t;

#define PFM_PMU_IRQ_RESEND	1
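
/*
 * IA-64 debug register (IBR/DBR) bit layouts, used when monitoring is
 * constrained to code or data ranges
 */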
typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long  val;
	ibr_mask_reg_t ibr;
	dbr_mask_reg_t dbr;
} dbreg_t;
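
/*
 * entry in the perfmon command table (pfm_cmd_tab): handler, argument
 * count and size, and PFM_CMD_* flags describing whether arguments are
 * copied in (and back out) and whether the target task must be stopped
 */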
typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01
#define PFM_CMD_ARG_READ	0x02
#define PFM_CMD_ARG_RW		0x04
#define PFM_CMD_STOP		0x08

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1

typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;
	unsigned long pfm_replay_ovfl_intr_count;
	unsigned long pfm_ovfl_intr_count;
	unsigned long pfm_ovfl_intr_cycles;
	unsigned long pfm_ovfl_intr_cycles_min;
	unsigned long pfm_ovfl_intr_cycles_max;
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;

static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t  *pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static struct ctl_table pfm_ctl_table[] = {
	{
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{}
};
static struct ctl_table pfm_sysctl_dir[] = {
	{
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};
static struct ctl_table pfm_sysctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)	per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}
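
/*
 * pfmfs: internal pseudo filesystem providing the anonymous inodes
 * behind perfmon context file descriptors
 */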
static const struct dentry_operations pfmfs_dentry_operations;

static struct dentry *
pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
			    PFMFS_MAGIC);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.mount    = pfmfs_mount,
	.kill_sb  = kill_anon_super,
};
MODULE_ALIAS_FS("pfmfs");

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

static const struct file_operations pfm_file_ops;

#ifndef CONFIG_SMP
static void pfm_lazy_save_regs(struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[] = {
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);
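
/*
 * psr.pp gates privileged (system-wide) monitoring and psr.up gates
 * user-level (per-task) monitoring; each update is followed by the
 * required serialization. Writing pmc[0] with bit 0 set freezes the
 * PMU, clearing it unfreezes it.
 */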
static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0, 1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0, 0UL);
	ia64_srlz_d();
}

static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i = 0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i = 0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}
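
/*
 * PMD virtualization: counting PMDs are extended to 64 bits in
 * software. The value seen by callers is
 *
 *	ctx_pmds[i].val + (PMD[i] & pmu_conf->ovfl_val)
 *
 * where ovfl_val masks the counter bits actually implemented in
 * hardware. The PMD is assumed to be a counting one; no check is made.
 */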
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;

	ia64_set_pmd(i, val & ovfl_val);
}

static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}
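
/*
 * allocate a zeroed, page-aligned buffer with vzalloc() and mark every
 * page reserved so the area can later be remapped read-only into user
 * space (see pfm_remap_buffer())
 */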
static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vzalloc(size);
	if (mem) {
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
}

static pfm_context_t *
pfm_context_alloc(int ctx_flags)
{
	pfm_context_t *ctx;

	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		DPRINT(("alloc ctx @%p\n", ctx));

		spin_lock_init(&ctx->ctx_lock);

		ctx->ctx_state = PFM_CTX_UNLOADED;

		ctx->ctx_fl_block  = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
		ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1 : 0;
		ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1 : 0;

		init_completion(&ctx->ctx_restart_done);

		ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
		SET_LAST_CPU(ctx, -1);

		ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
		init_waitqueue_head(&ctx->ctx_msgq_wait);
		init_waitqueue_head(&ctx->ctx_zombieq);
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}
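
/*
 * stop monitoring for a task without unloading its context: counting
 * PMDs are folded into their 64-bit software values and the privilege
 * level field (bits 0-3) of every used monitor PMC is cleared, so the
 * counters stop counting. Expected to run on the CPU that holds the
 * task's PMU state, with the context locked.
 */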
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));

	ovfl_mask = pmu_conf->ovfl_val;

	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask >>= 1) {

		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}

	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}

	ia64_srlz_d();
}

static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();

	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}

	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask >>= 1) {

		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}

	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n",
					task_pid_nr(task), i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}

static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i = 0; mask; i++, mask >>= 1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i = 0; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i = 0; mask; i++, mask >>= 1) {

		val = ctx->ctx_pmds[i].val;

		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}

static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i = 0; mask; i++, mask >>= 1) {
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i = 0; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}

static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	if (fmt->fmt_handler == NULL) return -EINVAL;

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
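
/*
 * session accounting: at most one system-wide session per CPU, and
 * system-wide sessions cannot coexist with per-task sessions. While
 * sessions exist, idle polling is forced so CPU power-saving states
 * do not disturb the PMU.
 */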
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;

	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	cpu_idle_poll_ctrl(true);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}

static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;

	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;

		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	cpu_idle_poll_ctrl(false);

	UNLOCK_PFS(flags);

	return 0;
}

static int
pfm_remove_smpl_mapping(void *vaddr, unsigned long size)
{
	struct task_struct *task = current;
	int r;

	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = vm_munmap((unsigned long)vaddr, size);

	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}

#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}

static struct vfsmount *pfmfs_mnt __read_mostly;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for (;;) {

		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if (PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		ret = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK) break;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if (copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}

static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
		return 0;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static long
pfm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper(fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		task_pid_nr(current),
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

#ifdef CONFIG_SMP
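/*
 * for system-wide sessions pinned to another CPU: pfm_syswide_force_stop()
 * runs on the monitored CPU via smp_call_function_single() to unload the
 * context there, since PMU state is strictly per-CPU
 */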
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));

	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif

static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	if (task == current) {
#ifdef CONFIG_SMP
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));

			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			local_irq_save(flags);

		} else
#endif
		{

			DPRINT(("forcing unload\n"));

			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);

	return 0;
}
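
/*
 * called when the last reference to the file is released (fput()==0).
 * If the task that created the context is blocked inside an overflow
 * restart wait, it is first flagged going-zombie and woken through
 * ctx_restart_done; close() then sleeps on ctx_zombieq until the task
 * has unloaded the context before resources are torn down.
 */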
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	if (state == PFM_CTX_UNLOADED) goto doit;

	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		ctx->ctx_fl_going_zombie = 1;

		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));

		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	state = ctx->ctx_state;

	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;

		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	filp->private_data = NULL;

	UNPROTECT_CTX(ctx, flags);

	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	if (free_possible) pfm_context_free(ctx);

	return 0;
}

static const struct file_operations pfm_file_ops = {
	.llseek		= no_llseek,
	.read		= pfm_read,
	.write		= pfm_write,
	.poll		= pfm_poll,
	.unlocked_ioctl = pfm_ioctl,
	.fasync		= pfm_fasync,
	.release	= pfm_close,
	.flush		= pfm_flush
};

static char *pfmfs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "pfm:[%lu]",
			     d_inode(dentry)->i_ino);
}

static const struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = always_delete_dentry,
	.d_dname = pfmfs_dname,
};

static struct file *
pfm_alloc_file(pfm_context_t *ctx)
{
	struct file *file;
	struct inode *inode;
	struct path path;
	struct qstr this = { .name = "" };

	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current_fsuid();
	inode->i_gid  = current_fsgid();

	path.dentry = d_alloc(pfmfs_mnt->mnt_root, &this);
	if (!path.dentry) {
		iput(inode);
		return ERR_PTR(-ENOMEM);
	}
	path.mnt = mntget(pfmfs_mnt);

	d_add(path.dentry, inode);

	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
	if (IS_ERR(file)) {
		path_put(&path);
		return file;
	}

	file->f_flags = O_RDONLY;
	file->private_data = ctx;

	return file;
}

static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}
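
/*
 * sampling buffer allocation: the buffer is vmalloc'ed in the kernel
 * and remapped page by page, read-only, into the address space of the
 * creating task. The aligned size is charged against RLIMIT_MEMLOCK
 * since the pages are effectively pinned.
 */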
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	if (size > task_rlimit(task, RLIMIT_MEMLOCK))
		return -ENOMEM;

	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}
	INIT_LIST_HEAD(&vma->anon_vma_chain);

	vma->vm_mm	  = mm;
	vma->vm_file	  = get_file(filp);
	vma->vm_flags	  = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
	vma->vm_page_prot = PAGE_READONLY;

	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size;

	down_write(&task->mm->mmap_sem);

	vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
	if (IS_ERR_VALUE(vma->vm_start)) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	insert_vm_struct(mm, vma);

	vm_stat_account(vma->vm_mm, vma->vm_flags, vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}
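
/*
 * attaching to another task follows ptrace-like permission rules: all
 * of the target's uids/gids must match the caller's, unless the caller
 * has CAP_SYS_PTRACE
 */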
static int
pfm_bad_permissions(struct task_struct *task)
{
	const struct cred *tcred;
	kuid_t uid = current_uid();
	kgid_t gid = current_gid();
	int ret;

	rcu_read_lock();
	tcred = __task_cred(task);

	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		from_kuid(&init_user_ns, uid),
		from_kgid(&init_user_ns, gid),
		from_kuid(&init_user_ns, tcred->euid),
		from_kuid(&init_user_ns, tcred->suid),
		from_kuid(&init_user_ns, tcred->uid),
		from_kgid(&init_user_ns, tcred->egid),
		from_kgid(&init_user_ns, tcred->sgid)));

	ret = ((!uid_eq(uid, tcred->euid))
	       || (!uid_eq(uid, tcred->suid))
	       || (!uid_eq(uid, tcred->uid))
	       || (!gid_eq(gid, tcred->egid))
	       || (!gid_eq(gid, tcred->sgid))
	       || (!gid_eq(gid, tcred->gid))) && !capable(CAP_SYS_PTRACE);

	rcu_read_unlock();
	return ret;
}

static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {

		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}

	return 0;
}

static int
pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
		return -EINVAL;
	}

	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	ctx->ctx_buf_fmt = fmt;
	ctx->ctx_fl_is_sampling = 1;

	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
		if (ret) goto error;

		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}

static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	for (i = 1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}

	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0], ctx->ctx_all_pmds[0]));

	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}

static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}

	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}

static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
		return -EPERM;
	}

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	if (task == current) return 0;

	if (!task_is_stopped_or_traced(task)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
		return -EBUSY;
	}

	wait_task_inactive(task, 0);

	return 0;
}

static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	if (pid < 2) return -EPERM;

	if (pid != task_pid_vnr(current)) {

		read_lock(&tasklist_lock);

		p = find_task_by_vpid(pid);

		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}
2634
2635
2636
2637static int
2638pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2639{
2640 pfarg_context_t *req = (pfarg_context_t *)arg;
2641 struct file *filp;
2642 struct path path;
2643 int ctx_flags;
2644 int fd;
2645 int ret;
2646
2647
2648 ret = pfarg_is_sane(current, req);
2649 if (ret < 0)
2650 return ret;
2651
2652 ctx_flags = req->ctx_flags;
2653
2654 ret = -ENOMEM;
2655
2656 fd = get_unused_fd_flags(0);
2657 if (fd < 0)
2658 return fd;
2659
2660 ctx = pfm_context_alloc(ctx_flags);
2661 if (!ctx)
2662 goto error;
2663
2664 filp = pfm_alloc_file(ctx);
2665 if (IS_ERR(filp)) {
2666 ret = PTR_ERR(filp);
2667 goto error_file;
2668 }
2669
2670 req->ctx_fd = ctx->ctx_fd = fd;
2671
2672
2673
2674
2675 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2676 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2677 if (ret)
2678 goto buffer_error;
2679 }
2680
2681 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2682 ctx,
2683 ctx_flags,
2684 ctx->ctx_fl_system,
2685 ctx->ctx_fl_block,
2686 ctx->ctx_fl_excl_idle,
2687 ctx->ctx_fl_no_msg,
2688 ctx->ctx_fd));
2689
2690
2691
2692
2693 pfm_reset_pmu_state(ctx);
2694
2695 fd_install(fd, filp);
2696
2697 return 0;
2698
2699buffer_error:
2700 path = filp->f_path;
2701 put_filp(filp);
2702 path_put(&path);
2703
2704 if (ctx->ctx_buf_fmt) {
2705 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2706 }
2707error_file:
2708 pfm_context_free(ctx);
2709
2710error:
2711 put_unused_fd(fd);
2712 return ret;
2713}
2714
2715static inline unsigned long
2716pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2717{
2718 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2719 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2720 extern unsigned long carta_random32 (unsigned long seed);
2721
2722 if (reg->flags & PFM_REGFL_RANDOM) {
2723 new_seed = carta_random32(old_seed);
2724 val -= (old_seed & mask);
2725 if ((mask >> 32) != 0)
2726
2727 new_seed |= carta_random32(old_seed >> 32) << 32;
2728 reg->seed = new_seed;
2729 }
2730 reg->lval = val;
2731 return val;
2732}
2733
2734static void
2735pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2736{
2737 unsigned long mask = ovfl_regs[0];
2738 unsigned long reset_others = 0UL;
2739 unsigned long val;
2740 int i;
2741
2742
2743
2744
2745 mask >>= PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2751 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2752
2753 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2754 }
2755
2756
2757
2758
	for (i = 0; reset_others; i++, reset_others >>= 1) {
2760
2761 if ((reset_others & 0x1) == 0) continue;
2762
2763 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2764
2765 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2766 is_long_reset ? "long" : "short", i, val));
2767 }
2768}
2769
2770static void
2771pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2772{
2773 unsigned long mask = ovfl_regs[0];
2774 unsigned long reset_others = 0UL;
2775 unsigned long val;
2776 int i;
2777
2778 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2779
2780 if (ctx->ctx_state == PFM_CTX_MASKED) {
2781 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2782 return;
2783 }
2784
2785
2786
2787
2788 mask >>= PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {

		if ((mask & 0x1UL) == 0UL) continue;

		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2794 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2795
2796 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2797
2798 pfm_write_soft_counter(ctx, i, val);
2799 }
2800
2801
2802
2803
	for (i = 0; reset_others; i++, reset_others >>= 1) {
2805
2806 if ((reset_others & 0x1) == 0) continue;
2807
2808 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2809
2810 if (PMD_IS_COUNTING(i)) {
2811 pfm_write_soft_counter(ctx, i, val);
2812 } else {
2813 ia64_set_pmd(i, val);
2814 }
2815 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2816 is_long_reset ? "long" : "short", i, val));
2817 }
2818 ia64_srlz_d();
2819}
2820
2821static int
2822pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2823{
2824 struct task_struct *task;
2825 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2826 unsigned long value, pmc_pm;
2827 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2828 unsigned int cnum, reg_flags, flags, pmc_type;
2829 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2830 int is_monitor, is_counting, state;
2831 int ret = -EINVAL;
2832 pfm_reg_check_t wr_func;
2833#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2834
2835 state = ctx->ctx_state;
2836 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2837 is_system = ctx->ctx_fl_system;
2838 task = ctx->ctx_task;
2839 impl_pmds = pmu_conf->impl_pmds[0];
2840
2841 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2842
2843 if (is_loaded) {
2844
2845
2846
2847
2848
2849 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2850 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2851 return -EBUSY;
2852 }
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
2854 }
2855 expert_mode = pfm_sysctl.expert_mode;
2856
2857 for (i = 0; i < count; i++, req++) {
2858
2859 cnum = req->reg_num;
2860 reg_flags = req->reg_flags;
2861 value = req->reg_value;
2862 smpl_pmds = req->reg_smpl_pmds[0];
2863 reset_pmds = req->reg_reset_pmds[0];
2864 flags = 0;
2865
2866
2867 if (cnum >= PMU_MAX_PMCS) {
2868 DPRINT(("pmc%u is invalid\n", cnum));
2869 goto error;
2870 }
2871
2872 pmc_type = pmu_conf->pmc_desc[cnum].type;
2873 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2874 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2875 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2876
2877
2878
2879
2880
2881
2882 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2883 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2884 goto error;
2885 }
2886 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2887
2888
2889
2890
2891
2892 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2893 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2894 cnum,
2895 pmc_pm,
2896 is_system));
2897 goto error;
2898 }
2899
2900 if (is_counting) {
2901
2902
2903
2904
			/*
			 * enforce generation of an overflow interrupt (set pmc.oi)
			 */
			value |= 1 << PMU_PMC_OI;
2906
2907 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2908 flags |= PFM_REGFL_OVFL_NOTIFY;
2909 }
2910
2911 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2912
2913
2914 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2915 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2916 goto error;
2917 }
2918
2919
2920 if ((reset_pmds & impl_pmds) != reset_pmds) {
2921 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2922 goto error;
2923 }
2924 } else {
2925 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2926 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
2927 goto error;
2928 }
2929
2930 }
2931
2932
2933
2934
2935 if (likely(expert_mode == 0 && wr_func)) {
2936 ret = (*wr_func)(task, ctx, cnum, &value, regs);
2937 if (ret) goto error;
2938 ret = -EINVAL;
2939 }
2940
2941
2942
2943
2944 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
2945
2946
2947
2948
2949
2950
2951
2952
2953 if (is_counting) {
2954
2955
2956
2957 ctx->ctx_pmds[cnum].flags = flags;
2958
2959 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
2960 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
2961 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
2962
2963
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974 CTX_USED_PMD(ctx, reset_pmds);
2975 CTX_USED_PMD(ctx, smpl_pmds);
2976
2977
2978
2979
			/*
			 * while masked, forget any pending overflow on this counter only
			 */
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
2981 }
2982
2983
2984
2985
2986
2987 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
2988
2989
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3002
3003
3004
3005
3006 ctx->ctx_pmcs[cnum] = value;
3007
3008 if (is_loaded) {
3009
3010
3011
3012 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3013
3014
3015
3016
3017 if (can_access_pmu) {
3018 ia64_set_pmc(cnum, value);
3019 }
3020#ifdef CONFIG_SMP
3021 else {
3022
3023
3024
3025
3026
3027
3028
3029 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3030 }
3031#endif
3032 }
3033
3034 DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reloads_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3035 cnum,
3036 value,
3037 is_loaded,
3038 can_access_pmu,
3039 flags,
3040 ctx->ctx_all_pmcs[0],
3041 ctx->ctx_used_pmds[0],
3042 ctx->ctx_pmds[cnum].eventid,
3043 smpl_pmds,
3044 reset_pmds,
3045 ctx->ctx_reload_pmcs[0],
3046 ctx->ctx_used_monitors[0],
3047 ctx->ctx_ovfl_regs[0]));
3048 }
3049
3050
3051
3052
3053 if (can_access_pmu) ia64_srlz_d();
3054
3055 return 0;
3056error:
3057 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3058 return ret;
3059}
3060
3061static int
3062pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3063{
3064 struct task_struct *task;
3065 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3066 unsigned long value, hw_value, ovfl_mask;
3067 unsigned int cnum;
3068 int i, can_access_pmu = 0, state;
3069 int is_counting, is_loaded, is_system, expert_mode;
3070 int ret = -EINVAL;
3071 pfm_reg_check_t wr_func;
3072
3073
3074 state = ctx->ctx_state;
3075 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3076 is_system = ctx->ctx_fl_system;
3077 ovfl_mask = pmu_conf->ovfl_val;
3078 task = ctx->ctx_task;
3079
3080 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3081
3082
3083
3084
3085
3086 if (likely(is_loaded)) {
3087
3088
3089
3090
3091
3092 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3093 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3094 return -EBUSY;
3095 }
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3097 }
3098 expert_mode = pfm_sysctl.expert_mode;
3099
3100 for (i = 0; i < count; i++, req++) {
3101
3102 cnum = req->reg_num;
3103 value = req->reg_value;
3104
3105 if (!PMD_IS_IMPL(cnum)) {
3106 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3107 goto abort_mission;
3108 }
3109 is_counting = PMD_IS_COUNTING(cnum);
3110 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3111
3112
3113
3114
3115 if (unlikely(expert_mode == 0 && wr_func)) {
3116 unsigned long v = value;
3117
3118 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3119 if (ret) goto abort_mission;
3120
3121 value = v;
3122 ret = -EINVAL;
3123 }
3124
3125
3126
3127
3128 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3129
3130
3131
3132
3133 hw_value = value;
3134
3135
3136
3137
3138 if (is_counting) {
3139
3140
3141
3142 ctx->ctx_pmds[cnum].lval = value;
3143
3144
3145
3146
3147 if (is_loaded) {
3148 hw_value = value & ovfl_mask;
3149 value = value & ~ovfl_mask;
3150 }
3151 }
3152
3153
3154
3155 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3156 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3157
3158
3159
3160
3161 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3162 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3163
3164
3165
3166
3167 ctx->ctx_pmds[cnum].val = value;
3168
3169
3170
3171
3172
3173
3174
3175 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3176
3177
3178
3179
3180 CTX_USED_PMD(ctx, RDEP(cnum));
3181
3182
3183
3184
3185
3186 if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3188 }
3189
3190 if (is_loaded) {
3191
3192
3193
3194 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3195
3196
3197
3198
3199 if (can_access_pmu) {
3200 ia64_set_pmd(cnum, hw_value);
3201 } else {
3202#ifdef CONFIG_SMP
3203
3204
3205
3206
3207
3208 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3209#endif
3210 }
3211 }
3212
3213 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3214 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3215 cnum,
3216 value,
3217 is_loaded,
3218 can_access_pmu,
3219 hw_value,
3220 ctx->ctx_pmds[cnum].val,
3221 ctx->ctx_pmds[cnum].short_reset,
3222 ctx->ctx_pmds[cnum].long_reset,
3223 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3224 ctx->ctx_pmds[cnum].seed,
3225 ctx->ctx_pmds[cnum].mask,
3226 ctx->ctx_used_pmds[0],
3227 ctx->ctx_pmds[cnum].reset_pmds[0],
3228 ctx->ctx_reload_pmds[0],
3229 ctx->ctx_all_pmds[0],
3230 ctx->ctx_ovfl_regs[0]));
3231 }
3232
3233
3234
3235
3236 if (can_access_pmu) ia64_srlz_d();
3237
3238 return 0;
3239
3240abort_mission:
3241
3242
3243
3244 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3245 return ret;
3246}
3247
3248
3249
3250
3251
3252
3253
3254
3255
3256
3257static int
3258pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3259{
3260 struct task_struct *task;
3261 unsigned long val = 0UL, lval, ovfl_mask, sval;
3262 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3263 unsigned int cnum, reg_flags = 0;
3264 int i, can_access_pmu = 0, state;
3265 int is_loaded, is_system, is_counting, expert_mode;
3266 int ret = -EINVAL;
3267 pfm_reg_check_t rd_func;
3268
3269
3270
3271
3272
3273
3274 state = ctx->ctx_state;
3275 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3276 is_system = ctx->ctx_fl_system;
3277 ovfl_mask = pmu_conf->ovfl_val;
3278 task = ctx->ctx_task;
3279
3280 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3281
3282 if (likely(is_loaded)) {
3283
3284
3285
3286
3287
3288 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3289 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3290 return -EBUSY;
3291 }
3292
3293
3294
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3296
3297 if (can_access_pmu) ia64_srlz_d();
3298 }
3299 expert_mode = pfm_sysctl.expert_mode;
3300
3301 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3302 is_loaded,
3303 can_access_pmu,
3304 state));
3305
3306
3307
3308
3309
3310
3311 for (i = 0; i < count; i++, req++) {
3312
3313 cnum = req->reg_num;
3314 reg_flags = req->reg_flags;
3315
3316 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3317
3318
3319
3320
3321
3322
3323
3324
3325 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3326
3327 sval = ctx->ctx_pmds[cnum].val;
3328 lval = ctx->ctx_pmds[cnum].lval;
3329 is_counting = PMD_IS_COUNTING(cnum);
3330
3331
3332
3333
3334
3335
		if (can_access_pmu) {
3337 val = ia64_get_pmd(cnum);
3338 } else {
3339
3340
3341
3342
3343
3344 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3345 }
3346 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3347
3348 if (is_counting) {
3349
3350
3351
3352 val &= ovfl_mask;
3353 val += sval;
3354 }
3355
3356
3357
3358
3359 if (unlikely(expert_mode == 0 && rd_func)) {
3360 unsigned long v = val;
3361 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3362 if (ret) goto error;
3363 val = v;
3364 ret = -EINVAL;
3365 }
3366
3367 PFM_REG_RETFLAG_SET(reg_flags, 0);
3368
3369 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3370
3371
3372
3373
3374
3375
3376 req->reg_value = val;
3377 req->reg_flags = reg_flags;
3378 req->reg_last_reset_val = lval;
3379 }
3380
3381 return 0;
3382
3383error:
3384 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3385 return ret;
3386}
3387
3388int
3389pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3390{
3391 pfm_context_t *ctx;
3392
3393 if (req == NULL) return -EINVAL;
3394
3395 ctx = GET_PMU_CTX();
3396
3397 if (ctx == NULL) return -EINVAL;
3398
3399
3400
3401
3402
3403 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3404
3405 return pfm_write_pmcs(ctx, req, nreq, regs);
3406}
3407EXPORT_SYMBOL(pfm_mod_write_pmcs);
3408
3409int
3410pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3411{
3412 pfm_context_t *ctx;
3413
3414 if (req == NULL) return -EINVAL;
3415
3416 ctx = GET_PMU_CTX();
3417
3418 if (ctx == NULL) return -EINVAL;
3419
3420
3421
3422
3423
3424 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3425
3426 return pfm_read_pmds(ctx, req, nreq, regs);
3427}
3428EXPORT_SYMBOL(pfm_mod_read_pmds);
3429
3430
3431
3432
3433
3434int
3435pfm_use_debug_registers(struct task_struct *task)
3436{
3437 pfm_context_t *ctx = task->thread.pfm_context;
3438 unsigned long flags;
3439 int ret = 0;
3440
3441 if (pmu_conf->use_rr_dbregs == 0) return 0;
3442
3443 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3444
3445
3446
3447
3448 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3449
3450
3451
3452
3453
3454
3455
3456
3457
3458 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3459
3460 LOCK_PFS(flags);
3461
3462
3463
3464
3465
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3467 ret = -1;
3468 else
3469 pfm_sessions.pfs_ptrace_use_dbregs++;
3470
3471 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3472 pfm_sessions.pfs_ptrace_use_dbregs,
3473 pfm_sessions.pfs_sys_use_dbregs,
3474 task_pid_nr(task), ret));
3475
3476 UNLOCK_PFS(flags);
3477
3478 return ret;
3479}
3480
3481
3482
3483
3484
3485
3486
3487
3488
3489int
3490pfm_release_debug_registers(struct task_struct *task)
3491{
3492 unsigned long flags;
3493 int ret;
3494
3495 if (pmu_conf->use_rr_dbregs == 0) return 0;
3496
3497 LOCK_PFS(flags);
3498 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3499 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3500 ret = -1;
3501 } else {
3502 pfm_sessions.pfs_ptrace_use_dbregs--;
3503 ret = 0;
3504 }
3505 UNLOCK_PFS(flags);
3506
3507 return ret;
3508}
3509
3510static int
3511pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3512{
3513 struct task_struct *task;
3514 pfm_buffer_fmt_t *fmt;
3515 pfm_ovfl_ctrl_t rst_ctrl;
3516 int state, is_system;
3517 int ret = 0;
3518
3519 state = ctx->ctx_state;
3520 fmt = ctx->ctx_buf_fmt;
3521 is_system = ctx->ctx_fl_system;
3522 task = PFM_CTX_TASK(ctx);
3523
3524 switch(state) {
3525 case PFM_CTX_MASKED:
3526 break;
3527 case PFM_CTX_LOADED:
		if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
		/* fall through */
3529
3530 case PFM_CTX_UNLOADED:
3531 case PFM_CTX_ZOMBIE:
3532 DPRINT(("invalid state=%d\n", state));
3533 return -EBUSY;
3534 default:
3535 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3536 return -EINVAL;
3537 }
3538
3539
3540
3541
3542
3543
3544 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3545 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3546 return -EBUSY;
3547 }
3548
3549
3550 if (unlikely(task == NULL)) {
3551 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3552 return -EINVAL;
3553 }
3554
3555 if (task == current || is_system) {
3556
3557 fmt = ctx->ctx_buf_fmt;
3558
3559 DPRINT(("restarting self %d ovfl=0x%lx\n",
3560 task_pid_nr(task),
3561 ctx->ctx_ovfl_regs[0]));
3562
3563 if (CTX_HAS_SMPL(ctx)) {
3564
3565 prefetch(ctx->ctx_smpl_hdr);
3566
3567 rst_ctrl.bits.mask_monitoring = 0;
3568 rst_ctrl.bits.reset_ovfl_pmds = 0;
3569
3570 if (state == PFM_CTX_LOADED)
3571 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3572 else
3573 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3574 } else {
3575 rst_ctrl.bits.mask_monitoring = 0;
3576 rst_ctrl.bits.reset_ovfl_pmds = 1;
3577 }
3578
3579 if (ret == 0) {
3580 if (rst_ctrl.bits.reset_ovfl_pmds)
3581 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3582
3583 if (rst_ctrl.bits.mask_monitoring == 0) {
3584 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3585
3586 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3587 } else {
3588 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3589
3590
3591 }
3592 }
3593
3594
3595
3596 ctx->ctx_ovfl_regs[0] = 0UL;
3597
3598
3599
3600
3601 ctx->ctx_state = PFM_CTX_LOADED;
3602
3603
3604
3605
3606 ctx->ctx_fl_can_restart = 0;
3607
3608 return 0;
3609 }
3610
3611
3612
3613
3614
3615
3616
3617
3618
3619 if (state == PFM_CTX_MASKED) {
3620 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3621
3622
3623
3624
3625 ctx->ctx_fl_can_restart = 0;
3626 }
3627
3628
3629
3630
3631
3632
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3645 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3646 complete(&ctx->ctx_restart_done);
3647 } else {
3648 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3649
3650 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3651
3652 PFM_SET_WORK_PENDING(task, 1);
3653
3654 set_notify_resume(task);
3655
3656
3657
3658
3659 }
3660 return 0;
3661}
3662
3663static int
3664pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3665{
3666 unsigned int m = *(unsigned int *)arg;
3667
3668 pfm_sysctl.debug = m == 0 ? 0 : 1;
3669
3670 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3671
3672 if (m == 0) {
3673 memset(pfm_stats, 0, sizeof(pfm_stats));
		for (m = 0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3675 }
3676 return 0;
3677}
3678
3679
3680
3681
3682static int
3683pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3684{
3685 struct thread_struct *thread = NULL;
3686 struct task_struct *task;
3687 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3688 unsigned long flags;
3689 dbreg_t dbreg;
3690 unsigned int rnum;
3691 int first_time;
3692 int ret = 0, state;
3693 int i, can_access_pmu = 0;
3694 int is_system, is_loaded;
3695
3696 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3697
3698 state = ctx->ctx_state;
3699 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3700 is_system = ctx->ctx_fl_system;
3701 task = ctx->ctx_task;
3702
3703 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3704
3705
3706
3707
3708
3709 if (is_loaded) {
3710 thread = &task->thread;
3711
3712
3713
3714
3715
3716 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3717 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3718 return -EBUSY;
3719 }
		can_access_pmu = (GET_PMU_OWNER() == task || is_system) ? 1 : 0;
3721 }
3722
3723
3724
3725
3726
3727
3728
3729
3730 first_time = ctx->ctx_fl_using_dbreg == 0;
3731
3732
3733
3734
3735 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3736 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3737 return -EBUSY;
3738 }
3739
3740
3741
3742
3743
3744
3745
3746
3747 if (is_loaded) {
3748 LOCK_PFS(flags);
3749
3750 if (first_time && is_system) {
3751 if (pfm_sessions.pfs_ptrace_use_dbregs)
3752 ret = -EBUSY;
3753 else
3754 pfm_sessions.pfs_sys_use_dbregs++;
3755 }
3756 UNLOCK_PFS(flags);
3757 }
3758
3759 if (ret != 0) return ret;
3760
3761
3762
3763
3764
3765 ctx->ctx_fl_using_dbreg = 1;
3766
3767
3768
3769
3770
3771
3772
3773
3774
3775
3776 if (first_time && can_access_pmu) {
3777 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3778 for (i=0; i < pmu_conf->num_ibrs; i++) {
3779 ia64_set_ibr(i, 0UL);
3780 ia64_dv_serialize_instruction();
3781 }
3782 ia64_srlz_i();
3783 for (i=0; i < pmu_conf->num_dbrs; i++) {
3784 ia64_set_dbr(i, 0UL);
3785 ia64_dv_serialize_data();
3786 }
3787 ia64_srlz_d();
3788 }
3789
3790
3791
3792
3793 for (i = 0; i < count; i++, req++) {
3794
3795 rnum = req->dbreg_num;
3796 dbreg.val = req->dbreg_value;
3797
3798 ret = -EINVAL;
3799
3800 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3801 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3802 rnum, dbreg.val, mode, i, count));
3803
3804 goto abort_mission;
3805 }
3806
3807
3808
3809
		/*
		 * make sure we never install an enabled breakpoint: clear the
		 * fault-enable bits on odd (control) registers
		 */
		if (rnum & 0x1) {
3811 if (mode == PFM_CODE_RR)
3812 dbreg.ibr.ibr_x = 0;
3813 else
3814 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3815 }
3816
3817 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829 if (mode == PFM_CODE_RR) {
3830 CTX_USED_IBR(ctx, rnum);
3831
3832 if (can_access_pmu) {
3833 ia64_set_ibr(rnum, dbreg.val);
3834 ia64_dv_serialize_instruction();
3835 }
3836
3837 ctx->ctx_ibrs[rnum] = dbreg.val;
3838
3839 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3840 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3841 } else {
3842 CTX_USED_DBR(ctx, rnum);
3843
3844 if (can_access_pmu) {
3845 ia64_set_dbr(rnum, dbreg.val);
3846 ia64_dv_serialize_data();
3847 }
3848 ctx->ctx_dbrs[rnum] = dbreg.val;
3849
3850 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3851 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3852 }
3853 }
3854
3855 return 0;
3856
3857abort_mission:
3858
3859
3860
3861 if (first_time) {
3862 LOCK_PFS(flags);
3863 if (ctx->ctx_fl_system) {
3864 pfm_sessions.pfs_sys_use_dbregs--;
3865 }
3866 UNLOCK_PFS(flags);
3867 ctx->ctx_fl_using_dbreg = 0;
3868 }
3869
3870
3871
3872 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3873
3874 return ret;
3875}
3876
3877static int
3878pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3879{
3880 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3881}
3882
3883static int
3884pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3885{
3886 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3887}
3888
3889int
3890pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3891{
3892 pfm_context_t *ctx;
3893
3894 if (req == NULL) return -EINVAL;
3895
3896 ctx = GET_PMU_CTX();
3897
3898 if (ctx == NULL) return -EINVAL;
3899
3900
3901
3902
3903
3904 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3905
3906 return pfm_write_ibrs(ctx, req, nreq, regs);
3907}
3908EXPORT_SYMBOL(pfm_mod_write_ibrs);
3909
3910int
3911pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3912{
3913 pfm_context_t *ctx;
3914
3915 if (req == NULL) return -EINVAL;
3916
3917 ctx = GET_PMU_CTX();
3918
3919 if (ctx == NULL) return -EINVAL;
3920
3921
3922
3923
3924
3925 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3926
3927 return pfm_write_dbrs(ctx, req, nreq, regs);
3928}
3929EXPORT_SYMBOL(pfm_mod_write_dbrs);
3930
3931
3932static int
3933pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3934{
3935 pfarg_features_t *req = (pfarg_features_t *)arg;
3936
3937 req->ft_version = PFM_VERSION;
3938 return 0;
3939}
3940
3941static int
3942pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3943{
3944 struct pt_regs *tregs;
3945 struct task_struct *task = PFM_CTX_TASK(ctx);
3946 int state, is_system;
3947
3948 state = ctx->ctx_state;
3949 is_system = ctx->ctx_fl_system;
3950
3951
3952
3953
3954 if (state == PFM_CTX_UNLOADED) return -EINVAL;
3955
3956
3957
3958
3959
3960
3961 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3962 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3963 return -EBUSY;
3964 }
3965 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
3966 task_pid_nr(PFM_CTX_TASK(ctx)),
3967 state,
3968 is_system));
3969
3970
3971
3972
3973
3974 if (is_system) {
3975
3976
3977
3978
3979
3980 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
3981 ia64_srlz_i();
3982
3983
3984
3985
3986 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
3987
3988
3989
3990
3991 pfm_clear_psr_pp();
3992
3993
3994
3995
3996 ia64_psr(regs)->pp = 0;
3997
3998 return 0;
3999 }
4000
4001
4002
4003
4004 if (task == current) {
4005
4006 pfm_clear_psr_up();
4007
4008
4009
4010
4011 ia64_psr(regs)->up = 0;
4012 } else {
4013 tregs = task_pt_regs(task);
4014
4015
4016
4017
4018 ia64_psr(tregs)->up = 0;
4019
4020
4021
4022
4023 ctx->ctx_saved_psr_up = 0;
4024 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4025 }
4026 return 0;
4027}
4028
4029
4030static int
4031pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4032{
4033 struct pt_regs *tregs;
4034 int state, is_system;
4035
4036 state = ctx->ctx_state;
4037 is_system = ctx->ctx_fl_system;
4038
4039 if (state != PFM_CTX_LOADED) return -EINVAL;
4040
4041
4042
4043
4044
4045
4046 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4047 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4048 return -EBUSY;
4049 }
4050
4051
4052
4053
4054
4055
4056 if (is_system) {
4057
4058
4059
4060
4061 ia64_psr(regs)->pp = 1;
4062
4063
4064
4065
4066 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4067
4068
4069
4070
4071 pfm_set_psr_pp();
4072
4073
4074 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4075 ia64_srlz_i();
4076
4077 return 0;
4078 }
4079
4080
4081
4082
4083
4084 if (ctx->ctx_task == current) {
4085
4086
4087 pfm_set_psr_up();
4088
4089
4090
4091
4092 ia64_psr(regs)->up = 1;
4093
4094 } else {
4095 tregs = task_pt_regs(ctx->ctx_task);
4096
4097
4098
4099
4100
4101 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4102
4103
4104
4105
4106 ia64_psr(tregs)->up = 1;
4107 }
4108 return 0;
4109}
4110
4111static int
4112pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4113{
4114 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4115 unsigned int cnum;
4116 int i;
4117 int ret = -EINVAL;
4118
4119 for (i = 0; i < count; i++, req++) {
4120
4121 cnum = req->reg_num;
4122
4123 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4124
4125 req->reg_value = PMC_DFL_VAL(cnum);
4126
4127 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4128
4129 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4130 }
4131 return 0;
4132
4133abort_mission:
4134 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4135 return ret;
4136}
4137
4138static int
4139pfm_check_task_exist(pfm_context_t *ctx)
4140{
4141 struct task_struct *g, *t;
4142 int ret = -ESRCH;
4143
4144 read_lock(&tasklist_lock);
4145
4146 do_each_thread (g, t) {
4147 if (t->thread.pfm_context == ctx) {
4148 ret = 0;
4149 goto out;
4150 }
4151 } while_each_thread (g, t);
4152out:
4153 read_unlock(&tasklist_lock);
4154
4155 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4156
4157 return ret;
4158}
4159
4160static int
4161pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4162{
4163 struct task_struct *task;
4164 struct thread_struct *thread;
	pfm_context_t *old;
4166 unsigned long flags;
4167#ifndef CONFIG_SMP
4168 struct task_struct *owner_task = NULL;
4169#endif
4170 pfarg_load_t *req = (pfarg_load_t *)arg;
4171 unsigned long *pmcs_source, *pmds_source;
4172 int the_cpu;
4173 int ret = 0;
4174 int state, is_system, set_dbregs = 0;
4175
4176 state = ctx->ctx_state;
4177 is_system = ctx->ctx_fl_system;
4178
4179
4180
4181 if (state != PFM_CTX_UNLOADED) {
4182 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4183 req->load_pid,
4184 ctx->ctx_state));
4185 return -EBUSY;
4186 }
4187
4188 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4189
4190 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4191 DPRINT(("cannot use blocking mode on self\n"));
4192 return -EINVAL;
4193 }
4194
4195 ret = pfm_get_task(ctx, req->load_pid, &task);
4196 if (ret) {
4197 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4198 return ret;
4199 }
4200
4201 ret = -EINVAL;
4202
4203
4204
4205
4206 if (is_system && task != current) {
4207 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4208 req->load_pid));
4209 goto error;
4210 }
4211
4212 thread = &task->thread;
4213
4214 ret = 0;
4215
4216
4217
4218
4219 if (ctx->ctx_fl_using_dbreg) {
4220 if (thread->flags & IA64_THREAD_DBG_VALID) {
4221 ret = -EBUSY;
4222 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4223 goto error;
4224 }
4225 LOCK_PFS(flags);
4226
4227 if (is_system) {
4228 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4229 DPRINT(("cannot load [%d] dbregs in use\n",
4230 task_pid_nr(task)));
4231 ret = -EBUSY;
4232 } else {
4233 pfm_sessions.pfs_sys_use_dbregs++;
4234 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4235 set_dbregs = 1;
4236 }
4237 }
4238
4239 UNLOCK_PFS(flags);
4240
4241 if (ret) goto error;
4242 }
4243
4244
4245
4246
4247
4248
4249
4250
4251
4252
4253
4254
4255
4256
4257
4258
	/*
	 * the context is bound to the CPU doing the load; later calls for a
	 * system-wide session from any other CPU are rejected with -EBUSY
	 */
	the_cpu = ctx->ctx_cpu = smp_processor_id();
4260
4262
4263
4264
4265 ret = pfm_reserve_session(current, is_system, the_cpu);
4266 if (ret) goto error;
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276
4277 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4278 thread->pfm_context, ctx));
4279
4280 ret = -EBUSY;
4281 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4282 if (old != NULL) {
4283 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4284 goto error_unres;
4285 }
4286
4287 pfm_reset_msgq(ctx);
4288
4289 ctx->ctx_state = PFM_CTX_LOADED;
4290
4291
4292
4293
4294 ctx->ctx_task = task;
4295
4296 if (is_system) {
4297
4298
4299
4300 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4301 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4302
4303 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4304 } else {
4305 thread->flags |= IA64_THREAD_PM_VALID;
4306 }
4307
4308
4309
4310
4311 pfm_copy_pmds(task, ctx);
4312 pfm_copy_pmcs(task, ctx);
4313
4314 pmcs_source = ctx->th_pmcs;
4315 pmds_source = ctx->th_pmds;
4316
4317
4318
4319
4320 if (task == current) {
4321
4322 if (is_system == 0) {
4323
4324
4325 ia64_psr(regs)->sp = 0;
4326 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4327
4328 SET_LAST_CPU(ctx, smp_processor_id());
4329 INC_ACTIVATION();
4330 SET_ACTIVATION(ctx);
4331#ifndef CONFIG_SMP
4332
4333
4334
4335 owner_task = GET_PMU_OWNER();
4336 if (owner_task) pfm_lazy_save_regs(owner_task);
4337#endif
4338 }
4339
4340
4341
4342
4343 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4344 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4345
4346 ctx->ctx_reload_pmcs[0] = 0UL;
4347 ctx->ctx_reload_pmds[0] = 0UL;
4348
4349
4350
4351
4352 if (ctx->ctx_fl_using_dbreg) {
4353 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4354 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4355 }
4356
4357
4358
4359 SET_PMU_OWNER(task, ctx);
4360
4361 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4362 } else {
4363
4364
4365
4366 regs = task_pt_regs(task);
4367
4368
4369 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4370 SET_LAST_CPU(ctx, -1);
4371
4372
4373 ctx->ctx_saved_psr_up = 0UL;
4374 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4375 }
4376
4377 ret = 0;
4378
4379error_unres:
4380 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4381error:
4382
4383
4384
4385 if (ret && set_dbregs) {
4386 LOCK_PFS(flags);
4387 pfm_sessions.pfs_sys_use_dbregs--;
4388 UNLOCK_PFS(flags);
4389 }
4390
4391
4392
4393 if (is_system == 0 && task != current) {
4394 pfm_put_task(task);
4395
4396 if (ret == 0) {
4397 ret = pfm_check_task_exist(ctx);
4398 if (ret) {
4399 ctx->ctx_state = PFM_CTX_UNLOADED;
4400 ctx->ctx_task = NULL;
4401 }
4402 }
4403 }
4404 return ret;
4405}
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4416
4417static int
4418pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4419{
4420 struct task_struct *task = PFM_CTX_TASK(ctx);
4421 struct pt_regs *tregs;
4422 int prev_state, is_system;
4423 int ret;
4424
4425 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4426
4427 prev_state = ctx->ctx_state;
4428 is_system = ctx->ctx_fl_system;
4429
4430
4431
4432
4433 if (prev_state == PFM_CTX_UNLOADED) {
4434 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4435 return 0;
4436 }
4437
4438
4439
4440
4441 ret = pfm_stop(ctx, NULL, 0, regs);
4442 if (ret) return ret;
4443
4444 ctx->ctx_state = PFM_CTX_UNLOADED;
4445
4446
4447
4448
4449
4450
4451 if (is_system) {
4452
4453
4454
4455
4456
4457
4458 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4459 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4460
4461
4462
4463
4464
4465 pfm_flush_pmds(current, ctx);
4466
4467
4468
4469
4470
4471 if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);
4473
4474
4475
4476
4477 task->thread.pfm_context = NULL;
4478
4479
4480
4481 ctx->ctx_task = NULL;
4482
4483
4484
4485
4486 return 0;
4487 }
4488
4489
4490
4491
4492 tregs = task == current ? regs : task_pt_regs(task);
4493
4494 if (task == current) {
4495
4496
4497
4498 ia64_psr(regs)->sp = 1;
4499
4500 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4501 }
4502
4503
4504
4505
4506 pfm_flush_pmds(task, ctx);
4507
4508
4509
4510
4511
4512
4513
4514 if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);
4516
4517
4518
4519
4520 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4521 SET_LAST_CPU(ctx, -1);
4522
4523
4524
4525
4526 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4527
4528
4529
4530
4531 task->thread.pfm_context = NULL;
4532 ctx->ctx_task = NULL;
4533
4534 PFM_SET_WORK_PENDING(task, 0);
4535
4536 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4537 ctx->ctx_fl_can_restart = 0;
4538 ctx->ctx_fl_going_zombie = 0;
4539
4540 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4541
4542 return 0;
4543}
4544
4545
4546
4547
4548
4549
4550void
4551pfm_exit_thread(struct task_struct *task)
4552{
4553 pfm_context_t *ctx;
4554 unsigned long flags;
4555 struct pt_regs *regs = task_pt_regs(task);
4556 int ret, state;
4557 int free_ok = 0;
4558
4559 ctx = PFM_GET_CTX(task);
4560
4561 PROTECT_CTX(ctx, flags);
4562
4563 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4564
4565 state = ctx->ctx_state;
4566 switch(state) {
4567 case PFM_CTX_UNLOADED:
4568
4569
4570
4571
4572 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4573 break;
4574 case PFM_CTX_LOADED:
4575 case PFM_CTX_MASKED:
4576 ret = pfm_context_unload(ctx, NULL, 0, regs);
4577 if (ret) {
4578 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4579 }
4580 DPRINT(("ctx unloaded for current state was %d\n", state));
4581
4582 pfm_end_notify_user(ctx);
4583 break;
4584 case PFM_CTX_ZOMBIE:
4585 ret = pfm_context_unload(ctx, NULL, 0, regs);
4586 if (ret) {
4587 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4588 }
4589 free_ok = 1;
4590 break;
4591 default:
4592 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4593 break;
4594 }
4595 UNPROTECT_CTX(ctx, flags);
4596
	{
		u64 psr = pfm_get_psr();
		BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
		BUG_ON(GET_PMU_OWNER());
		BUG_ON(ia64_psr(regs)->up);
		BUG_ON(ia64_psr(regs)->pp);
	}
4603
4604
4605
4606
4607
4608 if (free_ok) pfm_context_free(ctx);
4609}
4610
4611
4612
4613
4614#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4615#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4616#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4617#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4618#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4619
static pfm_cmd_desc_t pfm_cmd_tab[] = {
4621PFM_CMD_NONE,
4622PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4623PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4624PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4625PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4626PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4627PFM_CMD_NONE,
4628PFM_CMD_NONE,
4629PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4630PFM_CMD_NONE,
4631PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4632PFM_CMD_NONE,
4633PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4634PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4635PFM_CMD_NONE,
4636PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4637PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4638PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4639PFM_CMD_NONE,
4640PFM_CMD_NONE,
4641PFM_CMD_NONE,
4642PFM_CMD_NONE,
4643PFM_CMD_NONE,
4644PFM_CMD_NONE,
4645PFM_CMD_NONE,
4646PFM_CMD_NONE,
4647PFM_CMD_NONE,
4648PFM_CMD_NONE,
4649PFM_CMD_NONE,
4650PFM_CMD_NONE,
4651PFM_CMD_NONE,
4652PFM_CMD_NONE,
4653PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4654PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4655};
#define PFM_CMD_COUNT	ARRAY_SIZE(pfm_cmd_tab)
4657
4658static int
4659pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4660{
4661 struct task_struct *task;
4662 int state, old_state;
4663
4664recheck:
4665 state = ctx->ctx_state;
4666 task = ctx->ctx_task;
4667
4668 if (task == NULL) {
4669 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4670 return 0;
4671 }
4672
4673 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4674 ctx->ctx_fd,
4675 state,
4676 task_pid_nr(task),
4677 task->state, PFM_CMD_STOPPED(cmd)));
4678
4679
4680
4681
4682
4683
4684
4685
4686 if (task == current || ctx->ctx_fl_system) return 0;
4687
4688
4689
4690
4691 switch(state) {
4692 case PFM_CTX_UNLOADED:
4693
4694
4695
4696 return 0;
4697 case PFM_CTX_ZOMBIE:
4698
4699
4700
4701 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4702 return -EINVAL;
4703 case PFM_CTX_MASKED:
4704
4705
4706
4707
4708 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4709 }
4710
4711
4712
4713
4714
4715
4716
4717
4718
4719
4720
4721 if (PFM_CMD_STOPPED(cmd)) {
4722 if (!task_is_stopped_or_traced(task)) {
4723 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4724 return -EBUSY;
4725 }
4726
4727
4728
4729
4730
4731
4732
4733
4734
4735
4736
4737
4738
4739
4740 old_state = state;
4741
4742 UNPROTECT_CTX(ctx, flags);
4743
4744 wait_task_inactive(task, 0);
4745
4746 PROTECT_CTX(ctx, flags);
4747
4748
4749
4750
4751 if (ctx->ctx_state != old_state) {
4752 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4753 goto recheck;
4754 }
4755 }
4756 return 0;
4757}
4758
4759
4760
4761
4762asmlinkage long
4763sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4764{
4765 struct fd f = {NULL, 0};
4766 pfm_context_t *ctx = NULL;
4767 unsigned long flags = 0UL;
4768 void *args_k = NULL;
4769 long ret;
4770 size_t base_sz, sz, xtra_sz = 0;
4771 int narg, completed_args = 0, call_made = 0, cmd_flags;
4772 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4773 int (*getsize)(void *arg, size_t *sz);
4774#define PFM_MAX_ARGSIZE 4096
4775
4776
4777
4778
4779 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4780
4781 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4782 DPRINT(("invalid cmd=%d\n", cmd));
4783 return -EINVAL;
4784 }
4785
4786 func = pfm_cmd_tab[cmd].cmd_func;
4787 narg = pfm_cmd_tab[cmd].cmd_narg;
4788 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4789 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4790 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4791
4792 if (unlikely(func == NULL)) {
4793 DPRINT(("invalid cmd=%d\n", cmd));
4794 return -EINVAL;
4795 }
4796
4797 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4798 PFM_CMD_NAME(cmd),
4799 cmd,
4800 narg,
4801 base_sz,
4802 count));
4803
4804
4805
4806
4807 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4808 return -EINVAL;
4809
4810restart_args:
4811 sz = xtra_sz + base_sz*count;
4812
4813
4814
4815 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4816 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4817 return -E2BIG;
4818 }
4819
4820
4821
4822
4823 if (likely(count && args_k == NULL)) {
4824 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4825 if (args_k == NULL) return -ENOMEM;
4826 }
4827
4828 ret = -EFAULT;
4829
4830
4831
4832
4833
4834
4835 if (sz && copy_from_user(args_k, arg, sz)) {
4836 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4837 goto error_args;
4838 }
4839
4840
4841
4842
4843 if (completed_args == 0 && getsize) {
4844
4845
4846
4847 ret = (*getsize)(args_k, &xtra_sz);
4848 if (ret) goto error_args;
4849
4850 completed_args = 1;
4851
4852 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4853
4854
4855 if (likely(xtra_sz)) goto restart_args;
4856 }
4857
4858 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4859
4860 ret = -EBADF;
4861
4862 f = fdget(fd);
4863 if (unlikely(f.file == NULL)) {
4864 DPRINT(("invalid fd %d\n", fd));
4865 goto error_args;
4866 }
4867 if (unlikely(PFM_IS_FILE(f.file) == 0)) {
4868 DPRINT(("fd %d not related to perfmon\n", fd));
4869 goto error_args;
4870 }
4871
4872 ctx = f.file->private_data;
4873 if (unlikely(ctx == NULL)) {
4874 DPRINT(("no context for fd %d\n", fd));
4875 goto error_args;
4876 }
4877 prefetch(&ctx->ctx_state);
4878
4879 PROTECT_CTX(ctx, flags);
4880
4881
4882
4883
4884 ret = pfm_check_task_state(ctx, cmd, flags);
4885 if (unlikely(ret)) goto abort_locked;
4886
4887skip_fd:
4888 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4889
4890 call_made = 1;
4891
4892abort_locked:
4893 if (likely(ctx)) {
4894 DPRINT(("context unlocked\n"));
4895 UNPROTECT_CTX(ctx, flags);
4896 }
4897
4898
4899 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4900
4901error_args:
4902 if (f.file)
4903 fdput(f);
4904
4905 kfree(args_k);
4906
4907 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4908
4909 return ret;
4910}
4911
4912static void
4913pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4914{
4915 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4916 pfm_ovfl_ctrl_t rst_ctrl;
4917 int state;
4918 int ret = 0;
4919
4920 state = ctx->ctx_state;
4921
4922
4923
4924
4925 if (CTX_HAS_SMPL(ctx)) {
4926
4927 rst_ctrl.bits.mask_monitoring = 0;
4928 rst_ctrl.bits.reset_ovfl_pmds = 0;
4929
4930 if (state == PFM_CTX_LOADED)
4931 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4932 else
4933 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
4934 } else {
4935 rst_ctrl.bits.mask_monitoring = 0;
4936 rst_ctrl.bits.reset_ovfl_pmds = 1;
4937 }
4938
4939 if (ret == 0) {
4940 if (rst_ctrl.bits.reset_ovfl_pmds) {
4941 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
4942 }
4943 if (rst_ctrl.bits.mask_monitoring == 0) {
4944 DPRINT(("resuming monitoring\n"));
4945 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
4946 } else {
4947 DPRINT(("stopping monitoring\n"));
4948
4949 }
4950 ctx->ctx_state = PFM_CTX_LOADED;
4951 }
4952}
4953
4954
4955
4956
4957
4958static void
4959pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
4960{
4961 int ret;
4962
4963 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
4964
4965 ret = pfm_context_unload(ctx, NULL, 0, regs);
4966 if (ret) {
4967 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
4968 }
4969
4970
4971
4972
4973 wake_up_interruptible(&ctx->ctx_zombieq);
4974
4975
4976
4977
4978
4979
4980}
4981
4982static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4983
4984
4985
4986
4987
4988
4989
4990
4991
4992
4993void
4994pfm_handle_work(void)
4995{
4996 pfm_context_t *ctx;
4997 struct pt_regs *regs;
4998 unsigned long flags, dummy_flags;
4999 unsigned long ovfl_regs;
5000 unsigned int reason;
5001 int ret;
5002
5003 ctx = PFM_GET_CTX(current);
5004 if (ctx == NULL) {
5005 printk(KERN_ERR "perfmon: [%d] has no PFM context\n",
5006 task_pid_nr(current));
5007 return;
5008 }
5009
5010 PROTECT_CTX(ctx, flags);
5011
5012 PFM_SET_WORK_PENDING(current, 0);
5013
5014 regs = task_pt_regs(current);
5015
5016
5017
5018
5019 reason = ctx->ctx_fl_trap_reason;
5020 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5021 ovfl_regs = ctx->ctx_ovfl_regs[0];
5022
5023 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5024
5025
5026
5027
5028 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE)
5029 goto do_zombie;
5030
5031
5032 if (reason == PFM_TRAP_REASON_RESET)
5033 goto skip_blocking;
5034
5035
5036
5037
5038
5039 UNPROTECT_CTX(ctx, flags);
5040
5041
5042
5043
5044 local_irq_enable();
5045
5046 DPRINT(("before block sleeping\n"));
5047
5048
5049
5050
5051
5052 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5053
5054 DPRINT(("after block sleeping ret=%d\n", ret));
5055
5056
5057
5058
5059
5060
5061
5062 PROTECT_CTX(ctx, dummy_flags);
5063
5064
5065
5066
5067
5068
5069
5070 ovfl_regs = ctx->ctx_ovfl_regs[0];
5071
5072 if (ctx->ctx_fl_going_zombie) {
5073do_zombie:
5074 DPRINT(("context is zombie, bailing out\n"));
5075 pfm_context_force_terminate(ctx, regs);
5076 goto nothing_to_do;
5077 }
5078
5079
5080
5081 if (ret < 0)
5082 goto nothing_to_do;
5083
5084skip_blocking:
5085 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5086 ctx->ctx_ovfl_regs[0] = 0UL;
5087
5088nothing_to_do:
5089
5090
5091
5092 UNPROTECT_CTX(ctx, flags);
5093}
5094
5095static int
5096pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5097{
5098 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5099 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5100 return 0;
5101 }
5102
5103 DPRINT(("waking up somebody\n"));
5104
5105 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5106
5107
5108
5109
5110
	kill_fasync(&ctx->ctx_async_queue, SIGIO, POLL_IN);
5112
5113 return 0;
5114}
5115
5116static int
5117pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5118{
5119 pfm_msg_t *msg = NULL;
5120
5121 if (ctx->ctx_fl_no_msg == 0) {
5122 msg = pfm_get_new_msg(ctx);
5123 if (msg == NULL) {
5124 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5125 return -1;
5126 }
5127
5128 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5129 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5130 msg->pfm_ovfl_msg.msg_active_set = 0;
5131 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5132 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5133 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5134 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5135 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5136 }
5137
5138 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5139 msg,
5140 ctx->ctx_fl_no_msg,
5141 ctx->ctx_fd,
5142 ovfl_pmds));
5143
5144 return pfm_notify_user(ctx, msg);
5145}
5146
5147static int
5148pfm_end_notify_user(pfm_context_t *ctx)
5149{
5150 pfm_msg_t *msg;
5151
5152 msg = pfm_get_new_msg(ctx);
5153 if (msg == NULL) {
5154 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5155 return -1;
5156 }
5157
5158 memset(msg, 0, sizeof(*msg));
5159
5160 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5161 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5162 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5163
5164 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5165 msg,
5166 ctx->ctx_fl_no_msg,
5167 ctx->ctx_fd));
5168
5169 return pfm_notify_user(ctx, msg);
5170}
5171
5172
5173
5174
5175
5176static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx,
5177 unsigned long pmc0, struct pt_regs *regs)
5178{
5179 pfm_ovfl_arg_t *ovfl_arg;
5180 unsigned long mask;
5181 unsigned long old_val, ovfl_val, new_val;
5182 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5183 unsigned long tstamp;
5184 pfm_ovfl_ctrl_t ovfl_ctrl;
5185 unsigned int i, has_smpl;
5186 int must_notify = 0;
5187
5188 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5189
5190
5191
5192
5193 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5194
5195 tstamp = ia64_get_itc();
5196 mask = pmc0 >> PMU_FIRST_COUNTER;
5197 ovfl_val = pmu_conf->ovfl_val;
5198 has_smpl = CTX_HAS_SMPL(ctx);
5199
5200 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5201 "used_pmds=0x%lx\n",
5202 pmc0,
5203 task ? task_pid_nr(task): -1,
5204 (regs ? regs->cr_iip : 0),
5205 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5206 ctx->ctx_used_pmds[0]));
5207
5208
5209
5210
5211
5212
5213 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5214
5215
5216 if ((mask & 0x1) == 0) continue;
5217
5218
5219
5220
5221
5222
5223
5224 old_val = new_val = ctx->ctx_pmds[i].val;
5225 new_val += 1 + ovfl_val;
5226 ctx->ctx_pmds[i].val = new_val;
5227
5228
5229
5230
5231 if (likely(old_val > new_val)) {
5232 ovfl_pmds |= 1UL << i;
5233 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5234 }
5235
5236 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5237 i,
5238 new_val,
5239 old_val,
5240 ia64_get_pmd(i) & ovfl_val,
5241 ovfl_pmds,
5242 ovfl_notify));
5243 }
5244
5245
5246
5247
5248 if (ovfl_pmds == 0UL) return;
5249
5250
5251
5252
5253 ovfl_ctrl.val = 0;
5254 reset_pmds = 0UL;
5255
5256
5257
5258
5259
5260 if (has_smpl) {
5261 unsigned long start_cycles, end_cycles;
5262 unsigned long pmd_mask;
5263 int j, k, ret = 0;
5264 int this_cpu = smp_processor_id();
5265
5266 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5267 ovfl_arg = &ctx->ctx_ovfl_arg;
5268
5269 prefetch(ctx->ctx_smpl_hdr);
5270
		for (i = PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>= 1) {
5272
5273 mask = 1UL << i;
5274
5275 if ((pmd_mask & 0x1) == 0) continue;
5276
			ovfl_arg->ovfl_pmd = (unsigned char)i;
5278 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5279 ovfl_arg->active_set = 0;
5280 ovfl_arg->ovfl_ctrl.val = 0;
5281 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5282
5283 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5284 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5285 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5286
5287
5288
5289
5290
5291 if (smpl_pmds) {
				for (j = 0, k = 0; smpl_pmds; j++, smpl_pmds >>= 1) {
5293 if ((smpl_pmds & 0x1) == 0) continue;
5294 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5295 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5296 }
5297 }
5298
5299 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5300
5301 start_cycles = ia64_get_itc();
5302
5303
5304
5305
5306 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5307
5308 end_cycles = ia64_get_itc();
5309
5310
5311
5312
5313
5314 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5315 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5316 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5317
5318
5319
5320 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5321
5322 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5323 }
5324
5325
5326
5327 if (ret && pmd_mask) {
5328 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5329 pmd_mask<<PMU_FIRST_COUNTER));
5330 }
5331
5332
5333
5334 ovfl_pmds &= ~reset_pmds;
5335 } else {
5336
5337
5338
5339
5340 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5341 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5342 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5343 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5344
5345
5346
5347 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5348 }
5349
5350 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5351
5352
5353
5354
5355 if (reset_pmds) {
5356 unsigned long bm = reset_pmds;
5357 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5358 }
5359
5360 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5361
5362
5363
5364 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5365
5366
5367
5368
5369 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5370
5371 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5372
5373
5374
5375
5376 PFM_SET_WORK_PENDING(task, 1);
5377
5378
5379
5380
5381
5382 set_notify_resume(task);
5383 }
5384
5385
5386
5387
5388 must_notify = 1;
5389 }
5390
5391 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5392 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5393 PFM_GET_WORK_PENDING(task),
5394 ctx->ctx_fl_trap_reason,
5395 ovfl_pmds,
5396 ovfl_notify,
5397 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5398
5399
5400
5401 if (ovfl_ctrl.bits.mask_monitoring) {
5402 pfm_mask_monitoring(task);
5403 ctx->ctx_state = PFM_CTX_MASKED;
5404 ctx->ctx_fl_can_restart = 1;
5405 }
5406
5407
5408
5409
5410 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5411
5412 return;
5413
5414sanity_check:
5415 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5416 smp_processor_id(),
5417 task ? task_pid_nr(task) : -1,
5418 pmc0);
5419 return;
5420
stop_monitoring:
	/*
	 * We come here when the context turned into a zombie while an overflow
	 * was pending. A zombie context cannot be reported from the interrupt
	 * handler because the monitored task is not necessarily current; the
	 * context is reclaimed later, from pfm_save_regs() (SMP) or
	 * pfm_load_regs() (UP) at the next context switch.
	 *
	 * So the overflow is converted into a spurious one: monitoring is
	 * stopped by clearing psr.up both in hardware and in the saved
	 * pt_regs, and psr.sp is set so user level cannot simply turn it
	 * back on.
	 */
	DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
	pfm_clear_psr_up();
	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;
	return;
}

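/*
 * body of the PMU overflow interrupt: read the overflow status from pmc0,
 * check that the interrupt belongs to a valid perfmon context, and dispatch
 * to pfm_overflow_handler(); anything else is accounted as spurious.
 */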
static int
pfm_do_interrupt_handler(void *arg, struct pt_regs *regs)
{
	struct task_struct *task;
	pfm_context_t *ctx;
	unsigned long flags;
	u64 pmc0;
	int this_cpu = smp_processor_id();
	int retval = 0;

	pfm_stats[this_cpu].pfm_ovfl_intr_count++;

	/*
	 * srlz.d done before arriving here
	 */
	pmc0 = ia64_get_pmc(0);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	/*
	 * if we have some pending bits set
	 * assumes: if any pmc0.bit[63-1] is set, then pmc0.fr = 1
	 */
	if (PMC0_HAS_OVFL(pmc0) && task) {
		/*
		 * we assume that pmc0.fr is always set here
		 */

		/* sanity check */
		if (!ctx) goto report_spurious1;

		if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
			goto report_spurious2;

		PROTECT_CTX_NOPRINT(ctx, flags);

		pfm_overflow_handler(task, ctx, pmc0, regs);

		UNPROTECT_CTX_NOPRINT(ctx, flags);

	} else {
		pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
		retval = -1;
	}
	/*
	 * keep the PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	return retval;

report_spurious1:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
		this_cpu, task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
report_spurious2:
	printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
		this_cpu,
		task_pid_nr(task));
	pfm_unfreeze_pmu();
	return -1;
}

static irqreturn_t
pfm_interrupt_handler(int irq, void *arg)
{
	unsigned long start_cycles, total_cycles;
	unsigned long min, max;
	int this_cpu;
	int ret;
	struct pt_regs *regs = get_irq_regs();

	this_cpu = get_cpu();
	if (likely(!pfm_alt_intr_handler)) {
		min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
		max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;

		start_cycles = ia64_get_itc();

		ret = pfm_do_interrupt_handler(arg, regs);

		total_cycles = ia64_get_itc();

		/*
		 * don't measure spurious interrupts
		 */
		if (likely(ret == 0)) {
			total_cycles -= start_cycles;

			if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
			if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;

			pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
		}
	} else {
		(*pfm_alt_intr_handler->handler)(irq, arg, regs);
	}

	put_cpu();
	return IRQ_HANDLED;
}
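
/*
 * /proc/perfmon interface, for debug only
 */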

#define PFM_PROC_SHOW_HEADER	((void *)(long)nr_cpu_ids+1)

static void *
pfm_proc_start(struct seq_file *m, loff_t *pos)
{
	if (*pos == 0) {
		return PFM_PROC_SHOW_HEADER;
	}

	while (*pos <= nr_cpu_ids) {
		if (cpu_online(*pos - 1)) {
			return (void *)*pos;
		}
		++*pos;
	}
	return NULL;
}

static void *
pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return pfm_proc_start(m, pos);
}

static void
pfm_proc_stop(struct seq_file *m, void *v)
{
}

static void
pfm_proc_show_header(struct seq_file *m)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;
	unsigned long flags;

	seq_printf(m,
		"perfmon version           : %u.%u\n"
		"model                     : %s\n"
		"fastctxsw                 : %s\n"
		"expert mode               : %s\n"
		"ovfl_mask                 : 0x%lx\n"
		"PMU flags                 : 0x%x\n",
		PFM_VERSION_MAJ, PFM_VERSION_MIN,
		pmu_conf->pmu_name,
		pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
		pfm_sysctl.expert_mode > 0 ? "Yes": "No",
		pmu_conf->ovfl_val,
		pmu_conf->flags);

	LOCK_PFS(flags);

	seq_printf(m,
		"proc_sessions             : %u\n"
		"sys_sessions              : %u\n"
		"sys_use_dbregs            : %u\n"
		"ptrace_use_dbregs         : %u\n",
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		pfm_sessions.pfs_ptrace_use_dbregs);

	UNLOCK_PFS(flags);

	spin_lock(&pfm_buffer_fmt_lock);

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		seq_printf(m, "format                    : %16phD %s\n",
			entry->fmt_uuid, entry->fmt_name);
	}
	spin_unlock(&pfm_buffer_fmt_lock);
}

static int
pfm_proc_show(struct seq_file *m, void *v)
{
	unsigned long psr;
	unsigned int i;
	int cpu;

	if (v == PFM_PROC_SHOW_HEADER) {
		pfm_proc_show_header(m);
		return 0;
	}

	/* show info for CPU (v - 1) */

	cpu = (long)v - 1;
	seq_printf(m,
		"CPU%-2d overflow intrs      : %lu\n"
		"CPU%-2d overflow cycles     : %lu\n"
		"CPU%-2d overflow min        : %lu\n"
		"CPU%-2d overflow max        : %lu\n"
		"CPU%-2d smpl handler calls  : %lu\n"
		"CPU%-2d smpl handler cycles : %lu\n"
		"CPU%-2d spurious intrs      : %lu\n"
		"CPU%-2d replay intrs        : %lu\n"
		"CPU%-2d syst_wide           : %d\n"
		"CPU%-2d dcr_pp              : %d\n"
		"CPU%-2d exclude idle        : %d\n"
		"CPU%-2d owner               : %d\n"
		"CPU%-2d context             : %p\n"
		"CPU%-2d activations         : %lu\n",
		cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
		cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
		cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
		cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
		cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
		cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
		cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
		cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
		cpu, pfm_get_cpu_data(pmu_ctx, cpu),
		cpu, pfm_get_cpu_data(pmu_activation_number, cpu));

	if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
		/*
		 * dump the live PMU registers. This is only meaningful when the
		 * displayed CPU is the one we are running on, hence the UP-only test.
		 */
		psr = pfm_get_psr();

		ia64_srlz_d();

		seq_printf(m,
			"CPU%-2d psr                 : 0x%lx\n"
			"CPU%-2d pmc0                : 0x%lx\n",
			cpu, psr,
			cpu, ia64_get_pmc(0));

		for (i=0; PMC_IS_LAST(i) == 0; i++) {
			if (PMC_IS_COUNTING(i) == 0) continue;
			seq_printf(m,
				"CPU%-2d pmc%u                : 0x%lx\n"
				"CPU%-2d pmd%u                : 0x%lx\n",
				cpu, i, ia64_get_pmc(i),
				cpu, i, ia64_get_pmd(i));
		}
	}
	return 0;
}

const struct seq_operations pfm_seq_ops = {
	.start = pfm_proc_start,
	.next  = pfm_proc_next,
	.stop  = pfm_proc_stop,
	.show  = pfm_proc_show
};

static int
pfm_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pfm_seq_ops);
}

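/*
 * called from the context switch code, for every task, while a system-wide
 * session is active on this CPU. info is the per-CPU pfm_syst_info word and
 * is_ctxswin indicates whether the task is being switched in or out.
 */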
void
pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
{
	struct pt_regs *regs;
	unsigned long dcr;
	unsigned long dcr_pp;

	dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;

	/*
	 * pid 0 is guaranteed to be the idle task. There is one such task with pid 0
	 * on each CPU, so we can rely on the pid to identify the idle task.
	 */
	if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
		regs = task_pt_regs(task);
		ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
		return;
	}
	/*
	 * if monitoring has started
	 */
	if (dcr_pp) {
		dcr = ia64_getreg(_IA64_REG_CR_DCR);
		/*
		 * context switching in?
		 */
		if (is_ctxswin) {
			/* mask monitoring for the idle task */
			ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
			pfm_clear_psr_pp();
			ia64_srlz_i();
			return;
		}
		/*
		 * context switching out: restore monitoring for the next task.
		 *
		 * Due to inlining this odd if-then-else construction generates
		 * better code.
		 */
		ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
		pfm_set_psr_pp();
		ia64_srlz_i();
	}
}

#ifdef CONFIG_SMP

static void
pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
{
	struct task_struct *task = ctx->ctx_task;

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->sp = 1;

	if (GET_PMU_OWNER() == task) {
		DPRINT(("cleared ownership for [%d]\n",
			task_pid_nr(ctx->ctx_task)));
		SET_PMU_OWNER(NULL, NULL);
	}

	/*
	 * disconnect the task from the context and vice-versa
	 */
	PFM_SET_WORK_PENDING(task, 0);

	task->thread.pfm_context  = NULL;
	task->thread.flags       &= ~IA64_THREAD_PM_VALID;

	DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
}

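/*
 * context switch out: save the PMU state of the outgoing task. We always
 * come here with interrupts already disabled by the scheduler.
 */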
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);

	if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
		struct pt_regs *regs = task_pt_regs(task);

		pfm_clear_psr_up();

		pfm_force_cleanup(ctx, regs);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		pfm_context_free(ctx);
		return;
	}

	/*
	 * save current PSR: needed because we modify it
	 */
	ia64_srlz_d();
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level.
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;

	/*
	 * release ownership of this PMU.
	 * PM interrupts are masked, so nothing
	 * can happen.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * we systematically save the PMDs as we have no
	 * guarantee we will be scheduled on that same
	 * CPU again.
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0: ia64_srlz_d() done in pfm_save_pmds().
	 * we will need it on the restore path to check
	 * for pending overflow.
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * finally, allow context access.
	 * interrupts will still be masked after this call.
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}

#else /* !CONFIG_SMP */
void
pfm_save_regs(struct task_struct *task)
{
	pfm_context_t *ctx;
	u64 psr;

	ctx = PFM_GET_CTX(task);
	if (ctx == NULL) return;

	/*
	 * save current PSR: needed because we modify it
	 */
	psr = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_I));

	/*
	 * stop monitoring:
	 * This is the last instruction which may generate an overflow
	 *
	 * We do not need to set psr.sp because it is irrelevant in kernel.
	 * It will be restored from ipsr when going back to user level.
	 */
	pfm_clear_psr_up();

	/*
	 * keep a copy of psr.up (for reload)
	 */
	ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
}

static void
pfm_lazy_save_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long flags;

	{ u64 psr = pfm_get_psr();
	  BUG_ON(psr & IA64_PSR_UP);
	}

	ctx = PFM_GET_CTX(task);

	/*
	 * we need to mask PMU overflow here to
	 * make sure that we maintain pmc0 until
	 * we save it. overflow interrupts are
	 * treated as spurious if there is no
	 * owner.
	 */
	PROTECT_CTX(ctx,flags);

	/*
	 * release ownership of this PMU.
	 * must be done before we save the registers.
	 *
	 * after this call any PMU interrupt is treated
	 * as spurious.
	 */
	SET_PMU_OWNER(NULL, NULL);

	/*
	 * save all the pmds we use
	 */
	pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);

	/*
	 * save pmc0: ia64_srlz_d() done in pfm_save_pmds().
	 * it is needed to check for pending overflow
	 * on the restore path.
	 */
	ctx->th_pmcs[0] = ia64_get_pmc(0);

	/*
	 * unfreeze PMU if it had pending overflows
	 */
	if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();

	/*
	 * PMU interrupts can now be unmasked: they will
	 * be treated as purely spurious and we will not
	 * lose any information.
	 */
	UNPROTECT_CTX(ctx,flags);
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_SMP
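/*
 * context switch in (SMP): reload the PMU state of the incoming task,
 * reusing the registers still live on this CPU when possible.
 */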
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
	unsigned long flags;
	u64 psr, psr_up;
	int need_irq_resend;

	ctx = PFM_GET_CTX(task);
	if (unlikely(ctx == NULL)) return;

	BUG_ON(GET_PMU_OWNER());

	/*
	 * possible on unload
	 */
	if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;

	/*
	 * we always come here with interrupts ALREADY disabled by
	 * the scheduler. So we simply need to protect against concurrent
	 * access, not CPU concurrency.
	 */
	flags = pfm_protect_ctx_ctxsw(ctx);
	psr   = pfm_get_psr();

	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
		struct pt_regs *regs = task_pt_regs(task);

		BUG_ON(ctx->ctx_smpl_hdr);

		pfm_force_cleanup(ctx, regs);

		pfm_unprotect_ctx_ctxsw(ctx, flags);

		/*
		 * this one (kmalloc'ed) is fine with interrupts disabled
		 */
		pfm_context_free(ctx);

		return;
	}

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}
	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;

	/*
	 * if we were the last user of the PMU on that CPU,
	 * then nothing to do except restore psr
	 */
	if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
		/*
		 * retrieve partial reload masks (due to user modifications)
		 */
		pmc_mask = ctx->ctx_reload_pmcs[0];
		pmd_mask = ctx->ctx_reload_pmds[0];

	} else {
		/*
		 * To avoid leaking information to the user level when psr.sp=0,
		 * we must reload ALL implemented pmds (even the ones we don't use).
		 * In the kernel we only allow PFM_READ_PMDS on registers which
		 * we initialized or requested (sampling), so there is no risk there.
		 */
		pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

		/*
		 * ALL accessible PMCs are systematically reloaded, unused registers
		 * get their default (from pfm_reset_pmu_state()) values to avoid
		 * picking up stale configuration.
		 *
		 * PMC0 is never in the mask. It is always restored separately.
		 */
		pmc_mask = ctx->ctx_all_pmcs[0];
	}
	/*
	 * when the context is MASKED, we will restore PMCs with plm=0
	 * and PMDs with stale information, but that's ok, nothing
	 * will be captured.
	 */
	if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information.
		 * On McKinley PMU, this will trigger a PMU interrupt.
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();
		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * we just did a reload, so we reset the partial reload fields
	 */
	ctx->ctx_reload_pmcs[0] = 0UL;
	ctx->ctx_reload_pmds[0] = 0UL;

	SET_LAST_CPU(ctx, smp_processor_id());

	/*
	 * bump activation value for this PMU
	 */
	INC_ACTIVATION();
	/*
	 * record current activation for this context
	 */
	SET_ACTIVATION(ctx);

	/*
	 * establish new ownership
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit: measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();

	/*
	 * allow concurrent access to context
	 */
	pfm_unprotect_ctx_ctxsw(ctx, flags);
}
#else /* !CONFIG_SMP */
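/*
 * context switch in (UP): ownership may still belong to another task,
 * in which case its registers are lazily saved first.
 */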
void
pfm_load_regs (struct task_struct *task)
{
	pfm_context_t *ctx;
	struct task_struct *owner;
	unsigned long pmd_mask, pmc_mask;
	u64 psr, psr_up;
	int need_irq_resend;

	owner = GET_PMU_OWNER();
	ctx   = PFM_GET_CTX(task);
	psr   = pfm_get_psr();

	BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
	BUG_ON(psr & IA64_PSR_I);

	/*
	 * we restore ALL the debug registers to avoid picking up
	 * stale state.
	 *
	 * This must be done even when the task is still the owner
	 * as the registers may have been modified via ptrace()
	 * (not perfmon) by the previous task.
	 */
	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	/*
	 * retrieve saved psr.up
	 */
	psr_up = ctx->ctx_saved_psr_up;
	need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;

	/*
	 * short path: our state is still there, just
	 * need to restore psr and return.
	 *
	 * we do not touch either PMC nor PMD. the psr is not touched
	 * by the overflow handler, so we are safe w.r.t. interrupt
	 * concurrency.
	 */
	if (likely(owner == task)) {
		if (likely(psr_up)) pfm_set_psr_up();
		return;
	}

	/*
	 * someone else is still using the PMU, first push it out and
	 * then we'll be able to install our stuff!
	 *
	 * Upon return, there will be no owner for the current PMU.
	 */
	if (owner) pfm_lazy_save_regs(owner);

	/*
	 * To avoid leaking information to the user level when psr.sp=0,
	 * we must reload ALL implemented pmds (even the ones we don't use).
	 * In the kernel we only allow PFM_READ_PMDS on registers which
	 * we initialized or requested (sampling), so there is no risk there.
	 */
	pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];

	/*
	 * ALL accessible PMCs are systematically reloaded, unused registers
	 * get their default (from pfm_reset_pmu_state()) values to avoid
	 * picking up stale configuration.
	 *
	 * PMC0 is never in the mask. It is always restored separately.
	 */
	pmc_mask = ctx->ctx_all_pmcs[0];

	pfm_restore_pmds(ctx->th_pmds, pmd_mask);
	pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);

	/*
	 * check for pending overflow at the time the state
	 * was saved.
	 */
	if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
		/*
		 * reload pmc0 with the overflow information.
		 * On McKinley PMU, this will trigger a PMU interrupt.
		 */
		ia64_set_pmc(0, ctx->th_pmcs[0]);
		ia64_srlz_d();

		ctx->th_pmcs[0] = 0UL;

		/*
		 * will replay the PMU interrupt
		 */
		if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);

		pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
	}

	/*
	 * establish new ownership
	 */
	SET_PMU_OWNER(task, ctx);

	/*
	 * restore the psr.up bit: measurement
	 * is active again.
	 * no PMU interrupt can happen at this point
	 * because we still have interrupts disabled.
	 */
	if (likely(psr_up)) pfm_set_psr_up();
}
#endif /* CONFIG_SMP */
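/*
 * flush the current values of the PMDs into ctx_pmds[]. This function
 * assumes monitoring is stopped; it reads the live registers when this CPU
 * still holds the context, and the saved thread copies otherwise.
 */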
static void
pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	u64 pmc0;
	unsigned long mask2, val, pmd_val, ovfl_val;
	int i, can_access_pmu = 0;
	int is_self;

	/*
	 * is the caller the task being monitored (or which initiated the
	 * session for system wide measurements)?
	 */
	is_self = ctx->ctx_task == task ? 1 : 0;

	/*
	 * we can access the PMU if the task is the owner of the PMU state on
	 * the current CPU, or if we are running on the CPU bound to the context
	 * in system-wide mode (that is not necessarily the task the context is
	 * attached to in this mode).
	 */
	can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
	if (can_access_pmu) {
		/*
		 * Mark the PMU as not owned.
		 * This will cause the interrupt handler to do nothing in case an
		 * overflow interrupt was in-flight. It also guarantees that pmc0
		 * will contain the final state, which gives us full control over
		 * overflow processing from this point on.
		 */
		SET_PMU_OWNER(NULL, NULL);
		DPRINT(("releasing ownership\n"));

		/*
		 * read current overflow status:
		 * we are guaranteed to read the final stable state
		 */
		ia64_srlz_d();
		pmc0 = ia64_get_pmc(0); /* slow */

		/*
		 * reset freeze bit; overflow status information is destroyed
		 */
		pfm_unfreeze_pmu();
	} else {
		pmc0 = ctx->th_pmcs[0];
		/*
		 * clear whatever overflow status bits there were
		 */
		ctx->th_pmcs[0] = 0;
	}
	ovfl_val = pmu_conf->ovfl_val;
	/*
	 * we save all the used pmds
	 * and take care of overflows for counting PMDs
	 */
	mask2 = ctx->ctx_used_pmds[0];

	DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));

	for (i = 0; mask2; i++, mask2>>=1) {

		/* skip non used pmds */
		if ((mask2 & 0x1) == 0) continue;

		/*
		 * can_access_pmu is always true in system wide mode
		 */
		val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];

		if (PMD_IS_COUNTING(i)) {
			DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
				task_pid_nr(task),
				i,
				ctx->ctx_pmds[i].val,
				val & ovfl_val));

			/*
			 * we rebuild the full 64 bit value of the counter
			 */
			val = ctx->ctx_pmds[i].val + (val & ovfl_val);

			/*
			 * now everything is in ctx_pmds[] and we need
			 * to clear the saved context from save_regs() such that
			 * pfm_read_pmds() gets the correct value
			 */
			pmd_val = 0UL;

			/*
			 * take care of overflow inline
			 */
			if (pmc0 & (1UL << i)) {
				val += 1 + ovfl_val;
				DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
			}
		}

		DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));

		if (is_self) ctx->th_pmds[i] = pmd_val;

		ctx->ctx_pmds[i].val = val;
	}
}

static struct irqaction perfmon_irqaction = {
	.handler = pfm_interrupt_handler,
	.name    = "perfmon"
};

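/*
 * stop monitoring and freeze the PMU on this CPU, so that an alternate
 * interrupt handler (see below) can take over
 */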
static void
pfm_alt_save_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * should not be necessary, but let's take no risk
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * This call is required.
	 * The freeze gives us the ability to test for pending overflow
	 * in the PMC0 register.
	 */
	pfm_freeze_pmu();

	ia64_srlz_d();
}

void
pfm_alt_restore_pmu_state(void *data)
{
	struct pt_regs *regs;

	regs = task_pt_regs(current);

	DPRINT(("called\n"));

	/*
	 * put PMU back in the state expected by perfmon
	 */
	pfm_clear_psr_up();
	pfm_clear_psr_pp();
	ia64_psr(regs)->pp = 0;

	/*
	 * perfmon runs with PMU unfrozen at all times
	 */
	pfm_unfreeze_pmu();

	ia64_srlz_d();
}

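/*
 * install an alternate PMU interrupt handler (used, for example, by the
 * OProfile perfmon driver): reserve a system-wide session on every online
 * CPU, save and quiesce the PMU state everywhere, then switch the handler
 * pointer checked in pfm_interrupt_handler()
 */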
int
pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int ret, i;
	int reserve_cpu;

	/* some sanity checks */
	if (hdl == NULL || hdl->handler == NULL) return -EINVAL;

	/* do the easy test first */
	if (pfm_alt_intr_handler) return -EBUSY;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	/* reserve our session */
	for_each_online_cpu(reserve_cpu) {
		ret = pfm_reserve_session(NULL, 1, reserve_cpu);
		if (ret) goto cleanup_reserve;
	}

	/* save the current system wide pmu states */
	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
		goto cleanup_reserve;
	}

	/* officially change to the alternate interrupt handler */
	pfm_alt_intr_handler = hdl;

	spin_unlock(&pfm_alt_install_check);

	return 0;

cleanup_reserve:
	for_each_online_cpu(i) {
		/* don't unreserve more than we reserved */
		if (i >= reserve_cpu) break;

		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return ret;
}
EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);

int
pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
{
	int i;
	int ret;

	if (hdl == NULL) return -EINVAL;

	/* cannot remove someone else's handler! */
	if (pfm_alt_intr_handler != hdl) return -EINVAL;

	/* one at a time in the install or remove, just fail the others */
	if (!spin_trylock(&pfm_alt_install_check)) {
		return -EBUSY;
	}

	pfm_alt_intr_handler = NULL;

	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
	if (ret) {
		DPRINT(("on_each_cpu() failed: %d\n", ret));
	}

	for_each_online_cpu(i) {
		pfm_unreserve_session(NULL, 1, i);
	}

	spin_unlock(&pfm_alt_install_check);

	return 0;
}
EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);

/*
 * perfmon initialization routine, called from the initcall list
 */
static int init_pfm_fs(void);

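/*
 * probe for a matching PMU description table: use the model-specific
 * probe() function when one is provided, otherwise match on the CPU
 * family (0xff acts as a wildcard)
 */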
static int __init
pfm_probe_pmu(void)
{
	pmu_config_t **p;
	int family;

	family = local_cpu_data->family;
	p      = pmu_confs;

	while (*p) {
		if ((*p)->probe) {
			if ((*p)->probe() == 0) goto found;
		} else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
			goto found;
		}
		p++;
	}
	return -1;
found:
	pmu_conf = *p;
	return 0;
}

static const struct file_operations pfm_proc_fops = {
	.open		= pfm_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

int __init
pfm_init(void)
{
	unsigned int n, n_counters, i;

	printk("perfmon: version %u.%u IRQ %u\n",
		PFM_VERSION_MAJ,
		PFM_VERSION_MIN,
		IA64_PERFMON_VECTOR);

	if (pfm_probe_pmu()) {
		printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
			local_cpu_data->family);
		return -ENODEV;
	}

	/*
	 * compute the number of implemented PMD/PMC from the
	 * description tables
	 */
	n = 0;
	for (i=0; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
		n++;
	}
	pmu_conf->num_pmcs = n;

	n = 0; n_counters = 0;
	for (i=0; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
		n++;
		if (PMD_IS_COUNTING(i)) n_counters++;
	}
	pmu_conf->num_pmds     = n;
	pmu_conf->num_counters = n_counters;

	/*
	 * sanity checks on the number of debug registers
	 */
	if (pmu_conf->use_rr_dbregs) {
		if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
			pmu_conf = NULL;
			return -1;
		}
		if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
			printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_dbrs);
			pmu_conf = NULL;
			return -1;
		}
	}

	printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
		pmu_conf->pmu_name,
		pmu_conf->num_pmcs,
		pmu_conf->num_pmds,
		pmu_conf->num_counters,
		ffz(pmu_conf->ovfl_val));

	/* sanity check */
	if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
		printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/perfmon (mostly for debugging purposes)
	 */
	perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
	if (perfmon_dir == NULL) {
		printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
		pmu_conf = NULL;
		return -1;
	}

	/*
	 * create /proc/sys/kernel/perfmon (for debugging purposes)
	 */
	pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);

	/*
	 * initialize all our spinlocks
	 */
	spin_lock_init(&pfm_sessions.pfs_lock);
	spin_lock_init(&pfm_buffer_fmt_lock);

	init_pfm_fs();

	for (i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;

	return 0;
}

__initcall(pfm_init);
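/*
 * this function is called before pfm_init()
 */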
void
pfm_init_percpu (void)
{
	static int first_time=1;
	/*
	 * make sure no measurement is active
	 * (may inherit programmed PMCs from EFI).
	 */
	pfm_clear_psr_pp();
	pfm_clear_psr_up();

	/*
	 * we run with the PMU not frozen at all times
	 */
	pfm_unfreeze_pmu();

	if (first_time) {
		register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
		first_time=0;
	}

	ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
	ia64_srlz_d();
}
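/*
 * used for debug purposes only
 */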
void
dump_pmu_state(const char *from)
{
	struct task_struct *task;
	struct pt_regs *regs;
	pfm_context_t *ctx;
	unsigned long psr, dcr, info, flags;
	int i, this_cpu;

	local_irq_save(flags);

	this_cpu = smp_processor_id();
	regs     = task_pt_regs(current);
	info     = PFM_CPUINFO_GET();
	dcr      = ia64_getreg(_IA64_REG_CR_DCR);

	if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
		local_irq_restore(flags);
		return;
	}

	printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
		this_cpu,
		from,
		task_pid_nr(current),
		regs->cr_iip,
		current->comm);

	task = GET_PMU_OWNER();
	ctx  = GET_PMU_CTX();

	printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);

	psr = pfm_get_psr();

	printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
		this_cpu,
		ia64_get_pmc(0),
		psr & IA64_PSR_PP ? 1 : 0,
		psr & IA64_PSR_UP ? 1 : 0,
		dcr & IA64_DCR_PP ? 1 : 0,
		info,
		ia64_psr(regs)->up,
		ia64_psr(regs)->pp);

	ia64_psr(regs)->up = 0;
	ia64_psr(regs)->pp = 0;

	for (i=1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
	}

	for (i=1; PMD_IS_LAST(i) == 0; i++) {
		if (PMD_IS_IMPL(i) == 0) continue;
		printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
	}

	if (ctx) {
		/* labels fixed to match the arguments actually printed */
		printk("->CPU%d ctx_state=%d vaddr=%p addr=%p msgq_head=%d msgq_tail=%d saved_psr_up=0x%lx\n",
			this_cpu,
			ctx->ctx_state,
			ctx->ctx_smpl_vaddr,
			ctx->ctx_smpl_hdr,
			ctx->ctx_msgq_head,
			ctx->ctx_msgq_tail,
			ctx->ctx_saved_psr_up);
	}
	local_irq_restore(flags);
}
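/*
 * called from copy_thread(): task is the newly created child, which must
 * not inherit the perfmon state of its parent
 */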
void
pfm_inherit(struct task_struct *task, struct pt_regs *regs)
{
	struct thread_struct *thread;

	DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));

	thread = &task->thread;

	/*
	 * cut links inherited from parent (current)
	 */
	thread->pfm_context = NULL;

	PFM_SET_WORK_PENDING(task, 0);

	/*
	 * the psr bits are already set properly in copy_thread()
	 */
}
#else  /* !CONFIG_PERFMON */
asmlinkage long
sys_perfmonctl (int fd, int cmd, void *arg, int count)
{
	return -ENOSYS;
}
#endif /* CONFIG_PERFMON */