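/*
 * This file implements the perfmon-2 subsystem, which is used to
 * program the IA-64 Performance Monitoring Unit (PMU).
 */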
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/vfs.h>
#include <linux/smp.h>
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>

#include <asm/errno.h>
#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm/perfmon.h>
#include <asm/processor.h>
#include <asm/signal.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/delay.h>

#ifdef CONFIG_PERFMON

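/*
 * context states: a context starts UNLOADED, becomes LOADED once
 * attached to a task or CPU, MASKED while monitoring is suspended
 * after a notified overflow, and ZOMBIE when the monitored task exits
 * before the context could be freed
 */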
#define PFM_CTX_UNLOADED	1
#define PFM_CTX_LOADED		2
#define PFM_CTX_MASKED		3
#define PFM_CTX_ZOMBIE		4

#define PFM_INVALID_ACTIVATION	(~0UL)

#define PFM_NUM_PMC_REGS	64
#define PFM_NUM_PMD_REGS	64

#define PFM_MAX_MSGS		32
#define PFM_CTXQ_EMPTY(g)	((g)->ctx_msgq_head == (g)->ctx_msgq_tail)

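/*
 * PMU register type flags: bit 0 marks a register as implemented,
 * PFM_REG_END marks the last entry of a description table, and the
 * next nibble encodes the register class (monitor, counter, control,
 * config, buffer)
 */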
#define PFM_REG_NOTIMPL		0x0
#define PFM_REG_IMPL		0x1
#define PFM_REG_END		0x2
#define PFM_REG_MONITOR		(0x1<<4|PFM_REG_IMPL)
#define PFM_REG_COUNTING	(0x2<<4|PFM_REG_MONITOR)
#define PFM_REG_CONTROL		(0x4<<4|PFM_REG_IMPL)
#define PFM_REG_CONFIG		(0x8<<4|PFM_REG_IMPL)
#define PFM_REG_BUFFER		(0xc<<4|PFM_REG_IMPL)

#define PMC_IS_LAST(i)	(pmu_conf->pmc_desc[i].type & PFM_REG_END)
#define PMD_IS_LAST(i)	(pmu_conf->pmd_desc[i].type & PFM_REG_END)

#define PMC_OVFL_NOTIFY(ctx, i)	((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)

#define PMC_IS_IMPL(i)	  ((i) < PMU_MAX_PMCS && (pmu_conf->pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i)	  ((i) < PMU_MAX_PMDS && (pmu_conf->pmd_desc[i].type & PFM_REG_IMPL))

#define PMD_IS_COUNTING(i) ((pmu_conf->pmd_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) ((pmu_conf->pmc_desc[i].type & PFM_REG_COUNTING) == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_MONITOR) == PFM_REG_MONITOR)
#define PMC_IS_CONTROL(i)  ((pmu_conf->pmc_desc[i].type & PFM_REG_CONTROL) == PFM_REG_CONTROL)

#define PMC_DFL_VAL(i)     pmu_conf->pmc_desc[i].default_value
#define PMC_RSVD_MASK(i)   pmu_conf->pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i)	   pmu_conf->pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i)	   pmu_conf->pmc_desc[i].dep_pmd[0]

#define PFM_NUM_IBRS	   IA64_NUM_DBG_REGS
#define PFM_NUM_DBRS	   IA64_NUM_DBG_REGS

#define CTX_OVFL_NOBLOCK(c)	((c)->ctx_fl_block == 0)
#define CTX_HAS_SMPL(c)		((c)->ctx_fl_is_sampling)
#define PFM_CTX_TASK(h)		(h)->ctx_task

#define PMU_PMC_OI		5

#define CTX_USED_PMD(ctx, mask)	(ctx)->ctx_used_pmds[0] |= (mask)
#define CTX_IS_USED_PMD(ctx, c)	(((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)

#define CTX_USED_MONITOR(ctx, mask)	(ctx)->ctx_used_monitors[0] |= (mask)

#define CTX_USED_IBR(ctx, n)	(ctx)->ctx_used_ibrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USED_DBR(ctx, n)	(ctx)->ctx_used_dbrs[(n)>>6] |= 1UL << ((n) % 64)
#define CTX_USES_DBREGS(ctx)	(((pfm_context_t *)(ctx))->ctx_fl_using_dbreg == 1)
#define PFM_CODE_RR	0
#define PFM_DATA_RR	1

#define PFM_CPUINFO_CLEAR(v)	pfm_get_cpu_var(pfm_syst_info) &= ~(v)
#define PFM_CPUINFO_SET(v)	pfm_get_cpu_var(pfm_syst_info) |= (v)
#define PFM_CPUINFO_GET()	pfm_get_cpu_var(pfm_syst_info)

#define RDEP(x)	(1UL<<(x))

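/*
 * context locking: the context spinlock is taken with interrupts
 * disabled in most paths because the PMU overflow interrupt handler
 * can also take it; the NOIRQ variants are for callers that already
 * run with interrupts off (e.g. the context-switch path)
 */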
#define PROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
		DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
	} while(0)

#define UNPROTECT_CTX(c, f) \
	do { \
		DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_lock_irqsave(&(c)->ctx_lock, f); \
	} while(0)

#define UNPROTECT_CTX_NOPRINT(c, f) \
	do { \
		spin_unlock_irqrestore(&(c)->ctx_lock, f); \
	} while(0)

#define PROTECT_CTX_NOIRQ(c) \
	do { \
		spin_lock(&(c)->ctx_lock); \
	} while(0)

#define UNPROTECT_CTX_NOIRQ(c) \
	do { \
		spin_unlock(&(c)->ctx_lock); \
	} while(0)

#ifdef CONFIG_SMP

#define GET_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)
#define INC_ACTIVATION()	pfm_get_cpu_var(pmu_activation_number)++
#define SET_ACTIVATION(c)	(c)->ctx_last_activation = GET_ACTIVATION()

#else
#define SET_ACTIVATION(t)	do {} while(0)
#define GET_ACTIVATION(t)	do {} while(0)
#define INC_ACTIVATION(t)	do {} while(0)
#endif

#define SET_PMU_OWNER(t, c)	do { pfm_get_cpu_var(pmu_owner) = (t); pfm_get_cpu_var(pmu_ctx) = (c); } while(0)
#define GET_PMU_OWNER()		pfm_get_cpu_var(pmu_owner)
#define GET_PMU_CTX()		pfm_get_cpu_var(pmu_ctx)

#define LOCK_PFS(g)		spin_lock_irqsave(&pfm_sessions.pfs_lock, g)
#define UNLOCK_PFS(g)		spin_unlock_irqrestore(&pfm_sessions.pfs_lock, g)

#define PFM_REG_RETFLAG_SET(flags, val)	do { flags &= ~PFM_REG_RETFL_MASK; flags |= (val); } while(0)

#define PMC0_HAS_OVFL(cmp0)	(cmp0 & ~0x1UL)

#define PFMFS_MAGIC 0xa0b4d889

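/*
 * debug printing: DPRINT() fires only when the perfmon debug sysctl
 * is non-zero; DPRINT_ovfl() additionally requires debug_ovfl, so the
 * high-frequency overflow path stays quiet by default
 */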
#define PFM_DEBUGGING 1
#ifdef PFM_DEBUGGING
#define DPRINT(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)

#define DPRINT_ovfl(a) \
	do { \
		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl > 0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
	} while (0)
#endif

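/*
 * 64-bit software state of a counting PMD: "val" extends the hardware
 * counter to 64 bits, "lval" records the last value loaded into the
 * register, and the long/short reset values are applied after an
 * overflow depending on the restart mode
 */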
typedef struct {
	unsigned long	val;
	unsigned long	lval;
	unsigned long	long_reset;
	unsigned long	short_reset;
	unsigned long	reset_pmds[4];
	unsigned long	smpl_pmds[4];
	unsigned long	seed;
	unsigned long	mask;
	unsigned int	flags;
	unsigned long	eventid;
} pfm_counter_t;

typedef struct {
	unsigned int block:1;
	unsigned int system:1;
	unsigned int using_dbreg:1;
	unsigned int is_sampling:1;
	unsigned int excl_idle:1;
	unsigned int going_zombie:1;
	unsigned int trap_reason:2;
	unsigned int no_msg:1;
	unsigned int can_restart:1;
	unsigned int reserved:22;
} pfm_context_flags_t;

#define PFM_TRAP_REASON_NONE	0x0
#define PFM_TRAP_REASON_BLOCK	0x1
#define PFM_TRAP_REASON_RESET	0x2

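/*
 * perfmon context: the software image of all PMC/PMD registers,
 * bitmasks tracking which registers are in use, the sampling-buffer
 * bindings, and the message queue used to notify user space
 */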
typedef struct pfm_context {
	spinlock_t		ctx_lock;

	pfm_context_flags_t	ctx_flags;
	unsigned int		ctx_state;

	struct task_struct	*ctx_task;

	unsigned long		ctx_ovfl_regs[4];

	struct completion	ctx_restart_done;

	unsigned long		ctx_used_pmds[4];
	unsigned long		ctx_all_pmds[4];
	unsigned long		ctx_reload_pmds[4];

	unsigned long		ctx_all_pmcs[4];
	unsigned long		ctx_reload_pmcs[4];
	unsigned long		ctx_used_monitors[4];

	unsigned long		ctx_pmcs[PFM_NUM_PMC_REGS];

	unsigned int		ctx_used_ibrs[1];
	unsigned int		ctx_used_dbrs[1];
	unsigned long		ctx_dbrs[IA64_NUM_DBG_REGS];
	unsigned long		ctx_ibrs[IA64_NUM_DBG_REGS];

	pfm_counter_t		ctx_pmds[PFM_NUM_PMD_REGS];

	unsigned long		th_pmcs[PFM_NUM_PMC_REGS];
	unsigned long		th_pmds[PFM_NUM_PMD_REGS];

	u64			ctx_saved_psr_up;

	unsigned long		ctx_last_activation;
	unsigned int		ctx_last_cpu;
	unsigned int		ctx_cpu;

	int			ctx_fd;
	pfm_ovfl_arg_t		ctx_ovfl_arg;

	pfm_buffer_fmt_t	*ctx_buf_fmt;
	void			*ctx_smpl_hdr;
	unsigned long		ctx_smpl_size;
	void			*ctx_smpl_vaddr;

	wait_queue_head_t	ctx_msgq_wait;
	pfm_msg_t		ctx_msgq[PFM_MAX_MSGS];
	int			ctx_msgq_head;
	int			ctx_msgq_tail;
	struct fasync_struct	*ctx_async_queue;

	wait_queue_head_t	ctx_zombieq;
} pfm_context_t;

#define PFM_IS_FILE(f)		((f)->f_op == &pfm_file_ops)

#define PFM_GET_CTX(t)		((pfm_context_t *)(t)->thread.pfm_context)

#ifdef CONFIG_SMP
#define SET_LAST_CPU(ctx, v)	(ctx)->ctx_last_cpu = (v)
#define GET_LAST_CPU(ctx)	(ctx)->ctx_last_cpu
#else
#define SET_LAST_CPU(ctx, v)	do {} while(0)
#define GET_LAST_CPU(ctx)	do {} while(0)
#endif

#define ctx_fl_block		ctx_flags.block
#define ctx_fl_system		ctx_flags.system
#define ctx_fl_using_dbreg	ctx_flags.using_dbreg
#define ctx_fl_is_sampling	ctx_flags.is_sampling
#define ctx_fl_excl_idle	ctx_flags.excl_idle
#define ctx_fl_going_zombie	ctx_flags.going_zombie
#define ctx_fl_trap_reason	ctx_flags.trap_reason
#define ctx_fl_no_msg		ctx_flags.no_msg
#define ctx_fl_can_restart	ctx_flags.can_restart

#define PFM_SET_WORK_PENDING(t, v)	do { (t)->thread.pfm_needs_checking = (v); } while(0)
#define PFM_GET_WORK_PENDING(t)		(t)->thread.pfm_needs_checking

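/*
 * global bookkeeping of perfmon sessions, protected by pfs_lock
 */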
typedef struct {
	spinlock_t		pfs_lock;

	unsigned int		pfs_task_sessions;
	unsigned int		pfs_sys_sessions;
	unsigned int		pfs_sys_use_dbregs;
	unsigned int		pfs_ptrace_use_dbregs;
	struct task_struct	*pfs_sys_session[NR_CPUS];
} pfm_session_t;

typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
typedef struct {
	unsigned int		type;
	int			pm_pos;
	unsigned long		default_value;
	unsigned long		reserved_mask;
	pfm_reg_check_t		read_check;
	pfm_reg_check_t		write_check;
	unsigned long		dep_pmd[4];
	unsigned long		dep_pmc[4];
} pfm_reg_desc_t;

#define PMC_PM(cnum, val)	(((val) >> (pmu_conf->pmc_desc[cnum].pm_pos)) & 0x1)

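/*
 * per-PMU-model description: the PMC/PMD register tables, counts of
 * implemented registers, and a probe() hook used at init time to pick
 * the matching entry from pmu_confs[]
 */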
typedef struct {
	unsigned long	ovfl_val;

	pfm_reg_desc_t	*pmc_desc;
	pfm_reg_desc_t	*pmd_desc;

	unsigned int	num_pmcs;
	unsigned int	num_pmds;
	unsigned long	impl_pmcs[4];
	unsigned long	impl_pmds[4];

	char		*pmu_name;
	unsigned int	pmu_family;
	unsigned int	flags;
	unsigned int	num_ibrs;
	unsigned int	num_dbrs;
	unsigned int	num_counters;
	int		(*probe)(void);
	unsigned int	use_rr_dbregs:1;
} pmu_config_t;

#define PFM_PMU_IRQ_RESEND	1

typedef struct {
	unsigned long ibr_mask:56;
	unsigned long ibr_plm:4;
	unsigned long ibr_ig:3;
	unsigned long ibr_x:1;
} ibr_mask_reg_t;

typedef struct {
	unsigned long dbr_mask:56;
	unsigned long dbr_plm:4;
	unsigned long dbr_ig:2;
	unsigned long dbr_w:1;
	unsigned long dbr_r:1;
} dbr_mask_reg_t;

typedef union {
	unsigned long	val;
	ibr_mask_reg_t	ibr;
	dbr_mask_reg_t	dbr;
} dbreg_t;

typedef struct {
	int		(*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
	char		*cmd_name;
	int		cmd_flags;
	unsigned int	cmd_narg;
	size_t		cmd_argsize;
	int		(*cmd_getsize)(void *arg, size_t *sz);
} pfm_cmd_desc_t;

#define PFM_CMD_FD		0x01
#define PFM_CMD_ARG_READ	0x02
#define PFM_CMD_ARG_RW		0x04
#define PFM_CMD_STOP		0x08

#define PFM_CMD_NAME(cmd)	pfm_cmd_tab[(cmd)].cmd_name
#define PFM_CMD_READ_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_READ)
#define PFM_CMD_RW_ARG(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_ARG_RW)
#define PFM_CMD_USE_FD(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_FD)
#define PFM_CMD_STOPPED(cmd)	(pfm_cmd_tab[(cmd)].cmd_flags & PFM_CMD_STOP)

#define PFM_CMD_ARG_MANY	-1

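/*
 * per-CPU statistics on overflow interrupts and sampling-handler
 * activity, padded to a cache line to avoid false sharing
 */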
typedef struct {
	unsigned long pfm_spurious_ovfl_intr_count;
	unsigned long pfm_replay_ovfl_intr_count;
	unsigned long pfm_ovfl_intr_count;
	unsigned long pfm_ovfl_intr_cycles;
	unsigned long pfm_ovfl_intr_cycles_min;
	unsigned long pfm_ovfl_intr_cycles_max;
	unsigned long pfm_smpl_handler_calls;
	unsigned long pfm_smpl_handler_cycles;
	char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
} pfm_stats_t;

static pfm_stats_t		pfm_stats[NR_CPUS];
static pfm_session_t		pfm_sessions;

static DEFINE_SPINLOCK(pfm_alt_install_check);
static pfm_intr_handler_desc_t	*pfm_alt_intr_handler;

static struct proc_dir_entry	*perfmon_dir;
static pfm_uuid_t		pfm_null_uuid = {0,};

static spinlock_t		pfm_buffer_fmt_lock;
static LIST_HEAD(pfm_buffer_fmt_list);

static pmu_config_t		*pmu_conf;

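/*
 * tunables exported under /proc/sys/kernel/perfmon
 */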
pfm_sysctl_t pfm_sysctl;
EXPORT_SYMBOL(pfm_sysctl);

static ctl_table pfm_ctl_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "debug",
		.data		= &pfm_sysctl.debug,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "debug_ovfl",
		.data		= &pfm_sysctl.debug_ovfl,
		.maxlen		= sizeof(int),
		.mode		= 0666,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "fastctxsw",
		.data		= &pfm_sysctl.fastctxsw,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "expert_mode",
		.data		= &pfm_sysctl.expert_mode,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= &proc_dointvec,
	},
	{}
};
static ctl_table pfm_sysctl_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "perfmon",
		.mode		= 0555,
		.child		= pfm_ctl_table,
	},
	{}
};
static ctl_table pfm_sysctl_root[] = {
	{
		.ctl_name	= CTL_KERN,
		.procname	= "kernel",
		.mode		= 0555,
		.child		= pfm_sysctl_dir,
	},
	{}
};
static struct ctl_table_header *pfm_sysctl_header;

static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#define pfm_get_cpu_var(v)	__ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b)	per_cpu(a, b)

static inline void
pfm_put_task(struct task_struct *task)
{
	if (task != current) put_task_struct(task);
}

static inline void
pfm_set_task_notify(struct task_struct *task)
{
	struct thread_info *info;

	info = (struct thread_info *) ((char *) task + IA64_TASK_SIZE);
	set_bit(TIF_PERFMON_WORK, &info->flags);
}

static inline void
pfm_clear_task_notify(void)
{
	clear_thread_flag(TIF_PERFMON_WORK);
}

static inline void
pfm_reserve_page(unsigned long a)
{
	SetPageReserved(vmalloc_to_page((void *)a));
}

static inline void
pfm_unreserve_page(unsigned long a)
{
	ClearPageReserved(vmalloc_to_page((void *)a));
}

static inline unsigned long
pfm_protect_ctx_ctxsw(pfm_context_t *x)
{
	spin_lock(&(x)->ctx_lock);
	return 0UL;
}

static inline void
pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
{
	spin_unlock(&(x)->ctx_lock);
}

static inline unsigned int
pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct)
{
	return do_munmap(mm, addr, len);
}

static inline unsigned long
pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
{
	return get_unmapped_area(file, addr, len, pgoff, flags);
}

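/*
 * pfmfs is a kernel-internal pseudo-filesystem: it only provides the
 * anonymous inodes behind perfmon context file descriptors and is
 * never mounted by user space
 */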
static int
pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
	     struct vfsmount *mnt)
{
	return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}

static struct file_system_type pfm_fs_type = {
	.name     = "pfmfs",
	.get_sb   = pfmfs_get_sb,
	.kill_sb  = kill_anon_super,
};

DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
DEFINE_PER_CPU(pfm_context_t *, pmu_ctx);
DEFINE_PER_CPU(unsigned long, pmu_activation_number);
EXPORT_PER_CPU_SYMBOL_GPL(pfm_syst_info);

static const struct file_operations pfm_file_ops;

#ifndef CONFIG_SMP
static void pfm_lazy_save_regs (struct task_struct *ta);
#endif

void dump_pmu_state(const char *);
static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);

#include "perfmon_itanium.h"
#include "perfmon_mckinley.h"
#include "perfmon_montecito.h"
#include "perfmon_generic.h"

static pmu_config_t *pmu_confs[] = {
	&pmu_conf_mont,
	&pmu_conf_mck,
	&pmu_conf_ita,
	&pmu_conf_gen,
	NULL
};

static int pfm_end_notify_user(pfm_context_t *ctx);

static inline void
pfm_clear_psr_pp(void)
{
	ia64_rsm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_pp(void)
{
	ia64_ssm(IA64_PSR_PP);
	ia64_srlz_i();
}

static inline void
pfm_clear_psr_up(void)
{
	ia64_rsm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline void
pfm_set_psr_up(void)
{
	ia64_ssm(IA64_PSR_UP);
	ia64_srlz_i();
}

static inline unsigned long
pfm_get_psr(void)
{
	unsigned long tmp;
	tmp = ia64_getreg(_IA64_REG_PSR);
	ia64_srlz_i();
	return tmp;
}

static inline void
pfm_set_psr_l(unsigned long val)
{
	ia64_setreg(_IA64_REG_PSR_L, val);
	ia64_srlz_i();
}

static inline void
pfm_freeze_pmu(void)
{
	ia64_set_pmc(0, 1UL);
	ia64_srlz_d();
}

static inline void
pfm_unfreeze_pmu(void)
{
	ia64_set_pmc(0, 0UL);
	ia64_srlz_d();
}

static inline void
pfm_restore_ibrs(unsigned long *ibrs, unsigned int nibrs)
{
	int i;

	for (i = 0; i < nibrs; i++) {
		ia64_set_ibr(i, ibrs[i]);
		ia64_dv_serialize_instruction();
	}
	ia64_srlz_i();
}

static inline void
pfm_restore_dbrs(unsigned long *dbrs, unsigned int ndbrs)
{
	int i;

	for (i = 0; i < ndbrs; i++) {
		ia64_set_dbr(i, dbrs[i]);
		ia64_dv_serialize_data();
	}
	ia64_srlz_d();
}

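/*
 * counting PMDs are virtualized to 64 bits in software: the hardware
 * register holds only the low ovfl_val bits, the upper bits live in
 * ctx_pmds[].val
 */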
static inline unsigned long
pfm_read_soft_counter(pfm_context_t *ctx, int i)
{
	return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val);
}

static inline void
pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;

	ctx->ctx_pmds[i].val = val & ~ovfl_val;

	ia64_set_pmd(i, val & ovfl_val);
}

static pfm_msg_t *
pfm_get_new_msg(pfm_context_t *ctx)
{
	int idx, next;

	next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));
	if (next == ctx->ctx_msgq_head) return NULL;

	idx = ctx->ctx_msgq_tail;
	ctx->ctx_msgq_tail = next;

	DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx));

	return ctx->ctx_msgq+idx;
}

static pfm_msg_t *
pfm_get_next_msg(pfm_context_t *ctx)
{
	pfm_msg_t *msg;

	DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

	if (PFM_CTXQ_EMPTY(ctx)) return NULL;

	msg = ctx->ctx_msgq+ctx->ctx_msgq_head;

	ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS;

	DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type));

	return msg;
}

static void
pfm_reset_msgq(pfm_context_t *ctx)
{
	ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
	DPRINT(("ctx=%p msgq reset\n", ctx));
}

static void *
pfm_rvmalloc(unsigned long size)
{
	void *mem;
	unsigned long addr;

	size = PAGE_ALIGN(size);
	mem  = vmalloc(size);
	if (mem) {
		memset(mem, 0, size);
		addr = (unsigned long)mem;
		while (size > 0) {
			pfm_reserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	return mem;
}

static void
pfm_rvfree(void *mem, unsigned long size)
{
	unsigned long addr;

	if (mem) {
		DPRINT(("freeing physical buffer @%p size=%lu\n", mem, size));
		addr = (unsigned long) mem;
		while ((long) size > 0) {
			pfm_unreserve_page(addr);
			addr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		vfree(mem);
	}
	return;
}

static pfm_context_t *
pfm_context_alloc(void)
{
	pfm_context_t *ctx;

	ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL);
	if (ctx) {
		DPRINT(("alloc ctx @%p\n", ctx));
	}
	return ctx;
}

static void
pfm_context_free(pfm_context_t *ctx)
{
	if (ctx) {
		DPRINT(("free ctx @%p\n", ctx));
		kfree(ctx);
	}
}

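/*
 * called on overflow notification to stop monitoring without losing
 * the counts accumulated so far: counting PMDs are harvested into the
 * 64-bit software state, and the low 4 bits (the privilege-level
 * mask) of each monitor PMC are cleared so it stops counting
 */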
static void
pfm_mask_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, val, ovfl_mask;
	int i;

	DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));

	ovfl_mask = pmu_conf->ovfl_val;

	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask >>= 1) {

		if ((mask & 0x1) == 0) continue;
		val = ia64_get_pmd(i);

		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val += (val & ovfl_mask);
		} else {
			ctx->ctx_pmds[i].val = val;
		}
		DPRINT_ovfl(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val & ovfl_mask));
	}

	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0UL) continue;
		ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL);
		ctx->th_pmcs[i] &= ~0xfUL;
		DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}

	ia64_srlz_d();
}

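/*
 * reverse of pfm_mask_monitoring(): reinstall the saved PMD values
 * and the unmasked monitor PMCs; only legal on the current task and
 * while the context is in the MASKED state
 */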
static void
pfm_restore_monitoring(struct task_struct *task)
{
	pfm_context_t *ctx = PFM_GET_CTX(task);
	unsigned long mask, ovfl_mask;
	unsigned long psr, val;
	int i, is_system;

	is_system = ctx->ctx_fl_system;
	ovfl_mask = pmu_conf->ovfl_val;

	if (task != current) {
		printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
		return;
	}
	if (ctx->ctx_state != PFM_CTX_MASKED) {
		printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
			task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
		return;
	}
	psr = pfm_get_psr();

	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
		pfm_clear_psr_pp();
	} else {
		pfm_clear_psr_up();
	}

	mask = ctx->ctx_used_pmds[0];
	for (i = 0; mask; i++, mask >>= 1) {

		if ((mask & 0x1) == 0) continue;

		if (PMD_IS_COUNTING(i)) {
			val = ctx->ctx_pmds[i].val & ovfl_mask;
			ctx->ctx_pmds[i].val &= ~ovfl_mask;
		} else {
			val = ctx->ctx_pmds[i].val;
		}
		ia64_set_pmd(i, val);

		DPRINT(("pmd[%d]=0x%lx hw_pmd=0x%lx\n",
			i,
			ctx->ctx_pmds[i].val,
			val));
	}

	mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER;
	for (i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0UL) continue;
		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		ia64_set_pmc(i, ctx->th_pmcs[i]);
		DPRINT(("[%d] pmc[%d]=0x%lx\n",
			task_pid_nr(task), i, ctx->th_pmcs[i]));
	}
	ia64_srlz_d();

	if (ctx->ctx_fl_using_dbreg) {
		pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
		pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
	}

	if (is_system && (PFM_CPUINFO_GET() & PFM_CPUINFO_DCR_PP)) {
		ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
		ia64_srlz_i();
	}
	pfm_set_psr_l(psr);
}

static inline void
pfm_save_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;

	ia64_srlz_d();

	for (i = 0; mask; i++, mask >>= 1) {
		if (mask & 0x1) pmds[i] = ia64_get_pmd(i);
	}
}

static inline void
pfm_restore_pmds(unsigned long *pmds, unsigned long mask)
{
	int i;
	unsigned long val, ovfl_val = pmu_conf->ovfl_val;

	for (i = 0; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0) continue;
		val = PMD_IS_COUNTING(i) ? pmds[i] & ovfl_val : pmds[i];
		ia64_set_pmd(i, val);
	}
	ia64_srlz_d();
}

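/*
 * propagate the software PMD state into the thread save area used at
 * context-switch time; for counting PMDs only the low ovfl_val bits
 * go to hardware, the upper bits stay in ctx_pmds[].val
 */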
static inline void
pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long ovfl_val = pmu_conf->ovfl_val;
	unsigned long mask = ctx->ctx_all_pmds[0];
	unsigned long val;
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i = 0; mask; i++, mask >>= 1) {

		val = ctx->ctx_pmds[i].val;

		if (PMD_IS_COUNTING(i)) {
			ctx->ctx_pmds[i].val = val & ~ovfl_val;
			val &= ovfl_val;
		}
		ctx->th_pmds[i] = val;

		DPRINT(("pmd[%d]=0x%lx soft_val=0x%lx\n",
			i,
			ctx->th_pmds[i],
			ctx->ctx_pmds[i].val));
	}
}

static inline void
pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx)
{
	unsigned long mask = ctx->ctx_all_pmcs[0];
	int i;

	DPRINT(("mask=0x%lx\n", mask));

	for (i = 0; mask; i++, mask >>= 1) {

		ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i]));
	}
}

static inline void
pfm_restore_pmcs(unsigned long *pmcs, unsigned long mask)
{
	int i;

	for (i = 0; mask; i++, mask >>= 1) {
		if ((mask & 0x1) == 0) continue;
		ia64_set_pmc(i, pmcs[i]);
	}
	ia64_srlz_d();
}

static inline int
pfm_uuid_cmp(pfm_uuid_t a, pfm_uuid_t b)
{
	return memcmp(a, b, sizeof(pfm_uuid_t));
}

static inline int
pfm_buf_fmt_exit(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_exit) ret = (*fmt->fmt_exit)(task, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_getsize(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags, int cpu, void *arg, unsigned long *size)
{
	int ret = 0;
	if (fmt->fmt_getsize) ret = (*fmt->fmt_getsize)(task, flags, cpu, arg, size);
	return ret;
}

static inline int
pfm_buf_fmt_validate(pfm_buffer_fmt_t *fmt, struct task_struct *task, unsigned int flags,
		     int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_validate) ret = (*fmt->fmt_validate)(task, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_init(pfm_buffer_fmt_t *fmt, struct task_struct *task, void *buf, unsigned int flags,
		 int cpu, void *arg)
{
	int ret = 0;
	if (fmt->fmt_init) ret = (*fmt->fmt_init)(task, buf, flags, cpu, arg);
	return ret;
}

static inline int
pfm_buf_fmt_restart(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart) ret = (*fmt->fmt_restart)(task, ctrl, buf, regs);
	return ret;
}

static inline int
pfm_buf_fmt_restart_active(pfm_buffer_fmt_t *fmt, struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, struct pt_regs *regs)
{
	int ret = 0;
	if (fmt->fmt_restart_active) ret = (*fmt->fmt_restart_active)(task, ctrl, buf, regs);
	return ret;
}

static pfm_buffer_fmt_t *
__pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	struct list_head * pos;
	pfm_buffer_fmt_t * entry;

	list_for_each(pos, &pfm_buffer_fmt_list) {
		entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
		if (pfm_uuid_cmp(uuid, entry->fmt_uuid) == 0)
			return entry;
	}
	return NULL;
}

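/*
 * sampling buffer formats are identified by UUID and kept on a global
 * list; lookup, registration and removal all serialize on
 * pfm_buffer_fmt_lock
 */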
static pfm_buffer_fmt_t *
pfm_find_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t * fmt;
	spin_lock(&pfm_buffer_fmt_lock);
	fmt = __pfm_find_buffer_fmt(uuid);
	spin_unlock(&pfm_buffer_fmt_lock);
	return fmt;
}

int
pfm_register_buffer_fmt(pfm_buffer_fmt_t *fmt)
{
	int ret = 0;

	if (fmt == NULL || fmt->fmt_name == NULL) return -EINVAL;

	if (fmt->fmt_handler == NULL) return -EINVAL;

	spin_lock(&pfm_buffer_fmt_lock);

	if (__pfm_find_buffer_fmt(fmt->fmt_uuid)) {
		printk(KERN_ERR "perfmon: duplicate sampling format: %s\n", fmt->fmt_name);
		ret = -EBUSY;
		goto out;
	}
	list_add(&fmt->fmt_list, &pfm_buffer_fmt_list);
	printk(KERN_INFO "perfmon: added sampling format %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_register_buffer_fmt);

int
pfm_unregister_buffer_fmt(pfm_uuid_t uuid)
{
	pfm_buffer_fmt_t *fmt;
	int ret = 0;

	spin_lock(&pfm_buffer_fmt_lock);

	fmt = __pfm_find_buffer_fmt(uuid);
	if (!fmt) {
		printk(KERN_ERR "perfmon: cannot unregister format, not found\n");
		ret = -EINVAL;
		goto out;
	}
	list_del_init(&fmt->fmt_list);
	printk(KERN_INFO "perfmon: removed sampling format: %s\n", fmt->fmt_name);

out:
	spin_unlock(&pfm_buffer_fmt_lock);
	return ret;
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);

extern void update_pal_halt_status(int);

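/*
 * global session accounting: at most one system-wide session per CPU,
 * and system-wide sessions are mutually exclusive with per-task
 * sessions. Reserving a session also disables the PAL halt
 * optimization via update_pal_halt_status().
 */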
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
	unsigned long flags;

	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		if (pfm_sessions.pfs_task_sessions > 0UL) {
			DPRINT(("system wide not possible, %u conflicting task_sessions\n",
				pfm_sessions.pfs_task_sessions));
			goto abort;
		}

		if (pfm_sessions.pfs_sys_session[cpu]) goto error_conflict;

		DPRINT(("reserving system wide session on CPU%u currently on CPU%u\n", cpu, smp_processor_id()));

		pfm_sessions.pfs_sys_session[cpu] = task;

		pfm_sessions.pfs_sys_sessions++;

	} else {
		if (pfm_sessions.pfs_sys_sessions) goto abort;
		pfm_sessions.pfs_task_sessions++;
	}

	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	update_pal_halt_status(0);

	UNLOCK_PFS(flags);

	return 0;

error_conflict:
	DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
		task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
		cpu));
abort:
	UNLOCK_PFS(flags);

	return -EBUSY;
}

static int
pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
{
	unsigned long flags;

	LOCK_PFS(flags);

	DPRINT(("in sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (is_syswide) {
		pfm_sessions.pfs_sys_session[cpu] = NULL;

		if (ctx && ctx->ctx_fl_using_dbreg) {
			if (pfm_sessions.pfs_sys_use_dbregs == 0) {
				printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx);
			} else {
				pfm_sessions.pfs_sys_use_dbregs--;
			}
		}
		pfm_sessions.pfs_sys_sessions--;
	} else {
		pfm_sessions.pfs_task_sessions--;
	}
	DPRINT(("out sys_sessions=%u task_sessions=%u dbregs=%u syswide=%d cpu=%u\n",
		pfm_sessions.pfs_sys_sessions,
		pfm_sessions.pfs_task_sessions,
		pfm_sessions.pfs_sys_use_dbregs,
		is_syswide,
		cpu));

	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
		update_pal_halt_status(1);

	UNLOCK_PFS(flags);

	return 0;
}

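/*
 * remove the sampling buffer mapping from the user address space;
 * called from pfm_flush() with the context unlocked
 */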
static int
pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size)
{
	int r;

	if (task->mm == NULL || size == 0UL || vaddr == NULL) {
		printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
		return -EINVAL;
	}

	DPRINT(("smpl_vaddr=%p size=%lu\n", vaddr, size));

	down_write(&task->mm->mmap_sem);

	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size));

	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0);

	up_write(&task->mm->mmap_sem);
	if (r != 0) {
		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
	}

	DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));

	return 0;
}

#if 0
static int
pfm_free_smpl_buffer(pfm_context_t *ctx)
{
	pfm_buffer_fmt_t *fmt;

	if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;

	fmt = ctx->ctx_buf_fmt;

	DPRINT(("sampling buffer @%p size %lu vaddr=%p\n",
		ctx->ctx_smpl_hdr,
		ctx->ctx_smpl_size,
		ctx->ctx_smpl_vaddr));

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);

	pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);

	ctx->ctx_smpl_hdr  = NULL;
	ctx->ctx_smpl_size = 0UL;

	return 0;

invalid_free:
	printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
	return -EINVAL;
}
#endif

static inline void
pfm_exit_smpl_buffer(pfm_buffer_fmt_t *fmt)
{
	if (fmt == NULL) return;

	pfm_buf_fmt_exit(fmt, current, NULL, NULL);
}

static struct vfsmount *pfmfs_mnt;

static int __init
init_pfm_fs(void)
{
	int err = register_filesystem(&pfm_fs_type);
	if (!err) {
		pfmfs_mnt = kern_mount(&pfm_fs_type);
		err = PTR_ERR(pfmfs_mnt);
		if (IS_ERR(pfmfs_mnt))
			unregister_filesystem(&pfm_fs_type);
		else
			err = 0;
	}
	return err;
}

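/*
 * read() returns exactly one pfm_msg_t per call: sleep interruptibly
 * on the message queue (unless O_NONBLOCK is set), then copy the
 * oldest queued message out to user space
 */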
static ssize_t
pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
{
	pfm_context_t *ctx;
	pfm_msg_t *msg;
	ssize_t ret;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_read: bad magic [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
		return -EINVAL;
	}

	if (size < sizeof(pfm_msg_t)) {
		DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t)));
		return -EINVAL;
	}

	PROTECT_CTX(ctx, flags);

	add_wait_queue(&ctx->ctx_msgq_wait, &wait);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail));

		ret = 0;
		if (PFM_CTXQ_EMPTY(ctx) == 0) break;

		UNPROTECT_CTX(ctx, flags);

		ret = -EAGAIN;
		if (filp->f_flags & O_NONBLOCK) break;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();

		PROTECT_CTX(ctx, flags);
	}
	DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->ctx_msgq_wait, &wait);

	if (ret < 0) goto abort;

	ret = -EINVAL;
	msg = pfm_get_next_msg(ctx);
	if (msg == NULL) {
		printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
		goto abort_locked;
	}

	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

	ret = -EFAULT;
	if (copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);

abort_locked:
	UNPROTECT_CTX(ctx, flags);
abort:
	return ret;
}

static ssize_t
pfm_write(struct file *file, const char __user *ubuf,
	  size_t size, loff_t *ppos)
{
	DPRINT(("pfm_write called\n"));
	return -EINVAL;
}

static unsigned int
pfm_poll(struct file *filp, poll_table * wait)
{
	pfm_context_t *ctx;
	unsigned long flags;
	unsigned int mask = 0;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
		return 0;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
		return 0;
	}

	DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd));

	poll_wait(filp, &ctx->ctx_msgq_wait, wait);

	PROTECT_CTX(ctx, flags);

	if (PFM_CTXQ_EMPTY(ctx) == 0)
		mask = POLLIN | POLLRDNORM;

	UNPROTECT_CTX(ctx, flags);

	DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask));

	return mask;
}

static int
pfm_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	DPRINT(("pfm_ioctl called\n"));
	return -EINVAL;
}

static inline int
pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
{
	int ret;

	ret = fasync_helper(fd, filp, on, &ctx->ctx_async_queue);

	DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		task_pid_nr(current),
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

static int
pfm_fasync(int fd, struct file *filp, int on)
{
	pfm_context_t *ctx;
	int ret;

	if (PFM_IS_FILE(filp) == 0) {
		printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	ret = pfm_do_fasync(fd, filp, ctx, on);

	DPRINT(("pfm_fasync called on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
		fd,
		on,
		ctx->ctx_async_queue, ret));

	return ret;
}

#ifdef CONFIG_SMP

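/*
 * in SMP system-wide mode a context may only be manipulated on the
 * CPU it monitors: pfm_syswide_force_stop() runs on that CPU via
 * smp_call_function_single() to unload the context on our behalf
 */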
static void
pfm_syswide_force_stop(void *info)
{
	pfm_context_t *ctx = (pfm_context_t *)info;
	struct pt_regs *regs = task_pt_regs(current);
	struct task_struct *owner;
	unsigned long flags;
	int ret;

	if (ctx->ctx_cpu != smp_processor_id()) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop for CPU%d but on CPU%d\n",
			ctx->ctx_cpu,
			smp_processor_id());
		return;
	}
	owner = GET_PMU_OWNER();
	if (owner != ctx->ctx_task) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
			smp_processor_id(),
			task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
		return;
	}
	if (GET_PMU_CTX() != ctx) {
		printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n",
			smp_processor_id(),
			GET_PMU_CTX(), ctx);
		return;
	}

	DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));

	local_irq_save(flags);

	ret = pfm_context_unload(ctx, NULL, 0, regs);
	if (ret) {
		DPRINT(("context_unload returned %d\n", ret));
	}

	local_irq_restore(flags);
}

static void
pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
{
	int ret;

	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
}
#endif

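/*
 * invoked on every close() of the descriptor: unload the context if
 * the caller owns it and tear down the caller's mapping of the
 * sampling buffer; the context itself is freed in pfm_close()
 */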
static int
pfm_flush(struct file *filp, fl_owner_t id)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_vaddr = NULL;
	int state, is_system;

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	if (filp->f_flags & FASYNC) {
		DPRINT(("cleaning up async_queue=%p\n", ctx->ctx_async_queue));
		pfm_do_fasync(-1, filp, ctx, 0);
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	if (task == current) {
#ifdef CONFIG_SMP
		if (is_system && ctx->ctx_cpu != smp_processor_id()) {

			DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));

			local_irq_restore(flags);

			pfm_syswide_cleanup_other_cpu(ctx);

			local_irq_save(flags);

		} else
#endif
		{

			DPRINT(("forcing unload\n"));

			pfm_context_unload(ctx, NULL, 0, regs);

			DPRINT(("ctx_state=%d\n", ctx->ctx_state));
		}
	}

	if (ctx->ctx_smpl_vaddr && current->mm) {
		smpl_buf_vaddr = ctx->ctx_smpl_vaddr;
		smpl_buf_size  = ctx->ctx_smpl_size;
	}

	UNPROTECT_CTX(ctx, flags);

	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size);

	return 0;
}

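/*
 * called when the last reference to the file is released. If the
 * context is attached to a task blocked in the overflow handler, mark
 * it going-zombie, wake the task up, and wait for it to unload; if it
 * is attached to another task on SMP, defer the final free to that
 * task's context-switch path (free_possible = 0).
 */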
static int
pfm_close(struct inode *inode, struct file *filp)
{
	pfm_context_t *ctx;
	struct task_struct *task;
	struct pt_regs *regs;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	unsigned long smpl_buf_size = 0UL;
	void *smpl_buf_addr = NULL;
	int free_possible = 1;
	int state, is_system;

	DPRINT(("pfm_close called private=%p\n", filp->private_data));

	if (PFM_IS_FILE(filp) == 0) {
		DPRINT(("bad magic\n"));
		return -EBADF;
	}

	ctx = (pfm_context_t *)filp->private_data;
	if (ctx == NULL) {
		printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
		return -EBADF;
	}

	PROTECT_CTX(ctx, flags);

	state     = ctx->ctx_state;
	is_system = ctx->ctx_fl_system;

	task = PFM_CTX_TASK(ctx);
	regs = task_pt_regs(task);

	DPRINT(("ctx_state=%d is_current=%d\n",
		state,
		task == current ? 1 : 0));

	if (state == PFM_CTX_UNLOADED) goto doit;

	if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) {

		ctx->ctx_fl_going_zombie = 1;

		complete(&ctx->ctx_restart_done);

		DPRINT(("waking up ctx_state=%d\n", state));

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&ctx->ctx_zombieq, &wait);

		UNPROTECT_CTX(ctx, flags);

		schedule();

		PROTECT_CTX(ctx, flags);

		remove_wait_queue(&ctx->ctx_zombieq, &wait);
		set_current_state(TASK_RUNNING);

		DPRINT(("after zombie wakeup ctx_state=%d\n", state));
	}
	else if (task != current) {
#ifdef CONFIG_SMP
		ctx->ctx_state = PFM_CTX_ZOMBIE;

		DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));

		free_possible = 0;
#else
		pfm_context_unload(ctx, NULL, 0, regs);
#endif
	}

doit:
	state = ctx->ctx_state;

	if (ctx->ctx_smpl_hdr) {
		smpl_buf_addr = ctx->ctx_smpl_hdr;
		smpl_buf_size = ctx->ctx_smpl_size;

		ctx->ctx_smpl_hdr = NULL;
		ctx->ctx_fl_is_sampling = 0;
	}

	DPRINT(("ctx_state=%d free_possible=%d addr=%p size=%lu\n",
		state,
		free_possible,
		smpl_buf_addr,
		smpl_buf_size));

	if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt);

	if (state == PFM_CTX_ZOMBIE) {
		pfm_unreserve_session(ctx, ctx->ctx_fl_system, ctx->ctx_cpu);
	}

	filp->private_data = NULL;

	UNPROTECT_CTX(ctx, flags);

	if (smpl_buf_addr) pfm_rvfree(smpl_buf_addr, smpl_buf_size);

	if (free_possible) pfm_context_free(ctx);

	return 0;
}

static int
pfm_no_open(struct inode *irrelevant, struct file *dontcare)
{
	DPRINT(("pfm_no_open called\n"));
	return -ENXIO;
}

static const struct file_operations pfm_file_ops = {
	.llseek   = no_llseek,
	.read     = pfm_read,
	.write    = pfm_write,
	.poll     = pfm_poll,
	.ioctl    = pfm_ioctl,
	.open     = pfm_no_open,
	.fasync   = pfm_fasync,
	.release  = pfm_close,
	.flush    = pfm_flush
};

static int
pfmfs_delete_dentry(struct dentry *dentry)
{
	return 1;
}

static struct dentry_operations pfmfs_dentry_operations = {
	.d_delete = pfmfs_delete_dentry,
};

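/*
 * hand-rolled anonymous file allocation: grab an unused fd, then wire
 * a new file structure to a fresh pfmfs inode and a self-deleting
 * dentry (d_delete always returns 1), and install the descriptor
 */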
static int
pfm_alloc_fd(struct file **cfile)
{
	int fd, ret = 0;
	struct file *file = NULL;
	struct inode * inode;
	char name[32];
	struct qstr this;

	fd = get_unused_fd();
	if (fd < 0) return -ENFILE;

	ret = -ENFILE;

	file = get_empty_filp();
	if (!file) goto out;

	inode = new_inode(pfmfs_mnt->mnt_sb);
	if (!inode) goto out;

	DPRINT(("new inode ino=%ld @%p\n", inode->i_ino, inode));

	inode->i_mode = S_IFCHR|S_IRUGO;
	inode->i_uid  = current->fsuid;
	inode->i_gid  = current->fsgid;

	sprintf(name, "[%lu]", inode->i_ino);
	this.name = name;
	this.len  = strlen(name);
	this.hash = inode->i_ino;

	ret = -ENOMEM;

	file->f_path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
	if (!file->f_path.dentry) goto out;

	file->f_path.dentry->d_op = &pfmfs_dentry_operations;

	d_add(file->f_path.dentry, inode);
	file->f_path.mnt = mntget(pfmfs_mnt);
	file->f_mapping = inode->i_mapping;

	file->f_op    = &pfm_file_ops;
	file->f_mode  = FMODE_READ;
	file->f_flags = O_RDONLY;
	file->f_pos   = 0;

	fd_install(fd, file);

	*cfile = file;

	return fd;
out:
	if (file) put_filp(file);
	put_unused_fd(fd);
	return ret;
}

static void
pfm_free_fd(int fd, struct file *file)
{
	struct files_struct *files = current->files;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	rcu_assign_pointer(fdt->fd[fd], NULL);
	spin_unlock(&files->file_lock);

	if (file)
		put_filp(file);
	put_unused_fd(fd);
}

static int
pfm_remap_buffer(struct vm_area_struct *vma, unsigned long buf, unsigned long addr, unsigned long size)
{
	DPRINT(("CPU%d buf=0x%lx addr=0x%lx size=%ld\n", smp_processor_id(), buf, addr, size));

	while (size > 0) {
		unsigned long pfn = ia64_tpa(buf) >> PAGE_SHIFT;

		if (remap_pfn_range(vma, addr, pfn, PAGE_SIZE, PAGE_READONLY))
			return -ENOMEM;

		addr += PAGE_SIZE;
		buf  += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	return 0;
}

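/*
 * allocate the kernel-side sampling buffer and map it read-only into
 * the task's address space; the size is charged against
 * RLIMIT_MEMLOCK because the pages are reserved (pinned)
 */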
static int
pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma = NULL;
	unsigned long size;
	void *smpl_buf;

	size = PAGE_ALIGN(rsize);

	DPRINT(("sampling buffer rsize=%lu size=%lu bytes\n", rsize, size));

	if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
		return -ENOMEM;

	smpl_buf = pfm_rvmalloc(size);
	if (smpl_buf == NULL) {
		DPRINT(("Can't allocate sampling buffer\n"));
		return -ENOMEM;
	}

	DPRINT(("smpl_buf @%p\n", smpl_buf));

	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma) {
		DPRINT(("Cannot allocate vma\n"));
		goto error_kmem;
	}

	vma->vm_mm	  = mm;
	vma->vm_file	  = filp;
	vma->vm_flags	  = VM_READ|VM_MAYREAD|VM_RESERVED;
	vma->vm_page_prot = PAGE_READONLY;

	ctx->ctx_smpl_hdr  = smpl_buf;
	ctx->ctx_smpl_size = size;

	down_write(&task->mm->mmap_sem);

	vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0);
	if (vma->vm_start == 0UL) {
		DPRINT(("Cannot find unmapped area for size %ld\n", size));
		up_write(&task->mm->mmap_sem);
		goto error;
	}
	vma->vm_end = vma->vm_start + size;
	vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;

	DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start));

	if (pfm_remap_buffer(vma, (unsigned long)smpl_buf, vma->vm_start, size)) {
		DPRINT(("Can't remap buffer\n"));
		up_write(&task->mm->mmap_sem);
		goto error;
	}

	get_file(filp);

	insert_vm_struct(mm, vma);

	mm->total_vm += size >> PAGE_SHIFT;
	vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
			vma_pages(vma));
	up_write(&task->mm->mmap_sem);

	ctx->ctx_smpl_vaddr = (void *)vma->vm_start;
	*(unsigned long *)user_vaddr = vma->vm_start;

	return 0;

error:
	kmem_cache_free(vm_area_cachep, vma);
error_kmem:
	pfm_rvfree(smpl_buf, size);

	return -ENOMEM;
}

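/*
 * a task may only be attached to when the caller's uid/gid match all
 * of the target's ids, or when the caller has CAP_SYS_PTRACE
 */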
static int
pfm_bad_permissions(struct task_struct *task)
{
	DPRINT(("cur: uid=%d gid=%d task: euid=%d suid=%d uid=%d egid=%d sgid=%d\n",
		current->uid,
		current->gid,
		task->euid,
		task->suid,
		task->uid,
		task->egid,
		task->sgid));

	return ((current->uid != task->euid)
	    || (current->uid != task->suid)
	    || (current->uid != task->uid)
	    || (current->gid != task->egid)
	    || (current->gid != task->sgid)
	    || (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE);
}

static int
pfarg_is_sane(struct task_struct *task, pfarg_context_t *pfx)
{
	int ctx_flags;

	ctx_flags = pfx->ctx_flags;

	if (ctx_flags & PFM_FL_SYSTEM_WIDE) {
		if (ctx_flags & PFM_FL_NOTIFY_BLOCK) {
			DPRINT(("cannot use blocking mode when in system wide monitoring\n"));
			return -EINVAL;
		}
	}

	return 0;
}

static int
pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags,
		     unsigned int cpu, pfarg_context_t *arg)
{
	pfm_buffer_fmt_t *fmt = NULL;
	unsigned long size = 0UL;
	void *uaddr = NULL;
	void *fmt_arg = NULL;
	int ret = 0;
#define PFM_CTXARG_BUF_ARG(a)	(pfm_buffer_fmt_t *)(a+1)

	fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
		return -EINVAL;
	}

	if (fmt->fmt_arg_size) fmt_arg = PFM_CTXARG_BUF_ARG(arg);

	ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);

	DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));

	if (ret) goto error;

	ctx->ctx_buf_fmt = fmt;

	ret = pfm_buf_fmt_getsize(fmt, task, ctx_flags, cpu, fmt_arg, &size);
	if (ret) goto error;

	if (size) {
		ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr);
		if (ret) goto error;

		arg->ctx_smpl_vaddr = uaddr;
	}
	ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg);

error:
	return ret;
}

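/*
 * install the default (reset) values for all implemented PMCs. PMC0
 * carries the freeze bit and is managed by the kernel, so it is
 * excluded from the set accessible to the context.
 */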
static void
pfm_reset_pmu_state(pfm_context_t *ctx)
{
	int i;

	for (i = 1; PMC_IS_LAST(i) == 0; i++) {
		if (PMC_IS_IMPL(i) == 0) continue;
		ctx->ctx_pmcs[i] = PMC_DFL_VAL(i);
		DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i]));
	}

	ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1;

	ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0];

	DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0], ctx->ctx_all_pmds[0]));

	ctx->ctx_used_ibrs[0] = 0UL;
	ctx->ctx_used_dbrs[0] = 0UL;
}

static int
pfm_ctx_getsize(void *arg, size_t *sz)
{
	pfarg_context_t *req = (pfarg_context_t *)arg;
	pfm_buffer_fmt_t *fmt;

	*sz = 0;

	if (!pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) return 0;

	fmt = pfm_find_buffer_fmt(req->ctx_smpl_buf_id);
	if (fmt == NULL) {
		DPRINT(("cannot find buffer format\n"));
		return -EINVAL;
	}

	*sz = fmt->fmt_arg_size;
	DPRINT(("arg_size=%lu\n", *sz));

	return 0;
}

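/*
 * validity checks on an attach target: it must have a user memory
 * context, pass the permission test, not be a zombie, and, unless it
 * is the caller itself, be stopped or ptrace-stopped
 */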
static int
pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
{
	if (task->mm == NULL) {
		DPRINT(("task [%d] has no memory context (kernel thread)\n", task_pid_nr(task)));
		return -EPERM;
	}
	if (pfm_bad_permissions(task)) {
		DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
		return -EPERM;
	}

	if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
		DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
		return -EINVAL;
	}

	if (task->exit_state == EXIT_ZOMBIE) {
		DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
		return -EBUSY;
	}

	if (task == current) return 0;

	if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
		DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
		return -EBUSY;
	}

	wait_task_inactive(task);

	return 0;
}

static int
pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
{
	struct task_struct *p = current;
	int ret;

	if (pid < 2) return -EPERM;

	if (pid != current->pid) {

		read_lock(&tasklist_lock);

		p = find_task_by_pid(pid);

		if (p) get_task_struct(p);

		read_unlock(&tasklist_lock);

		if (p == NULL) return -ESRCH;
	}

	ret = pfm_task_incompatible(ctx, p);
	if (ret == 0) {
		*task = p;
	} else if (p != current) {
		pfm_put_task(p);
	}
	return ret;
}

2682static int
2683pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2684{
2685 pfarg_context_t *req = (pfarg_context_t *)arg;
2686 struct file *filp;
2687 int ctx_flags;
2688 int ret;
2689
2690
2691 ret = pfarg_is_sane(current, req);
2692 if (ret < 0) return ret;
2693
2694 ctx_flags = req->ctx_flags;
2695
2696 ret = -ENOMEM;
2697
2698 ctx = pfm_context_alloc();
2699 if (!ctx) goto error;
2700
2701 ret = pfm_alloc_fd(&filp);
2702 if (ret < 0) goto error_file;
2703
2704 req->ctx_fd = ctx->ctx_fd = ret;
2705
2706
2707
2708
2709 filp->private_data = ctx;
2710
2711
2712
2713
2714 if (pfm_uuid_cmp(req->ctx_smpl_buf_id, pfm_null_uuid)) {
2715 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req);
2716 if (ret) goto buffer_error;
2717 }
2718
2719
2720
2721
2722 spin_lock_init(&ctx->ctx_lock);
2723
2724
2725
2726
2727 ctx->ctx_state = PFM_CTX_UNLOADED;
2728
2729
2730
2731
2732 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0;
2733 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0;
2734 ctx->ctx_fl_is_sampling = ctx->ctx_buf_fmt ? 1 : 0;
2735 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0;
2743
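	/*
	 * init restart completion: in blocking mode the monitored task
	 * sleeps on this in pfm_handle_work() until pfm_restart()
	 * completes it.
	 */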
2744 init_completion(&ctx->ctx_restart_done);
2745
2746
2747
2748
2749 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
2750 SET_LAST_CPU(ctx, -1);
2751
2752
2753
2754
2755 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0;
2756 init_waitqueue_head(&ctx->ctx_msgq_wait);
2757 init_waitqueue_head(&ctx->ctx_zombieq);
2758
2759 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n",
2760 ctx,
2761 ctx_flags,
2762 ctx->ctx_fl_system,
2763 ctx->ctx_fl_block,
2764 ctx->ctx_fl_excl_idle,
2765 ctx->ctx_fl_no_msg,
2766 ctx->ctx_fd));
2767
2768
2769
2770
2771 pfm_reset_pmu_state(ctx);
2772
2773 return 0;
2774
2775buffer_error:
2776 pfm_free_fd(ctx->ctx_fd, filp);
2777
2778 if (ctx->ctx_buf_fmt) {
2779 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs);
2780 }
2781error_file:
2782 pfm_context_free(ctx);
2783
2784error:
2785 return ret;
2786}
2787
2788static inline unsigned long
2789pfm_new_counter_value (pfm_counter_t *reg, int is_long_reset)
2790{
2791 unsigned long val = is_long_reset ? reg->long_reset : reg->short_reset;
2792 unsigned long new_seed, old_seed = reg->seed, mask = reg->mask;
2793 extern unsigned long carta_random32 (unsigned long seed);
2794
2795 if (reg->flags & PFM_REGFL_RANDOM) {
2796 new_seed = carta_random32(old_seed);
2797 val -= (old_seed & mask);
2798 if ((mask >> 32) != 0)
2799
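			/* construct a full 64-bit random value by also randomizing the upper half */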
2800 new_seed |= carta_random32(old_seed >> 32) << 32;
2801 reg->seed = new_seed;
2802 }
2803 reg->lval = val;
2804 return val;
2805}
2806
2807static void
2808pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2809{
2810 unsigned long mask = ovfl_regs[0];
2811 unsigned long reset_others = 0UL;
2812 unsigned long val;
2813 int i;
2814
2815
2816
2817
2818 mask >>= PMU_FIRST_COUNTER;
2819 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2820
2821 if ((mask & 0x1UL) == 0UL) continue;
2822
		ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2824 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2825
2826 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2827 }
2828
2829
2830
2831
2832 for(i = 0; reset_others; i++, reset_others >>= 1) {
2833
2834 if ((reset_others & 0x1) == 0) continue;
2835
2836 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2837
2838 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2839 is_long_reset ? "long" : "short", i, val));
2840 }
2841}
2842
2843static void
2844pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset)
2845{
2846 unsigned long mask = ovfl_regs[0];
2847 unsigned long reset_others = 0UL;
2848 unsigned long val;
2849 int i;
2850
2851 DPRINT_ovfl(("ovfl_regs=0x%lx is_long_reset=%d\n", ovfl_regs[0], is_long_reset));
2852
2853 if (ctx->ctx_state == PFM_CTX_MASKED) {
2854 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset);
2855 return;
2856 }
2857
2858
2859
2860
2861 mask >>= PMU_FIRST_COUNTER;
2862 for(i = PMU_FIRST_COUNTER; mask; i++, mask >>= 1) {
2863
2864 if ((mask & 0x1UL) == 0UL) continue;
2865
		val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2867 reset_others |= ctx->ctx_pmds[i].reset_pmds[0];
2868
2869 DPRINT_ovfl((" %s reset ctx_pmds[%d]=%lx\n", is_long_reset ? "long" : "short", i, val));
2870
2871 pfm_write_soft_counter(ctx, i, val);
2872 }
2873
2874
2875
2876
2877 for(i = 0; reset_others; i++, reset_others >>= 1) {
2878
2879 if ((reset_others & 0x1) == 0) continue;
2880
2881 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset);
2882
2883 if (PMD_IS_COUNTING(i)) {
2884 pfm_write_soft_counter(ctx, i, val);
2885 } else {
2886 ia64_set_pmd(i, val);
2887 }
2888 DPRINT_ovfl(("%s reset_others pmd[%d]=%lx\n",
2889 is_long_reset ? "long" : "short", i, val));
2890 }
2891 ia64_srlz_d();
2892}
2893
2894static int
2895pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
2896{
2897 struct task_struct *task;
2898 pfarg_reg_t *req = (pfarg_reg_t *)arg;
2899 unsigned long value, pmc_pm;
2900 unsigned long smpl_pmds, reset_pmds, impl_pmds;
2901 unsigned int cnum, reg_flags, flags, pmc_type;
2902 int i, can_access_pmu = 0, is_loaded, is_system, expert_mode;
2903 int is_monitor, is_counting, state;
2904 int ret = -EINVAL;
2905 pfm_reg_check_t wr_func;
2906#define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
2907
2908 state = ctx->ctx_state;
2909 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
2910 is_system = ctx->ctx_fl_system;
2911 task = ctx->ctx_task;
2912 impl_pmds = pmu_conf->impl_pmds[0];
2913
2914 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
2915
2916 if (is_loaded) {
2921
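		/*
		 * a system-wide session is bound to a single CPU: reject the
		 * call if it comes from any other CPU.
		 */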
2922 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
2923 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
2924 return -EBUSY;
2925 }
2926 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
2927 }
2928 expert_mode = pfm_sysctl.expert_mode;
2929
2930 for (i = 0; i < count; i++, req++) {
2931
2932 cnum = req->reg_num;
2933 reg_flags = req->reg_flags;
2934 value = req->reg_value;
2935 smpl_pmds = req->reg_smpl_pmds[0];
2936 reset_pmds = req->reg_reset_pmds[0];
2937 flags = 0;
2938
2939
2940 if (cnum >= PMU_MAX_PMCS) {
2941 DPRINT(("pmc%u is invalid\n", cnum));
2942 goto error;
2943 }
2944
2945 pmc_type = pmu_conf->pmc_desc[cnum].type;
2946 pmc_pm = (value >> pmu_conf->pmc_desc[cnum].pm_pos) & 0x1;
2947 is_counting = (pmc_type & PFM_REG_COUNTING) == PFM_REG_COUNTING ? 1 : 0;
2948 is_monitor = (pmc_type & PFM_REG_MONITOR) == PFM_REG_MONITOR ? 1 : 0;
2949
2950
2951
2952
2953
2954
2955 if ((pmc_type & PFM_REG_IMPL) == 0 || (pmc_type & PFM_REG_CONTROL) == PFM_REG_CONTROL) {
2956 DPRINT(("pmc%u is unimplemented or no-access pmc_type=%x\n", cnum, pmc_type));
2957 goto error;
2958 }
2959 wr_func = pmu_conf->pmc_desc[cnum].write_check;
2960
2961
2962
2963
2964
2965 if (is_monitor && value != PMC_DFL_VAL(cnum) && is_system ^ pmc_pm) {
2966 DPRINT(("pmc%u pmc_pm=%lu is_system=%d\n",
2967 cnum,
2968 pmc_pm,
2969 is_system));
2970 goto error;
2971 }
2972
2973 if (is_counting) {
2974
2975
2976
2977
2978 value |= 1 << PMU_PMC_OI;
2979
2980 if (reg_flags & PFM_REGFL_OVFL_NOTIFY) {
2981 flags |= PFM_REGFL_OVFL_NOTIFY;
2982 }
2983
2984 if (reg_flags & PFM_REGFL_RANDOM) flags |= PFM_REGFL_RANDOM;
2985
2986
2987 if ((smpl_pmds & impl_pmds) != smpl_pmds) {
2988 DPRINT(("invalid smpl_pmds 0x%lx for pmc%u\n", smpl_pmds, cnum));
2989 goto error;
2990 }
2991
2992
2993 if ((reset_pmds & impl_pmds) != reset_pmds) {
2994 DPRINT(("invalid reset_pmds 0x%lx for pmc%u\n", reset_pmds, cnum));
2995 goto error;
2996 }
2997 } else {
2998 if (reg_flags & (PFM_REGFL_OVFL_NOTIFY|PFM_REGFL_RANDOM)) {
2999 DPRINT(("cannot set ovfl_notify or random on pmc%u\n", cnum));
3000 goto error;
3001 }
3002
3003 }
3004
3005
3006
3007
3008 if (likely(expert_mode == 0 && wr_func)) {
3009 ret = (*wr_func)(task, ctx, cnum, &value, regs);
3010 if (ret) goto error;
3011 ret = -EINVAL;
3012 }
3013
3014
3015
3016
3017 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3018
3019
3020
3021
3022
3023
3024
3025
3026 if (is_counting) {
3027
3028
3029
3030 ctx->ctx_pmds[cnum].flags = flags;
3031
3032 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds;
3033 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds;
3034 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid;
3046
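			/*
			 * mark the PMDs used for sampling and reset as used, so
			 * they are saved and restored with the context.
			 */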
3047 CTX_USED_PMD(ctx, reset_pmds);
3048 CTX_USED_PMD(ctx, smpl_pmds);
3049
3050
3051
3052
			if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3054 }
3055
3056
3057
3058
3059
3060 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]);
3073
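		/*
		 * keep track of monitoring PMCs so they can be masked and
		 * restored when the context is switched or reloaded.
		 */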
3074 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum);
3075
3076
3077
3078
3079 ctx->ctx_pmcs[cnum] = value;
3080
3081 if (is_loaded) {
3082
3083
3084
3085 if (is_system == 0) ctx->th_pmcs[cnum] = value;
3086
3087
3088
3089
3090 if (can_access_pmu) {
3091 ia64_set_pmc(cnum, value);
3092 }
3093#ifdef CONFIG_SMP
3094 else {
3101
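				/*
				 * the context is loaded on another CPU: record the
				 * PMC so it is reprogrammed when the context is
				 * reloaded there.
				 */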
3102 ctx->ctx_reload_pmcs[0] |= 1UL << cnum;
3103 }
3104#endif
3105 }
3106
		DPRINT(("pmc[%u]=0x%lx ld=%d apmu=%d flags=0x%x all_pmcs=0x%lx used_pmds=0x%lx eventid=%ld smpl_pmds=0x%lx reset_pmds=0x%lx reload_pmcs=0x%lx used_monitors=0x%lx ovfl_regs=0x%lx\n",
3108 cnum,
3109 value,
3110 is_loaded,
3111 can_access_pmu,
3112 flags,
3113 ctx->ctx_all_pmcs[0],
3114 ctx->ctx_used_pmds[0],
3115 ctx->ctx_pmds[cnum].eventid,
3116 smpl_pmds,
3117 reset_pmds,
3118 ctx->ctx_reload_pmcs[0],
3119 ctx->ctx_used_monitors[0],
3120 ctx->ctx_ovfl_regs[0]));
3121 }
3122
3123
3124
3125
3126 if (can_access_pmu) ia64_srlz_d();
3127
3128 return 0;
3129error:
3130 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3131 return ret;
3132}
3133
3134static int
3135pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3136{
3137 struct task_struct *task;
3138 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3139 unsigned long value, hw_value, ovfl_mask;
3140 unsigned int cnum;
3141 int i, can_access_pmu = 0, state;
3142 int is_counting, is_loaded, is_system, expert_mode;
3143 int ret = -EINVAL;
3144 pfm_reg_check_t wr_func;
3145
3146
3147 state = ctx->ctx_state;
3148 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3149 is_system = ctx->ctx_fl_system;
3150 ovfl_mask = pmu_conf->ovfl_val;
3151 task = ctx->ctx_task;
3152
3153 if (unlikely(state == PFM_CTX_ZOMBIE)) return -EINVAL;
3154
3155
3156
3157
3158
3159 if (likely(is_loaded)) {
3160
3161
3162
3163
3164
3165 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3166 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3167 return -EBUSY;
3168 }
3169 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3170 }
3171 expert_mode = pfm_sysctl.expert_mode;
3172
3173 for (i = 0; i < count; i++, req++) {
3174
3175 cnum = req->reg_num;
3176 value = req->reg_value;
3177
3178 if (!PMD_IS_IMPL(cnum)) {
3179 DPRINT(("pmd[%u] is unimplemented or invalid\n", cnum));
3180 goto abort_mission;
3181 }
3182 is_counting = PMD_IS_COUNTING(cnum);
3183 wr_func = pmu_conf->pmd_desc[cnum].write_check;
3184
3185
3186
3187
3188 if (unlikely(expert_mode == 0 && wr_func)) {
3189 unsigned long v = value;
3190
3191 ret = (*wr_func)(task, ctx, cnum, &v, regs);
3192 if (ret) goto abort_mission;
3193
3194 value = v;
3195 ret = -EINVAL;
3196 }
3197
3198
3199
3200
3201 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
3202
3203
3204
3205
3206 hw_value = value;
3207
3208
3209
3210
3211 if (is_counting) {
3212
3213
3214
3215 ctx->ctx_pmds[cnum].lval = value;
3216
3217
3218
3219
3220 if (is_loaded) {
3221 hw_value = value & ovfl_mask;
3222 value = value & ~ovfl_mask;
3223 }
3224 }
3225
3226
3227
3228 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset;
3229 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset;
3230
3231
3232
3233
3234 ctx->ctx_pmds[cnum].seed = req->reg_random_seed;
3235 ctx->ctx_pmds[cnum].mask = req->reg_random_mask;
3236
3237
3238
3239
3240 ctx->ctx_pmds[cnum].val = value;
3241
3242
3243
3244
3245
3246
3247
3248 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum));
3249
3250
3251
3252
3253 CTX_USED_PMD(ctx, RDEP(cnum));
3254
3255
3256
3257
3258
3259 if (is_counting && state == PFM_CTX_MASKED) {
			ctx->ctx_ovfl_regs[0] &= ~(1UL << cnum);
3261 }
3262
3263 if (is_loaded) {
3264
3265
3266
3267 if (is_system == 0) ctx->th_pmds[cnum] = hw_value;
3268
3269
3270
3271
3272 if (can_access_pmu) {
3273 ia64_set_pmd(cnum, hw_value);
3274 } else {
3275#ifdef CONFIG_SMP
3276
3277
3278
3279
3280
3281 ctx->ctx_reload_pmds[0] |= 1UL << cnum;
3282#endif
3283 }
3284 }
3285
3286 DPRINT(("pmd[%u]=0x%lx ld=%d apmu=%d, hw_value=0x%lx ctx_pmd=0x%lx short_reset=0x%lx "
3287 "long_reset=0x%lx notify=%c seed=0x%lx mask=0x%lx used_pmds=0x%lx reset_pmds=0x%lx reload_pmds=0x%lx all_pmds=0x%lx ovfl_regs=0x%lx\n",
3288 cnum,
3289 value,
3290 is_loaded,
3291 can_access_pmu,
3292 hw_value,
3293 ctx->ctx_pmds[cnum].val,
3294 ctx->ctx_pmds[cnum].short_reset,
3295 ctx->ctx_pmds[cnum].long_reset,
3296 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N',
3297 ctx->ctx_pmds[cnum].seed,
3298 ctx->ctx_pmds[cnum].mask,
3299 ctx->ctx_used_pmds[0],
3300 ctx->ctx_pmds[cnum].reset_pmds[0],
3301 ctx->ctx_reload_pmds[0],
3302 ctx->ctx_all_pmds[0],
3303 ctx->ctx_ovfl_regs[0]));
3304 }
3305
3306
3307
3308
3309 if (can_access_pmu) ia64_srlz_d();
3310
3311 return 0;
3312
3313abort_mission:
3314
3315
3316
3317 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3318 return ret;
3319}
3329
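/*
 * read one or more PMDs. For counting PMDs, the 64-bit software value
 * in ctx_pmds[].val is combined with the low-order hardware bits; the
 * hardware register itself is read only when we own the PMU.
 */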
3330static int
3331pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3332{
3333 struct task_struct *task;
3334 unsigned long val = 0UL, lval, ovfl_mask, sval;
3335 pfarg_reg_t *req = (pfarg_reg_t *)arg;
3336 unsigned int cnum, reg_flags = 0;
3337 int i, can_access_pmu = 0, state;
3338 int is_loaded, is_system, is_counting, expert_mode;
3339 int ret = -EINVAL;
3340 pfm_reg_check_t rd_func;
3341
3342
3343
3344
3345
3346
3347 state = ctx->ctx_state;
3348 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3349 is_system = ctx->ctx_fl_system;
3350 ovfl_mask = pmu_conf->ovfl_val;
3351 task = ctx->ctx_task;
3352
3353 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3354
3355 if (likely(is_loaded)) {
3356
3357
3358
3359
3360
3361 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3362 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3363 return -EBUSY;
3364 }
3365
3366
3367
3368 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3369
3370 if (can_access_pmu) ia64_srlz_d();
3371 }
3372 expert_mode = pfm_sysctl.expert_mode;
3373
3374 DPRINT(("ld=%d apmu=%d ctx_state=%d\n",
3375 is_loaded,
3376 can_access_pmu,
3377 state));
3378
3379
3380
3381
3382
3383
3384 for (i = 0; i < count; i++, req++) {
3385
3386 cnum = req->reg_num;
3387 reg_flags = req->reg_flags;
3388
3389 if (unlikely(!PMD_IS_IMPL(cnum))) goto error;
3390
3391
3392
3393
3394
3395
3396
3397
3398 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error;
3399
3400 sval = ctx->ctx_pmds[cnum].val;
3401 lval = ctx->ctx_pmds[cnum].lval;
3402 is_counting = PMD_IS_COUNTING(cnum);
3403
3404
3405
3406
3407
3408
		if (can_access_pmu) {
3410 val = ia64_get_pmd(cnum);
3411 } else {
3412
3413
3414
3415
3416
3417 val = is_loaded ? ctx->th_pmds[cnum] : 0UL;
3418 }
3419 rd_func = pmu_conf->pmd_desc[cnum].read_check;
3420
3421 if (is_counting) {
3422
3423
3424
3425 val &= ovfl_mask;
3426 val += sval;
3427 }
3428
3429
3430
3431
3432 if (unlikely(expert_mode == 0 && rd_func)) {
3433 unsigned long v = val;
3434 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs);
3435 if (ret) goto error;
3436 val = v;
3437 ret = -EINVAL;
3438 }
3439
3440 PFM_REG_RETFLAG_SET(reg_flags, 0);
3441
3442 DPRINT(("pmd[%u]=0x%lx\n", cnum, val));
3443
3444
3445
3446
3447
3448
3449 req->reg_value = val;
3450 req->reg_flags = reg_flags;
3451 req->reg_last_reset_val = lval;
3452 }
3453
3454 return 0;
3455
3456error:
3457 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
3458 return ret;
3459}
3460
3461int
3462pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3463{
3464 pfm_context_t *ctx;
3465
3466 if (req == NULL) return -EINVAL;
3467
3468 ctx = GET_PMU_CTX();
3469
3470 if (ctx == NULL) return -EINVAL;
3471
3472
3473
3474
3475
3476 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3477
3478 return pfm_write_pmcs(ctx, req, nreq, regs);
3479}
3480EXPORT_SYMBOL(pfm_mod_write_pmcs);
3481
3482int
3483pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3484{
3485 pfm_context_t *ctx;
3486
3487 if (req == NULL) return -EINVAL;
3488
3489 ctx = GET_PMU_CTX();
3490
3491 if (ctx == NULL) return -EINVAL;
3492
3493
3494
3495
3496
3497 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3498
3499 return pfm_read_pmds(ctx, req, nreq, regs);
3500}
3501EXPORT_SYMBOL(pfm_mod_read_pmds);
3502
3503
3504
3505
3506
3507int
3508pfm_use_debug_registers(struct task_struct *task)
3509{
3510 pfm_context_t *ctx = task->thread.pfm_context;
3511 unsigned long flags;
3512 int ret = 0;
3513
3514 if (pmu_conf->use_rr_dbregs == 0) return 0;
3515
3516 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3517
3518
3519
3520
3521 if (task->thread.flags & IA64_THREAD_DBG_VALID) return 0;
3530
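	/*
	 * refuse if this task's own perfmon context is already using the
	 * debug registers for range restrictions.
	 */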
3531 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1;
3532
3533 LOCK_PFS(flags);
3534
3535
3536
3537
3538
	if (pfm_sessions.pfs_sys_use_dbregs > 0)
3540 ret = -1;
3541 else
3542 pfm_sessions.pfs_ptrace_use_dbregs++;
3543
3544 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3545 pfm_sessions.pfs_ptrace_use_dbregs,
3546 pfm_sessions.pfs_sys_use_dbregs,
3547 task_pid_nr(task), ret));
3548
3549 UNLOCK_PFS(flags);
3550
3551 return ret;
3552}
3561
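/*
 * release one reference on the debug registers previously taken via
 * pfm_use_debug_registers(), presumably on the ptrace release path.
 */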
3562int
3563pfm_release_debug_registers(struct task_struct *task)
3564{
3565 unsigned long flags;
3566 int ret;
3567
3568 if (pmu_conf->use_rr_dbregs == 0) return 0;
3569
3570 LOCK_PFS(flags);
3571 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3572 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3573 ret = -1;
3574 } else {
3575 pfm_sessions.pfs_ptrace_use_dbregs--;
3576 ret = 0;
3577 }
3578 UNLOCK_PFS(flags);
3579
3580 return ret;
3581}
3582
3583static int
3584pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3585{
3586 struct task_struct *task;
3587 pfm_buffer_fmt_t *fmt;
3588 pfm_ovfl_ctrl_t rst_ctrl;
3589 int state, is_system;
3590 int ret = 0;
3591
3592 state = ctx->ctx_state;
3593 fmt = ctx->ctx_buf_fmt;
3594 is_system = ctx->ctx_fl_system;
3595 task = PFM_CTX_TASK(ctx);
3596
3597 switch(state) {
3598 case PFM_CTX_MASKED:
3599 break;
3600 case PFM_CTX_LOADED:
3601 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break;
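		/* fall through: no restart is possible in this state */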
3602
3603 case PFM_CTX_UNLOADED:
3604 case PFM_CTX_ZOMBIE:
3605 DPRINT(("invalid state=%d\n", state));
3606 return -EBUSY;
3607 default:
3608 DPRINT(("state=%d, cannot operate (no active_restart handler)\n", state));
3609 return -EINVAL;
3610 }
3611
3612
3613
3614
3615
3616
3617 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
3618 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3619 return -EBUSY;
3620 }
3621
3622
3623 if (unlikely(task == NULL)) {
3624 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3625 return -EINVAL;
3626 }
3627
3628 if (task == current || is_system) {
3629
3630 fmt = ctx->ctx_buf_fmt;
3631
3632 DPRINT(("restarting self %d ovfl=0x%lx\n",
3633 task_pid_nr(task),
3634 ctx->ctx_ovfl_regs[0]));
3635
3636 if (CTX_HAS_SMPL(ctx)) {
3637
3638 prefetch(ctx->ctx_smpl_hdr);
3639
3640 rst_ctrl.bits.mask_monitoring = 0;
3641 rst_ctrl.bits.reset_ovfl_pmds = 0;
3642
3643 if (state == PFM_CTX_LOADED)
3644 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3645 else
3646 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
3647 } else {
3648 rst_ctrl.bits.mask_monitoring = 0;
3649 rst_ctrl.bits.reset_ovfl_pmds = 1;
3650 }
3651
3652 if (ret == 0) {
3653 if (rst_ctrl.bits.reset_ovfl_pmds)
3654 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3655
3656 if (rst_ctrl.bits.mask_monitoring == 0) {
3657 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3658
3659 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3660 } else {
3661 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3662
3663
3664 }
3665 }
3666
3667
3668
3669 ctx->ctx_ovfl_regs[0] = 0UL;
3670
3671
3672
3673
3674 ctx->ctx_state = PFM_CTX_LOADED;
3675
3676
3677
3678
3679 ctx->ctx_fl_can_restart = 0;
3680
3681 return 0;
3682 }
3691
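	/*
	 * restart on behalf of a monitored task: only valid when the
	 * context is masked following an overflow notification.
	 */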
3692 if (state == PFM_CTX_MASKED) {
3693 if (ctx->ctx_fl_can_restart == 0) return -EINVAL;
3694
3695
3696
3697
3698 ctx->ctx_fl_can_restart = 0;
3699 }
3716
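	/*
	 * blocking mode: wake up the task sleeping in pfm_handle_work().
	 * otherwise: arm the pending-work trap so the task resets its
	 * PMDs on the way back to user level.
	 */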
3717 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3718 DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
3719 complete(&ctx->ctx_restart_done);
3720 } else {
3721 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3722
3723 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3724
3725 PFM_SET_WORK_PENDING(task, 1);
3726
3727 pfm_set_task_notify(task);
3728
3729
3730
3731
3732 }
3733 return 0;
3734}
3735
3736static int
3737pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3738{
3739 unsigned int m = *(unsigned int *)arg;
3740
3741 pfm_sysctl.debug = m == 0 ? 0 : 1;
3742
3743 printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");
3744
3745 if (m == 0) {
3746 memset(pfm_stats, 0, sizeof(pfm_stats));
3747 for(m=0; m < NR_CPUS; m++) pfm_stats[m].pfm_ovfl_intr_cycles_min = ~0UL;
3748 }
3749 return 0;
3750}
3751
3752
3753
3754
3755static int
3756pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3757{
3758 struct thread_struct *thread = NULL;
3759 struct task_struct *task;
3760 pfarg_dbreg_t *req = (pfarg_dbreg_t *)arg;
3761 unsigned long flags;
3762 dbreg_t dbreg;
3763 unsigned int rnum;
3764 int first_time;
3765 int ret = 0, state;
3766 int i, can_access_pmu = 0;
3767 int is_system, is_loaded;
3768
3769 if (pmu_conf->use_rr_dbregs == 0) return -EINVAL;
3770
3771 state = ctx->ctx_state;
3772 is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
3773 is_system = ctx->ctx_fl_system;
3774 task = ctx->ctx_task;
3775
3776 if (state == PFM_CTX_ZOMBIE) return -EINVAL;
3777
3778
3779
3780
3781
3782 if (is_loaded) {
3783 thread = &task->thread;
3784
3785
3786
3787
3788
3789 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) {
3790 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
3791 return -EBUSY;
3792 }
3793 can_access_pmu = GET_PMU_OWNER() == task || is_system ? 1 : 0;
3794 }
3802
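	/* remember whether this context already declared use of the debug registers */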
3803 first_time = ctx->ctx_fl_using_dbreg == 0;
3804
3805
3806
3807
3808 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3809 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3810 return -EBUSY;
3811 }
3812
3813
3814
3815
3816
3817
3818
3819
3820 if (is_loaded) {
3821 LOCK_PFS(flags);
3822
3823 if (first_time && is_system) {
3824 if (pfm_sessions.pfs_ptrace_use_dbregs)
3825 ret = -EBUSY;
3826 else
3827 pfm_sessions.pfs_sys_use_dbregs++;
3828 }
3829 UNLOCK_PFS(flags);
3830 }
3831
3832 if (ret != 0) return ret;
3833
3834
3835
3836
3837
3838 ctx->ctx_fl_using_dbreg = 1;
3848
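	/*
	 * on first activation, clear all debug registers so no stale
	 * breakpoint state leaks into the monitoring session.
	 */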
3849 if (first_time && can_access_pmu) {
3850 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3851 for (i=0; i < pmu_conf->num_ibrs; i++) {
3852 ia64_set_ibr(i, 0UL);
3853 ia64_dv_serialize_instruction();
3854 }
3855 ia64_srlz_i();
3856 for (i=0; i < pmu_conf->num_dbrs; i++) {
3857 ia64_set_dbr(i, 0UL);
3858 ia64_dv_serialize_data();
3859 }
3860 ia64_srlz_d();
3861 }
3862
3863
3864
3865
3866 for (i = 0; i < count; i++, req++) {
3867
3868 rnum = req->dbreg_num;
3869 dbreg.val = req->dbreg_value;
3870
3871 ret = -EINVAL;
3872
3873 if ((mode == PFM_CODE_RR && rnum >= PFM_NUM_IBRS) || ((mode == PFM_DATA_RR) && rnum >= PFM_NUM_DBRS)) {
3874 DPRINT(("invalid register %u val=0x%lx mode=%d i=%d count=%d\n",
3875 rnum, dbreg.val, mode, i, count));
3876
3877 goto abort_mission;
3878 }
3879
3880
3881
3882
3883 if (rnum & 0x1) {
3884 if (mode == PFM_CODE_RR)
3885 dbreg.ibr.ibr_x = 0;
3886 else
3887 dbreg.dbr.dbr_r = dbreg.dbr.dbr_w = 0;
3888 }
3889
3890 PFM_REG_RETFLAG_SET(req->dbreg_flags, 0);
3901
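		/*
		 * install the value in the context image and, when we own the
		 * PMU, in the hardware; the saved copy stays valid because the
		 * hardware does not modify these registers.
		 */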
3902 if (mode == PFM_CODE_RR) {
3903 CTX_USED_IBR(ctx, rnum);
3904
3905 if (can_access_pmu) {
3906 ia64_set_ibr(rnum, dbreg.val);
3907 ia64_dv_serialize_instruction();
3908 }
3909
3910 ctx->ctx_ibrs[rnum] = dbreg.val;
3911
3912 DPRINT(("write ibr%u=0x%lx used_ibrs=0x%x ld=%d apmu=%d\n",
3913 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu));
3914 } else {
3915 CTX_USED_DBR(ctx, rnum);
3916
3917 if (can_access_pmu) {
3918 ia64_set_dbr(rnum, dbreg.val);
3919 ia64_dv_serialize_data();
3920 }
3921 ctx->ctx_dbrs[rnum] = dbreg.val;
3922
3923 DPRINT(("write dbr%u=0x%lx used_dbrs=0x%x ld=%d apmu=%d\n",
3924 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu));
3925 }
3926 }
3927
3928 return 0;
3929
3930abort_mission:
3931
3932
3933
3934 if (first_time) {
3935 LOCK_PFS(flags);
3936 if (ctx->ctx_fl_system) {
3937 pfm_sessions.pfs_sys_use_dbregs--;
3938 }
3939 UNLOCK_PFS(flags);
3940 ctx->ctx_fl_using_dbreg = 0;
3941 }
3942
3943
3944
3945 PFM_REG_RETFLAG_SET(req->dbreg_flags, PFM_REG_RETFL_EINVAL);
3946
3947 return ret;
3948}
3949
3950static int
3951pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3952{
3953 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs);
3954}
3955
3956static int
3957pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3958{
3959 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs);
3960}
3961
3962int
3963pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3964{
3965 pfm_context_t *ctx;
3966
3967 if (req == NULL) return -EINVAL;
3968
3969 ctx = GET_PMU_CTX();
3970
3971 if (ctx == NULL) return -EINVAL;
3972
3973
3974
3975
3976
3977 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3978
3979 return pfm_write_ibrs(ctx, req, nreq, regs);
3980}
3981EXPORT_SYMBOL(pfm_mod_write_ibrs);
3982
3983int
3984pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
3985{
3986 pfm_context_t *ctx;
3987
3988 if (req == NULL) return -EINVAL;
3989
3990 ctx = GET_PMU_CTX();
3991
3992 if (ctx == NULL) return -EINVAL;
3993
3994
3995
3996
3997
3998 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY;
3999
4000 return pfm_write_dbrs(ctx, req, nreq, regs);
4001}
4002EXPORT_SYMBOL(pfm_mod_write_dbrs);
4003
4004
4005static int
4006pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4007{
4008 pfarg_features_t *req = (pfarg_features_t *)arg;
4009
4010 req->ft_version = PFM_VERSION;
4011 return 0;
4012}
4013
4014static int
4015pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4016{
4017 struct pt_regs *tregs;
4018 struct task_struct *task = PFM_CTX_TASK(ctx);
4019 int state, is_system;
4020
4021 state = ctx->ctx_state;
4022 is_system = ctx->ctx_fl_system;
4023
4024
4025
4026
4027 if (state == PFM_CTX_UNLOADED) return -EINVAL;
4028
4029
4030
4031
4032
4033
4034 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4035 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4036 return -EBUSY;
4037 }
4038 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
4039 task_pid_nr(PFM_CTX_TASK(ctx)),
4040 state,
4041 is_system));
4042
4043
4044
4045
4046
4047 if (is_system) {
4048
4049
4050
4051
4052
4053 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) & ~IA64_DCR_PP);
4054 ia64_srlz_i();
4055
4056
4057
4058
4059 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4060
4061
4062
4063
4064 pfm_clear_psr_pp();
4065
4066
4067
4068
4069 ia64_psr(regs)->pp = 0;
4070
4071 return 0;
4072 }
4073
4074
4075
4076
4077 if (task == current) {
4078
4079 pfm_clear_psr_up();
4080
4081
4082
4083
4084 ia64_psr(regs)->up = 0;
4085 } else {
4086 tregs = task_pt_regs(task);
4087
4088
4089
4090
4091 ia64_psr(tregs)->up = 0;
4092
4093
4094
4095
4096 ctx->ctx_saved_psr_up = 0;
4097 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4098 }
4099 return 0;
4100}
4101
4102
4103static int
4104pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4105{
4106 struct pt_regs *tregs;
4107 int state, is_system;
4108
4109 state = ctx->ctx_state;
4110 is_system = ctx->ctx_fl_system;
4111
4112 if (state != PFM_CTX_LOADED) return -EINVAL;
4113
4114
4115
4116
4117
4118
4119 if (is_system && ctx->ctx_cpu != smp_processor_id()) {
4120 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu));
4121 return -EBUSY;
4122 }
4123
4124
4125
4126
4127
4128
4129 if (is_system) {
4130
4131
4132
4133
4134 ia64_psr(regs)->pp = 1;
4135
4136
4137
4138
4139 PFM_CPUINFO_SET(PFM_CPUINFO_DCR_PP);
4140
4141
4142
4143
4144 pfm_set_psr_pp();
4145
4146
4147 ia64_setreg(_IA64_REG_CR_DCR, ia64_getreg(_IA64_REG_CR_DCR) | IA64_DCR_PP);
4148 ia64_srlz_i();
4149
4150 return 0;
4151 }
4152
4153
4154
4155
4156
4157 if (ctx->ctx_task == current) {
4158
4159
4160 pfm_set_psr_up();
4161
4162
4163
4164
4165 ia64_psr(regs)->up = 1;
4166
4167 } else {
4168 tregs = task_pt_regs(ctx->ctx_task);
4169
4170
4171
4172
4173
4174 ctx->ctx_saved_psr_up = IA64_PSR_UP;
4175
4176
4177
4178
4179 ia64_psr(tregs)->up = 1;
4180 }
4181 return 0;
4182}
4183
4184static int
4185pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4186{
4187 pfarg_reg_t *req = (pfarg_reg_t *)arg;
4188 unsigned int cnum;
4189 int i;
4190 int ret = -EINVAL;
4191
4192 for (i = 0; i < count; i++, req++) {
4193
4194 cnum = req->reg_num;
4195
4196 if (!PMC_IS_IMPL(cnum)) goto abort_mission;
4197
4198 req->reg_value = PMC_DFL_VAL(cnum);
4199
4200 PFM_REG_RETFLAG_SET(req->reg_flags, 0);
4201
4202 DPRINT(("pmc_reset_val pmc[%u]=0x%lx\n", cnum, req->reg_value));
4203 }
4204 return 0;
4205
4206abort_mission:
4207 PFM_REG_RETFLAG_SET(req->reg_flags, PFM_REG_RETFL_EINVAL);
4208 return ret;
4209}
4210
4211static int
4212pfm_check_task_exist(pfm_context_t *ctx)
4213{
4214 struct task_struct *g, *t;
4215 int ret = -ESRCH;
4216
4217 read_lock(&tasklist_lock);
4218
4219 do_each_thread (g, t) {
4220 if (t->thread.pfm_context == ctx) {
4221 ret = 0;
4222 break;
4223 }
4224 } while_each_thread (g, t);
4225
4226 read_unlock(&tasklist_lock);
4227
4228 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4229
4230 return ret;
4231}
4232
4233static int
4234pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4235{
4236 struct task_struct *task;
4237 struct thread_struct *thread;
	pfm_context_t *old;
4239 unsigned long flags;
4240#ifndef CONFIG_SMP
4241 struct task_struct *owner_task = NULL;
4242#endif
4243 pfarg_load_t *req = (pfarg_load_t *)arg;
4244 unsigned long *pmcs_source, *pmds_source;
4245 int the_cpu;
4246 int ret = 0;
4247 int state, is_system, set_dbregs = 0;
4248
4249 state = ctx->ctx_state;
4250 is_system = ctx->ctx_fl_system;
4251
4252
4253
4254 if (state != PFM_CTX_UNLOADED) {
4255 DPRINT(("cannot load to [%d], invalid ctx_state=%d\n",
4256 req->load_pid,
4257 ctx->ctx_state));
4258 return -EBUSY;
4259 }
4260
4261 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg));
4262
4263 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) {
4264 DPRINT(("cannot use blocking mode on self\n"));
4265 return -EINVAL;
4266 }
4267
4268 ret = pfm_get_task(ctx, req->load_pid, &task);
4269 if (ret) {
4270 DPRINT(("load_pid [%d] get_task=%d\n", req->load_pid, ret));
4271 return ret;
4272 }
4273
4274 ret = -EINVAL;
4275
4276
4277
4278
4279 if (is_system && task != current) {
4280 DPRINT(("system wide is self monitoring only load_pid=%d\n",
4281 req->load_pid));
4282 goto error;
4283 }
4284
4285 thread = &task->thread;
4286
4287 ret = 0;
4288
4289
4290
4291
4292 if (ctx->ctx_fl_using_dbreg) {
4293 if (thread->flags & IA64_THREAD_DBG_VALID) {
4294 ret = -EBUSY;
4295 DPRINT(("load_pid [%d] task is debugged, cannot load range restrictions\n", req->load_pid));
4296 goto error;
4297 }
4298 LOCK_PFS(flags);
4299
4300 if (is_system) {
4301 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4302 DPRINT(("cannot load [%d] dbregs in use\n",
4303 task_pid_nr(task)));
4304 ret = -EBUSY;
4305 } else {
4306 pfm_sessions.pfs_sys_use_dbregs++;
4307 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4308 set_dbregs = 1;
4309 }
4310 }
4311
4312 UNLOCK_PFS(flags);
4313
4314 if (ret) goto error;
4315 }
4331
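	/*
	 * record the CPU the context is loaded on; a system-wide session
	 * stays bound to this CPU for its entire lifetime.
	 */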
4332 the_cpu = ctx->ctx_cpu = smp_processor_id();
4333
4334 ret = -EBUSY;
4335
4336
4337
4338 ret = pfm_reserve_session(current, is_system, the_cpu);
4339 if (ret) goto error;
4349
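	/*
	 * atomically link the context to the task; this fails if the task
	 * already has a context attached.
	 */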
4350 DPRINT(("before cmpxchg() old_ctx=%p new_ctx=%p\n",
4351 thread->pfm_context, ctx));
4352
4353 ret = -EBUSY;
4354 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *));
4355 if (old != NULL) {
4356 DPRINT(("load_pid [%d] already has a context\n", req->load_pid));
4357 goto error_unres;
4358 }
4359
4360 pfm_reset_msgq(ctx);
4361
4362 ctx->ctx_state = PFM_CTX_LOADED;
4363
4364
4365
4366
4367 ctx->ctx_task = task;
4368
4369 if (is_system) {
4370
4371
4372
4373 PFM_CPUINFO_SET(PFM_CPUINFO_SYST_WIDE);
4374 PFM_CPUINFO_CLEAR(PFM_CPUINFO_DCR_PP);
4375
4376 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE);
4377 } else {
4378 thread->flags |= IA64_THREAD_PM_VALID;
4379 }
4380
4381
4382
4383
4384 pfm_copy_pmds(task, ctx);
4385 pfm_copy_pmcs(task, ctx);
4386
4387 pmcs_source = ctx->th_pmcs;
4388 pmds_source = ctx->th_pmds;
4389
4390
4391
4392
4393 if (task == current) {
4394
4395 if (is_system == 0) {
4396
4397
4398 ia64_psr(regs)->sp = 0;
4399 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4400
4401 SET_LAST_CPU(ctx, smp_processor_id());
4402 INC_ACTIVATION();
4403 SET_ACTIVATION(ctx);
4404#ifndef CONFIG_SMP
4405
4406
4407
4408 owner_task = GET_PMU_OWNER();
4409 if (owner_task) pfm_lazy_save_regs(owner_task);
4410#endif
4411 }
4412
4413
4414
4415
4416 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]);
4417 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]);
4418
4419 ctx->ctx_reload_pmcs[0] = 0UL;
4420 ctx->ctx_reload_pmds[0] = 0UL;
4421
4422
4423
4424
4425 if (ctx->ctx_fl_using_dbreg) {
4426 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
4427 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
4428 }
4429
4430
4431
4432 SET_PMU_OWNER(task, ctx);
4433
4434 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4435 } else {
4436
4437
4438
4439 regs = task_pt_regs(task);
4440
4441
4442 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4443 SET_LAST_CPU(ctx, -1);
4444
4445
4446 ctx->ctx_saved_psr_up = 0UL;
4447 ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
4448 }
4449
4450 ret = 0;
4451
4452error_unres:
4453 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu);
4454error:
4455
4456
4457
4458 if (ret && set_dbregs) {
4459 LOCK_PFS(flags);
4460 pfm_sessions.pfs_sys_use_dbregs--;
4461 UNLOCK_PFS(flags);
4462 }
4463
4464
4465
4466 if (is_system == 0 && task != current) {
4467 pfm_put_task(task);
4468
4469 if (ret == 0) {
4470 ret = pfm_check_task_exist(ctx);
4471 if (ret) {
4472 ctx->ctx_state = PFM_CTX_UNLOADED;
4473 ctx->ctx_task = NULL;
4474 }
4475 }
4476 }
4477 return ret;
4478}
4487
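/*
 * forward declaration used by pfm_context_unload(): flushes the hardware
 * PMD state back into the context image before detaching.
 */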
4488static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4489
4490static int
4491pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4492{
4493 struct task_struct *task = PFM_CTX_TASK(ctx);
4494 struct pt_regs *tregs;
4495 int prev_state, is_system;
4496 int ret;
4497
4498 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4499
4500 prev_state = ctx->ctx_state;
4501 is_system = ctx->ctx_fl_system;
4502
4503
4504
4505
4506 if (prev_state == PFM_CTX_UNLOADED) {
4507 DPRINT(("ctx_state=%d, nothing to do\n", prev_state));
4508 return 0;
4509 }
4510
4511
4512
4513
4514 ret = pfm_stop(ctx, NULL, 0, regs);
4515 if (ret) return ret;
4516
4517 ctx->ctx_state = PFM_CTX_UNLOADED;
4518
4519
4520
4521
4522
4523
4524 if (is_system) {
4525
4526
4527
4528
4529
4530
4531 PFM_CPUINFO_CLEAR(PFM_CPUINFO_SYST_WIDE);
4532 PFM_CPUINFO_CLEAR(PFM_CPUINFO_EXCL_IDLE);
4533
4534
4535
4536
4537
4538 pfm_flush_pmds(current, ctx);
4539
4540
4541
4542
4543
4544 if (prev_state != PFM_CTX_ZOMBIE)
			pfm_unreserve_session(ctx, 1, ctx->ctx_cpu);
4546
4547
4548
4549
4550 task->thread.pfm_context = NULL;
4551
4552
4553
4554 ctx->ctx_task = NULL;
4555
4556
4557
4558
4559 return 0;
4560 }
4561
4562
4563
4564
4565 tregs = task == current ? regs : task_pt_regs(task);
4566
4567 if (task == current) {
4568
4569
4570
4571 ia64_psr(regs)->sp = 1;
4572
4573 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4574 }
4575
4576
4577
4578
4579 pfm_flush_pmds(task, ctx);
4580
4581
4582
4583
4584
4585
4586
4587 if (prev_state != PFM_CTX_ZOMBIE)
		pfm_unreserve_session(ctx, 0, ctx->ctx_cpu);
4589
4590
4591
4592
4593 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
4594 SET_LAST_CPU(ctx, -1);
4595
4596
4597
4598
4599 task->thread.flags &= ~IA64_THREAD_PM_VALID;
4600
4601
4602
4603
4604 task->thread.pfm_context = NULL;
4605 ctx->ctx_task = NULL;
4606
4607 PFM_SET_WORK_PENDING(task, 0);
4608
4609 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
4610 ctx->ctx_fl_can_restart = 0;
4611 ctx->ctx_fl_going_zombie = 0;
4612
4613 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4614
4615 return 0;
4616}
4622
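/*
 * thread exit path: a context is known to be attached at this point,
 * since ctx is locked below without a NULL check.
 */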
4623void
4624pfm_exit_thread(struct task_struct *task)
4625{
4626 pfm_context_t *ctx;
4627 unsigned long flags;
4628 struct pt_regs *regs = task_pt_regs(task);
4629 int ret, state;
4630 int free_ok = 0;
4631
4632 ctx = PFM_GET_CTX(task);
4633
4634 PROTECT_CTX(ctx, flags);
4635
4636 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4637
4638 state = ctx->ctx_state;
4639 switch(state) {
4640 case PFM_CTX_UNLOADED:
4641
4642
4643
4644
4645 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4646 break;
4647 case PFM_CTX_LOADED:
4648 case PFM_CTX_MASKED:
4649 ret = pfm_context_unload(ctx, NULL, 0, regs);
4650 if (ret) {
4651 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4652 }
4653 DPRINT(("ctx unloaded for current state was %d\n", state));
4654
4655 pfm_end_notify_user(ctx);
4656 break;
4657 case PFM_CTX_ZOMBIE:
4658 ret = pfm_context_unload(ctx, NULL, 0, regs);
4659 if (ret) {
4660 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4661 }
4662 free_ok = 1;
4663 break;
4664 default:
4665 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4666 break;
4667 }
4668 UNPROTECT_CTX(ctx, flags);
4669
4670 { u64 psr = pfm_get_psr();
4671 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
4672 BUG_ON(GET_PMU_OWNER());
4673 BUG_ON(ia64_psr(regs)->up);
4674 BUG_ON(ia64_psr(regs)->pp);
4675 }
4676
4677
4678
4679
4680
4681 if (free_ok) pfm_context_free(ctx);
4682}
4686
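/*
 * perfmonctl() command table: each entry must sit at the index equal to
 * its command number, with unused slots filled by PFM_CMD_NONE.
 */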
4687#define PFM_CMD(name, flags, arg_count, arg_type, getsz) { name, #name, flags, arg_count, sizeof(arg_type), getsz }
4688#define PFM_CMD_S(name, flags) { name, #name, flags, 0, 0, NULL }
4689#define PFM_CMD_PCLRWS (PFM_CMD_FD|PFM_CMD_ARG_RW|PFM_CMD_STOP)
4690#define PFM_CMD_PCLRW (PFM_CMD_FD|PFM_CMD_ARG_RW)
4691#define PFM_CMD_NONE { NULL, "no-cmd", 0, 0, 0, NULL}
4692
4693static pfm_cmd_desc_t pfm_cmd_tab[]={
4694PFM_CMD_NONE,
4695PFM_CMD(pfm_write_pmcs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4696PFM_CMD(pfm_write_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4697PFM_CMD(pfm_read_pmds, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4698PFM_CMD_S(pfm_stop, PFM_CMD_PCLRWS),
4699PFM_CMD_S(pfm_start, PFM_CMD_PCLRWS),
4700PFM_CMD_NONE,
4701PFM_CMD_NONE,
4702PFM_CMD(pfm_context_create, PFM_CMD_ARG_RW, 1, pfarg_context_t, pfm_ctx_getsize),
4703PFM_CMD_NONE,
4704PFM_CMD_S(pfm_restart, PFM_CMD_PCLRW),
4705PFM_CMD_NONE,
4706PFM_CMD(pfm_get_features, PFM_CMD_ARG_RW, 1, pfarg_features_t, NULL),
4707PFM_CMD(pfm_debug, 0, 1, unsigned int, NULL),
4708PFM_CMD_NONE,
4709PFM_CMD(pfm_get_pmc_reset, PFM_CMD_ARG_RW, PFM_CMD_ARG_MANY, pfarg_reg_t, NULL),
4710PFM_CMD(pfm_context_load, PFM_CMD_PCLRWS, 1, pfarg_load_t, NULL),
4711PFM_CMD_S(pfm_context_unload, PFM_CMD_PCLRWS),
4712PFM_CMD_NONE,
4713PFM_CMD_NONE,
4714PFM_CMD_NONE,
4715PFM_CMD_NONE,
4716PFM_CMD_NONE,
4717PFM_CMD_NONE,
4718PFM_CMD_NONE,
4719PFM_CMD_NONE,
4720PFM_CMD_NONE,
4721PFM_CMD_NONE,
4722PFM_CMD_NONE,
4723PFM_CMD_NONE,
4724PFM_CMD_NONE,
4725PFM_CMD_NONE,
4726PFM_CMD(pfm_write_ibrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL),
4727PFM_CMD(pfm_write_dbrs, PFM_CMD_PCLRWS, PFM_CMD_ARG_MANY, pfarg_dbreg_t, NULL)
4728};
4729#define PFM_CMD_COUNT (sizeof(pfm_cmd_tab)/sizeof(pfm_cmd_desc_t))
4730
4731static int
4732pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags)
4733{
4734 struct task_struct *task;
4735 int state, old_state;
4736
4737recheck:
4738 state = ctx->ctx_state;
4739 task = ctx->ctx_task;
4740
4741 if (task == NULL) {
4742 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state));
4743 return 0;
4744 }
4745
4746 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4747 ctx->ctx_fd,
4748 state,
4749 task_pid_nr(task),
4750 task->state, PFM_CMD_STOPPED(cmd)));
4758
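	/*
	 * self-monitoring and system-wide contexts can always be operated
	 * on; remote per-task contexts need the state checks below.
	 */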
4759 if (task == current || ctx->ctx_fl_system) return 0;
4760
4761
4762
4763
4764 switch(state) {
4765 case PFM_CTX_UNLOADED:
4766
4767
4768
4769 return 0;
4770 case PFM_CTX_ZOMBIE:
4771
4772
4773
4774 DPRINT(("cmd %d state zombie cannot operate on context\n", cmd));
4775 return -EINVAL;
4776 case PFM_CTX_MASKED:
4777
4778
4779
4780
4781 if (cmd != PFM_UNLOAD_CONTEXT) return 0;
4782 }
4793
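	/*
	 * commands flagged PFM_CMD_STOP may only operate on a task that is
	 * stopped or traced, so its PMU state cannot change underneath us.
	 */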
4794 if (PFM_CMD_STOPPED(cmd)) {
4795 if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
4796 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4797 return -EBUSY;
4798 }
4812
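		/*
		 * wait_task_inactive() may sleep, so the context lock is
		 * dropped around it; the state can change meanwhile, hence
		 * the recheck loop.
		 */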
4813 old_state = state;
4814
4815 UNPROTECT_CTX(ctx, flags);
4816
4817 wait_task_inactive(task);
4818
4819 PROTECT_CTX(ctx, flags);
4820
4821
4822
4823
4824 if (ctx->ctx_state != old_state) {
4825 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state));
4826 goto recheck;
4827 }
4828 }
4829 return 0;
4830}
4831
4832
4833
4834
4835asmlinkage long
4836sys_perfmonctl (int fd, int cmd, void __user *arg, int count)
4837{
4838 struct file *file = NULL;
4839 pfm_context_t *ctx = NULL;
4840 unsigned long flags = 0UL;
4841 void *args_k = NULL;
4842 long ret;
4843 size_t base_sz, sz, xtra_sz = 0;
4844 int narg, completed_args = 0, call_made = 0, cmd_flags;
4845 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
4846 int (*getsize)(void *arg, size_t *sz);
4847#define PFM_MAX_ARGSIZE 4096
4848
4849
4850
4851
4852 if (unlikely(pmu_conf == NULL)) return -ENOSYS;
4853
4854 if (unlikely(cmd < 0 || cmd >= PFM_CMD_COUNT)) {
4855 DPRINT(("invalid cmd=%d\n", cmd));
4856 return -EINVAL;
4857 }
4858
4859 func = pfm_cmd_tab[cmd].cmd_func;
4860 narg = pfm_cmd_tab[cmd].cmd_narg;
4861 base_sz = pfm_cmd_tab[cmd].cmd_argsize;
4862 getsize = pfm_cmd_tab[cmd].cmd_getsize;
4863 cmd_flags = pfm_cmd_tab[cmd].cmd_flags;
4864
4865 if (unlikely(func == NULL)) {
4866 DPRINT(("invalid cmd=%d\n", cmd));
4867 return -EINVAL;
4868 }
4869
4870 DPRINT(("cmd=%s idx=%d narg=0x%x argsz=%lu count=%d\n",
4871 PFM_CMD_NAME(cmd),
4872 cmd,
4873 narg,
4874 base_sz,
4875 count));
4876
4877
4878
4879
4880 if (unlikely((narg == PFM_CMD_ARG_MANY && count <= 0) || (narg > 0 && narg != count)))
4881 return -EINVAL;
4882
4883restart_args:
4884 sz = xtra_sz + base_sz*count;
4885
4886
4887
4888 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4889 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4890 return -E2BIG;
4891 }
4892
4893
4894
4895
4896 if (likely(count && args_k == NULL)) {
4897 args_k = kmalloc(PFM_MAX_ARGSIZE, GFP_KERNEL);
4898 if (args_k == NULL) return -ENOMEM;
4899 }
4900
4901 ret = -EFAULT;
4902
4903
4904
4905
4906
4907
4908 if (sz && copy_from_user(args_k, arg, sz)) {
4909 DPRINT(("cannot copy_from_user %lu bytes @%p\n", sz, arg));
4910 goto error_args;
4911 }
4912
4913
4914
4915
4916 if (completed_args == 0 && getsize) {
4917
4918
4919
4920 ret = (*getsize)(args_k, &xtra_sz);
4921 if (ret) goto error_args;
4922
4923 completed_args = 1;
4924
4925 DPRINT(("restart_args sz=%lu xtra_sz=%lu\n", sz, xtra_sz));
4926
4927
4928 if (likely(xtra_sz)) goto restart_args;
4929 }
4930
4931 if (unlikely((cmd_flags & PFM_CMD_FD) == 0)) goto skip_fd;
4932
4933 ret = -EBADF;
4934
4935 file = fget(fd);
4936 if (unlikely(file == NULL)) {
4937 DPRINT(("invalid fd %d\n", fd));
4938 goto error_args;
4939 }
4940 if (unlikely(PFM_IS_FILE(file) == 0)) {
4941 DPRINT(("fd %d not related to perfmon\n", fd));
4942 goto error_args;
4943 }
4944
4945 ctx = (pfm_context_t *)file->private_data;
4946 if (unlikely(ctx == NULL)) {
4947 DPRINT(("no context for fd %d\n", fd));
4948 goto error_args;
4949 }
4950 prefetch(&ctx->ctx_state);
4951
4952 PROTECT_CTX(ctx, flags);
4953
4954
4955
4956
4957 ret = pfm_check_task_state(ctx, cmd, flags);
4958 if (unlikely(ret)) goto abort_locked;
4959
4960skip_fd:
4961 ret = (*func)(ctx, args_k, count, task_pt_regs(current));
4962
4963 call_made = 1;
4964
4965abort_locked:
4966 if (likely(ctx)) {
4967 DPRINT(("context unlocked\n"));
4968 UNPROTECT_CTX(ctx, flags);
4969 }
4970
4971
4972 if (call_made && PFM_CMD_RW_ARG(cmd) && copy_to_user(arg, args_k, base_sz*count)) ret = -EFAULT;
4973
4974error_args:
4975 if (file)
4976 fput(file);
4977
4978 kfree(args_k);
4979
4980 DPRINT(("cmd=%s ret=%ld\n", PFM_CMD_NAME(cmd), ret));
4981
4982 return ret;
4983}
4984
4985static void
4986pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs)
4987{
4988 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt;
4989 pfm_ovfl_ctrl_t rst_ctrl;
4990 int state;
4991 int ret = 0;
4992
4993 state = ctx->ctx_state;
4994
4995
4996
4997
4998 if (CTX_HAS_SMPL(ctx)) {
4999
5000 rst_ctrl.bits.mask_monitoring = 0;
5001 rst_ctrl.bits.reset_ovfl_pmds = 0;
5002
5003 if (state == PFM_CTX_LOADED)
5004 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
5005 else
5006 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs);
5007 } else {
5008 rst_ctrl.bits.mask_monitoring = 0;
5009 rst_ctrl.bits.reset_ovfl_pmds = 1;
5010 }
5011
5012 if (ret == 0) {
5013 if (rst_ctrl.bits.reset_ovfl_pmds) {
5014 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET);
5015 }
5016 if (rst_ctrl.bits.mask_monitoring == 0) {
5017 DPRINT(("resuming monitoring\n"));
5018 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current);
5019 } else {
5020 DPRINT(("stopping monitoring\n"));
5021
5022 }
5023 ctx->ctx_state = PFM_CTX_LOADED;
5024 }
5025}
5026
5027
5028
5029
5030
5031static void
5032pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
5033{
5034 int ret;
5035
5036 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
5037
5038 ret = pfm_context_unload(ctx, NULL, 0, regs);
5039 if (ret) {
5040 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
5041 }
5042
5043
5044
5045
5046 wake_up_interruptible(&ctx->ctx_zombieq);
5047
5048
5049
5050
5051
5052
5053}
5054
5055static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
5064
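/*
 * deferred-work handler run on the return-to-user path once the overflow
 * handler has set the pending-work flag: blocks the task in blocking
 * mode, resets overflowed PMDs, or tears down a zombie context.
 */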
5065void
5066pfm_handle_work(void)
5067{
5068 pfm_context_t *ctx;
5069 struct pt_regs *regs;
5070 unsigned long flags, dummy_flags;
5071 unsigned long ovfl_regs;
5072 unsigned int reason;
5073 int ret;
5074
5075 ctx = PFM_GET_CTX(current);
5076 if (ctx == NULL) {
5077 printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
5078 return;
5079 }
5080
5081 PROTECT_CTX(ctx, flags);
5082
5083 PFM_SET_WORK_PENDING(current, 0);
5084
5085 pfm_clear_task_notify();
5086
5087 regs = task_pt_regs(current);
5088
5089
5090
5091
5092 reason = ctx->ctx_fl_trap_reason;
5093 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE;
5094 ovfl_regs = ctx->ctx_ovfl_regs[0];
5095
5096 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state));
5097
5098
5099
5100
5101 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) goto do_zombie;
5102
5103
5104
5105 if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;
5106
5107
5108
5109
5110
5111 UNPROTECT_CTX(ctx, flags);
5112
5113
5114
5115
5116 local_irq_enable();
5117
5118 DPRINT(("before block sleeping\n"));
5119
5120
5121
5122
5123
5124 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done);
5125
5126 DPRINT(("after block sleeping ret=%d\n", ret));
5127
5128
5129
5130
5131
5132
5133
5134 PROTECT_CTX(ctx, dummy_flags);
5135
5136
5137
5138
5139
5140
5141
5142 ovfl_regs = ctx->ctx_ovfl_regs[0];
5143
5144 if (ctx->ctx_fl_going_zombie) {
5145do_zombie:
5146 DPRINT(("context is zombie, bailing out\n"));
5147 pfm_context_force_terminate(ctx, regs);
5148 goto nothing_to_do;
5149 }
5150
5151
5152
5153 if (ret < 0) goto nothing_to_do;
5154
5155skip_blocking:
5156 pfm_resume_after_ovfl(ctx, ovfl_regs, regs);
5157 ctx->ctx_ovfl_regs[0] = 0UL;
5158
5159nothing_to_do:
5160
5161
5162
5163 UNPROTECT_CTX(ctx, flags);
5164}
5165
5166static int
5167pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg)
5168{
5169 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5170 DPRINT(("ignoring overflow notification, owner is zombie\n"));
5171 return 0;
5172 }
5173
5174 DPRINT(("waking up somebody\n"));
5175
5176 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait);
5177
5178
5179
5180
5181
5182 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN);
5183
5184 return 0;
5185}
5186
5187static int
5188pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds)
5189{
5190 pfm_msg_t *msg = NULL;
5191
5192 if (ctx->ctx_fl_no_msg == 0) {
5193 msg = pfm_get_new_msg(ctx);
5194 if (msg == NULL) {
5195 printk(KERN_ERR "perfmon: pfm_ovfl_notify_user no more notification msgs\n");
5196 return -1;
5197 }
5198
5199 msg->pfm_ovfl_msg.msg_type = PFM_MSG_OVFL;
5200 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd;
5201 msg->pfm_ovfl_msg.msg_active_set = 0;
5202 msg->pfm_ovfl_msg.msg_ovfl_pmds[0] = ovfl_pmds;
5203 msg->pfm_ovfl_msg.msg_ovfl_pmds[1] = 0UL;
5204 msg->pfm_ovfl_msg.msg_ovfl_pmds[2] = 0UL;
5205 msg->pfm_ovfl_msg.msg_ovfl_pmds[3] = 0UL;
5206 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5207 }
5208
5209 DPRINT(("ovfl msg: msg=%p no_msg=%d fd=%d ovfl_pmds=0x%lx\n",
5210 msg,
5211 ctx->ctx_fl_no_msg,
5212 ctx->ctx_fd,
5213 ovfl_pmds));
5214
5215 return pfm_notify_user(ctx, msg);
5216}
5217
5218static int
5219pfm_end_notify_user(pfm_context_t *ctx)
5220{
5221 pfm_msg_t *msg;
5222
5223 msg = pfm_get_new_msg(ctx);
5224 if (msg == NULL) {
5225 printk(KERN_ERR "perfmon: pfm_end_notify_user no more notification msgs\n");
5226 return -1;
5227 }
5228
5229 memset(msg, 0, sizeof(*msg));
5230
5231 msg->pfm_end_msg.msg_type = PFM_MSG_END;
5232 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd;
5233 msg->pfm_ovfl_msg.msg_tstamp = 0UL;
5234
5235 DPRINT(("end msg: msg=%p no_msg=%d ctx_fd=%d\n",
5236 msg,
5237 ctx->ctx_fl_no_msg,
5238 ctx->ctx_fd));
5239
5240 return pfm_notify_user(ctx, msg);
5241}
5246
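/*
 * main overflow processing routine: called with the context locked, it
 * updates the 64-bit software counters, invokes the sampling-format
 * handler if one is attached, and decides on notification, masking and
 * reset.
 */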
5247static void
5248pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, struct pt_regs *regs)
5249{
5250 pfm_ovfl_arg_t *ovfl_arg;
5251 unsigned long mask;
5252 unsigned long old_val, ovfl_val, new_val;
5253 unsigned long ovfl_notify = 0UL, ovfl_pmds = 0UL, smpl_pmds = 0UL, reset_pmds;
5254 unsigned long tstamp;
5255 pfm_ovfl_ctrl_t ovfl_ctrl;
5256 unsigned int i, has_smpl;
5257 int must_notify = 0;
5258
5259 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring;
5260
5261
5262
5263
5264 if (unlikely((pmc0 & 0x1) == 0)) goto sanity_check;
5265
5266 tstamp = ia64_get_itc();
5267 mask = pmc0 >> PMU_FIRST_COUNTER;
5268 ovfl_val = pmu_conf->ovfl_val;
5269 has_smpl = CTX_HAS_SMPL(ctx);
5270
5271 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5272 "used_pmds=0x%lx\n",
5273 pmc0,
5274 task ? task_pid_nr(task): -1,
5275 (regs ? regs->cr_iip : 0),
5276 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5277 ctx->ctx_used_pmds[0]));
5278
5279
5280
5281
5282
5283
5284 for (i = PMU_FIRST_COUNTER; mask ; i++, mask >>= 1) {
5285
5286
5287 if ((mask & 0x1) == 0) continue;
5288
5289
5290
5291
5292
5293
5294
5295 old_val = new_val = ctx->ctx_pmds[i].val;
5296 new_val += 1 + ovfl_val;
5297 ctx->ctx_pmds[i].val = new_val;
5298
5299
5300
5301
5302 if (likely(old_val > new_val)) {
5303 ovfl_pmds |= 1UL << i;
5304 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i;
5305 }
5306
5307 DPRINT_ovfl(("ctx_pmd[%d].val=0x%lx old_val=0x%lx pmd=0x%lx ovfl_pmds=0x%lx ovfl_notify=0x%lx\n",
5308 i,
5309 new_val,
5310 old_val,
5311 ia64_get_pmd(i) & ovfl_val,
5312 ovfl_pmds,
5313 ovfl_notify));
5314 }
5315
5316
5317
5318
5319 if (ovfl_pmds == 0UL) return;
5320
5321
5322
5323
5324 ovfl_ctrl.val = 0;
5325 reset_pmds = 0UL;
5326
5327
5328
5329
5330
5331 if (has_smpl) {
5332 unsigned long start_cycles, end_cycles;
5333 unsigned long pmd_mask;
5334 int j, k, ret = 0;
5335 int this_cpu = smp_processor_id();
5336
5337 pmd_mask = ovfl_pmds >> PMU_FIRST_COUNTER;
5338 ovfl_arg = &ctx->ctx_ovfl_arg;
5339
5340 prefetch(ctx->ctx_smpl_hdr);
5341
5342 for(i=PMU_FIRST_COUNTER; pmd_mask && ret == 0; i++, pmd_mask >>=1) {
5343
5344 mask = 1UL << i;
5345
5346 if ((pmd_mask & 0x1) == 0) continue;
5347
5348 ovfl_arg->ovfl_pmd = (unsigned char )i;
5349 ovfl_arg->ovfl_notify = ovfl_notify & mask ? 1 : 0;
5350 ovfl_arg->active_set = 0;
5351 ovfl_arg->ovfl_ctrl.val = 0;
5352 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0];
5353
5354 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val;
5355 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval;
5356 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid;
5357
5358
5359
5360
5361
5362 if (smpl_pmds) {
5363 for(j=0, k=0; smpl_pmds; j++, smpl_pmds >>=1) {
5364 if ((smpl_pmds & 0x1) == 0) continue;
5365 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j);
5366 DPRINT_ovfl(("smpl_pmd[%d]=pmd%u=0x%lx\n", k-1, j, ovfl_arg->smpl_pmds_values[k-1]));
5367 }
5368 }
5369
5370 pfm_stats[this_cpu].pfm_smpl_handler_calls++;
5371
5372 start_cycles = ia64_get_itc();
5373
5374
5375
5376
5377 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp);
5378
5379 end_cycles = ia64_get_itc();
5380
5381
5382
5383
5384
5385 ovfl_ctrl.bits.notify_user |= ovfl_arg->ovfl_ctrl.bits.notify_user;
5386 ovfl_ctrl.bits.block_task |= ovfl_arg->ovfl_ctrl.bits.block_task;
5387 ovfl_ctrl.bits.mask_monitoring |= ovfl_arg->ovfl_ctrl.bits.mask_monitoring;
5388
5389
5390
5391 if (ovfl_arg->ovfl_ctrl.bits.reset_ovfl_pmds) reset_pmds |= mask;
5392
5393 pfm_stats[this_cpu].pfm_smpl_handler_cycles += end_cycles - start_cycles;
5394 }
5395
5396
5397
5398 if (ret && pmd_mask) {
5399 DPRINT(("handler aborts leftover ovfl_pmds=0x%lx\n",
5400 pmd_mask<<PMU_FIRST_COUNTER));
5401 }
5402
5403
5404
5405 ovfl_pmds &= ~reset_pmds;
5406 } else {
5407
5408
5409
5410
5411 ovfl_ctrl.bits.notify_user = ovfl_notify ? 1 : 0;
5412 ovfl_ctrl.bits.block_task = ovfl_notify ? 1 : 0;
5413 ovfl_ctrl.bits.mask_monitoring = ovfl_notify ? 1 : 0;
5414 ovfl_ctrl.bits.reset_ovfl_pmds = ovfl_notify ? 0 : 1;
5415
5416
5417
5418 if (ovfl_notify == 0) reset_pmds = ovfl_pmds;
5419 }
5420
5421 DPRINT_ovfl(("ovfl_pmds=0x%lx reset_pmds=0x%lx\n", ovfl_pmds, reset_pmds));
5422
/*
 * reset the requested PMD registers using the short reset values
 */
5426 if (reset_pmds) {
5427 unsigned long bm = reset_pmds;
5428 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET);
5429 }
5430
5431 if (ovfl_notify && ovfl_ctrl.bits.notify_user) {
5432
/*
 * keep track of what to reset when unblocking
 */
5435 ctx->ctx_ovfl_regs[0] = ovfl_pmds;
5436
/*
 * check for blocking context
 */
5440 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) {
5441
5442 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK;
5443
/*
 * set the perfmon-specific pending work for the task
 */
5447 PFM_SET_WORK_PENDING(task, 1);
5448
/*
 * when coming from ctxsw, current still points to the previous task,
 * therefore we must work with task and not current
 */
5453 pfm_set_task_notify(task);
5454 }
5455
/*
 * defer the notification until the state is changed (shortens the spin
 * window): the context is locked anyway, so the receiver would only
 * come and spin for nothing.
 */
5459 must_notify = 1;
5460 }
5461
5462 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5463 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5464 PFM_GET_WORK_PENDING(task),
5465 ctx->ctx_fl_trap_reason,
5466 ovfl_pmds,
5467 ovfl_notify,
5468 ovfl_ctrl.bits.mask_monitoring ? 1 : 0));
5469
/*
 * perform monitoring masking as requested by the format module or by
 * the default overflow policy
 */
5472 if (ovfl_ctrl.bits.mask_monitoring) {
5473 pfm_mask_monitoring(task);
5474 ctx->ctx_state = PFM_CTX_MASKED;
5475 ctx->ctx_fl_can_restart = 1;
5476 }
5477
/*
 * send notification now
 */
5481 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify);
5482
5483 return;
5484
5485sanity_check:
5486 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5487 smp_processor_id(),
5488 task ? task_pid_nr(task) : -1,
5489 pmc0);
5490 return;
5491
5492stop_monitoring:
/*
 * we come here only with a ZOMBIE context, i.e., the context is being
 * torn down while the task it is attached to is exiting. delivering the
 * overflow is pointless, so we simply stop monitoring for this task:
 *
 *  - clear psr.up in the live PSR and in the task's saved pt_regs so
 *    that monitoring stays off when the task resumes,
 *  - set psr.sp so that user level cannot turn it back on.
 *
 * the interrupt is treated as spurious; the zombie context itself is
 * reclaimed later on the context switch path.
 */
5521 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5522 pfm_clear_psr_up();
5523 ia64_psr(regs)->up = 0;
5524 ia64_psr(regs)->sp = 1;
5525 return;
5526}
5527
5528static int
5529pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
5530{
5531 struct task_struct *task;
5532 pfm_context_t *ctx;
5533 unsigned long flags;
5534 u64 pmc0;
5535 int this_cpu = smp_processor_id();
5536 int retval = 0;
5537
5538 pfm_stats[this_cpu].pfm_ovfl_intr_count++;
5539
/*
 * srlz.d done before arriving here: pmc0 holds a stable overflow status
 */
5543 pmc0 = ia64_get_pmc(0);
5544
5545 task = GET_PMU_OWNER();
5546 ctx = GET_PMU_CTX();
5547
/*
 * if we have pending overflow bits set, the PMU is frozen and we need a
 * valid owner to dispatch to
 */
5552 if (PMC0_HAS_OVFL(pmc0) && task) {
5553
/*
 * we assume pmc0.fr is always set here.
 * sanity check: the owner must have a context attached.
 */
5558 if (!ctx) goto report_spurious1;
5559
5560 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0)
5561 goto report_spurious2;
5562
5563 PROTECT_CTX_NOPRINT(ctx, flags);
5564
5565 pfm_overflow_handler(task, ctx, pmc0, regs);
5566
5567 UNPROTECT_CTX_NOPRINT(ctx, flags);
5568
5569 } else {
5570 pfm_stats[this_cpu].pfm_spurious_ovfl_intr_count++;
5571 retval = -1;
5572 }
5573
/*
 * keep the PMU unfrozen at all times
 */
5576 pfm_unfreeze_pmu();
5577
5578 return retval;
5579
5580report_spurious1:
5581 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5582 this_cpu, task_pid_nr(task));
5583 pfm_unfreeze_pmu();
5584 return -1;
5585report_spurious2:
5586 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5587 this_cpu,
5588 task_pid_nr(task));
5589 pfm_unfreeze_pmu();
5590 return -1;
5591}
5592
5593static irqreturn_t
5594pfm_interrupt_handler(int irq, void *arg)
5595{
5596 unsigned long start_cycles, total_cycles;
5597 unsigned long min, max;
5598 int this_cpu;
5599 int ret;
5600 struct pt_regs *regs = get_irq_regs();
5601
5602 this_cpu = get_cpu();
5603 if (likely(!pfm_alt_intr_handler)) {
5604 min = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min;
5605 max = pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max;
5606
5607 start_cycles = ia64_get_itc();
5608
5609 ret = pfm_do_interrupt_handler(irq, arg, regs);
5610
5611 total_cycles = ia64_get_itc();
5612
/*
 * don't measure spurious interrupts
 */
5616 if (likely(ret == 0)) {
5617 total_cycles -= start_cycles;
5618
5619 if (total_cycles < min) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_min = total_cycles;
5620 if (total_cycles > max) pfm_stats[this_cpu].pfm_ovfl_intr_cycles_max = total_cycles;
5621
5622 pfm_stats[this_cpu].pfm_ovfl_intr_cycles += total_cycles;
5623 }
5624 }
5625 else {
5626 (*pfm_alt_intr_handler->handler)(irq, arg, regs);
5627 }
5628
5629 put_cpu_no_resched();
5630 return IRQ_HANDLED;
5631}
5632
/*
 * /proc/perfmon interface, for debug only
 */
#define PFM_PROC_SHOW_HEADER ((void *)(long)NR_CPUS+1)
5638
5639static void *
5640pfm_proc_start(struct seq_file *m, loff_t *pos)
5641{
5642 if (*pos == 0) {
5643 return PFM_PROC_SHOW_HEADER;
5644 }
5645
5646 while (*pos <= NR_CPUS) {
5647 if (cpu_online(*pos - 1)) {
5648 return (void *)*pos;
5649 }
5650 ++*pos;
5651 }
5652 return NULL;
5653}
5654
5655static void *
5656pfm_proc_next(struct seq_file *m, void *v, loff_t *pos)
5657{
5658 ++*pos;
5659 return pfm_proc_start(m, pos);
5660}
5661
5662static void
5663pfm_proc_stop(struct seq_file *m, void *v)
5664{
5665}
5666
5667static void
5668pfm_proc_show_header(struct seq_file *m)
5669{
5670 struct list_head * pos;
5671 pfm_buffer_fmt_t * entry;
5672 unsigned long flags;
5673
5674 seq_printf(m,
5675 "perfmon version : %u.%u\n"
5676 "model : %s\n"
5677 "fastctxsw : %s\n"
5678 "expert mode : %s\n"
5679 "ovfl_mask : 0x%lx\n"
5680 "PMU flags : 0x%x\n",
5681 PFM_VERSION_MAJ, PFM_VERSION_MIN,
5682 pmu_conf->pmu_name,
5683 pfm_sysctl.fastctxsw > 0 ? "Yes": "No",
5684 pfm_sysctl.expert_mode > 0 ? "Yes": "No",
5685 pmu_conf->ovfl_val,
5686 pmu_conf->flags);
5687
5688 LOCK_PFS(flags);
5689
5690 seq_printf(m,
5691 "proc_sessions : %u\n"
5692 "sys_sessions : %u\n"
5693 "sys_use_dbregs : %u\n"
5694 "ptrace_use_dbregs : %u\n",
5695 pfm_sessions.pfs_task_sessions,
5696 pfm_sessions.pfs_sys_sessions,
5697 pfm_sessions.pfs_sys_use_dbregs,
5698 pfm_sessions.pfs_ptrace_use_dbregs);
5699
5700 UNLOCK_PFS(flags);
5701
5702 spin_lock(&pfm_buffer_fmt_lock);
5703
5704 list_for_each(pos, &pfm_buffer_fmt_list) {
5705 entry = list_entry(pos, pfm_buffer_fmt_t, fmt_list);
5706 seq_printf(m, "format : %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x %s\n",
5707 entry->fmt_uuid[0],
5708 entry->fmt_uuid[1],
5709 entry->fmt_uuid[2],
5710 entry->fmt_uuid[3],
5711 entry->fmt_uuid[4],
5712 entry->fmt_uuid[5],
5713 entry->fmt_uuid[6],
5714 entry->fmt_uuid[7],
5715 entry->fmt_uuid[8],
5716 entry->fmt_uuid[9],
5717 entry->fmt_uuid[10],
5718 entry->fmt_uuid[11],
5719 entry->fmt_uuid[12],
5720 entry->fmt_uuid[13],
5721 entry->fmt_uuid[14],
5722 entry->fmt_uuid[15],
5723 entry->fmt_name);
5724 }
5725 spin_unlock(&pfm_buffer_fmt_lock);
5726
5727}
5728
5729static int
5730pfm_proc_show(struct seq_file *m, void *v)
5731{
5732 unsigned long psr;
5733 unsigned int i;
5734 int cpu;
5735
5736 if (v == PFM_PROC_SHOW_HEADER) {
5737 pfm_proc_show_header(m);
5738 return 0;
5739 }
5740
/* show the statistics for CPU (v - 1) */
5743 cpu = (long)v - 1;
5744 seq_printf(m,
5745 "CPU%-2d overflow intrs : %lu\n"
5746 "CPU%-2d overflow cycles : %lu\n"
5747 "CPU%-2d overflow min : %lu\n"
5748 "CPU%-2d overflow max : %lu\n"
5749 "CPU%-2d smpl handler calls : %lu\n"
5750 "CPU%-2d smpl handler cycles : %lu\n"
5751 "CPU%-2d spurious intrs : %lu\n"
5752 "CPU%-2d replay intrs : %lu\n"
5753 "CPU%-2d syst_wide : %d\n"
5754 "CPU%-2d dcr_pp : %d\n"
5755 "CPU%-2d exclude idle : %d\n"
5756 "CPU%-2d owner : %d\n"
5757 "CPU%-2d context : %p\n"
5758 "CPU%-2d activations : %lu\n",
5759 cpu, pfm_stats[cpu].pfm_ovfl_intr_count,
5760 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles,
5761 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_min,
5762 cpu, pfm_stats[cpu].pfm_ovfl_intr_cycles_max,
5763 cpu, pfm_stats[cpu].pfm_smpl_handler_calls,
5764 cpu, pfm_stats[cpu].pfm_smpl_handler_cycles,
5765 cpu, pfm_stats[cpu].pfm_spurious_ovfl_intr_count,
5766 cpu, pfm_stats[cpu].pfm_replay_ovfl_intr_count,
5767 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_SYST_WIDE ? 1 : 0,
5768 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_DCR_PP ? 1 : 0,
5769 cpu, pfm_get_cpu_data(pfm_syst_info, cpu) & PFM_CPUINFO_EXCL_IDLE ? 1 : 0,
5770 cpu, pfm_get_cpu_data(pmu_owner, cpu) ? pfm_get_cpu_data(pmu_owner, cpu)->pid: -1,
5771 cpu, pfm_get_cpu_data(pmu_ctx, cpu),
5772 cpu, pfm_get_cpu_data(pmu_activation_number, cpu));
5773
5774 if (num_online_cpus() == 1 && pfm_sysctl.debug > 0) {
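/*
 * with a single online CPU and debugging enabled, it is safe to dump the
 * live PMU registers from here
 */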
5775
5776 psr = pfm_get_psr();
5777
5778 ia64_srlz_d();
5779
5780 seq_printf(m,
5781 "CPU%-2d psr : 0x%lx\n"
5782 "CPU%-2d pmc0 : 0x%lx\n",
5783 cpu, psr,
5784 cpu, ia64_get_pmc(0));
5785
5786 for (i=0; PMC_IS_LAST(i) == 0; i++) {
5787 if (PMC_IS_COUNTING(i) == 0) continue;
5788 seq_printf(m,
5789 "CPU%-2d pmc%u : 0x%lx\n"
5790 "CPU%-2d pmd%u : 0x%lx\n",
5791 cpu, i, ia64_get_pmc(i),
5792 cpu, i, ia64_get_pmd(i));
5793 }
5794 }
5795 return 0;
5796}
5797
5798struct seq_operations pfm_seq_ops = {
5799 .start = pfm_proc_start,
5800 .next = pfm_proc_next,
5801 .stop = pfm_proc_stop,
5802 .show = pfm_proc_show
5803};
5804
5805static int
5806pfm_proc_open(struct inode *inode, struct file *file)
5807{
5808 return seq_open(file, &pfm_seq_ops);
5809}
5810
/*
 * called on every context switch while a system-wide session is active on
 * this CPU. We cannot assume monitoring is active or inactive from the mode
 * alone; we must rely on the per-CPU pfm_syst_info bits passed in via info.
 */
5818void
5819pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_ctxswin)
5820{
5821 struct pt_regs *regs;
5822 unsigned long dcr;
5823 unsigned long dcr_pp;
5824
5825 dcr_pp = info & PFM_CPUINFO_DCR_PP ? 1 : 0;
5826
/*
 * pid 0 is guaranteed to be the idle task. There is one such task with
 * pid 0 on every CPU, so we can rely on the pid to identify the idle task.
 */
5831 if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
5832 regs = task_pt_regs(task);
5833 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
5834 return;
5835 }
5836
/*
 * if monitoring has started
 */
5839 if (dcr_pp) {
5840 dcr = ia64_getreg(_IA64_REG_CR_DCR);
5841
/*
 * context switching in?
 */
5844 if (is_ctxswin) {
/* mask monitoring for the idle task */
5846 ia64_setreg(_IA64_REG_CR_DCR, dcr & ~IA64_DCR_PP);
5847 pfm_clear_psr_pp();
5848 ia64_srlz_i();
5849 return;
5850 }
5851
/*
 * context switching out: restore monitoring for the task coming in.
 *
 * Due to inlining, this odd if-then-else construction generates
 * better code.
 */
ia64_setreg(_IA64_REG_CR_DCR, dcr | IA64_DCR_PP);
5859 pfm_set_psr_pp();
5860 ia64_srlz_i();
5861 }
5862}
5863
5864#ifdef CONFIG_SMP
5865
5866static void
5867pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5868{
5869 struct task_struct *task = ctx->ctx_task;
5870
5871 ia64_psr(regs)->up = 0;
5872 ia64_psr(regs)->sp = 1;
5873
5874 if (GET_PMU_OWNER() == task) {
5875 DPRINT(("cleared ownership for [%d]\n",
5876 task_pid_nr(ctx->ctx_task)));
5877 SET_PMU_OWNER(NULL, NULL);
5878 }
5879
/*
 * disconnect the task from the context and vice-versa
 */
5883 PFM_SET_WORK_PENDING(task, 0);
5884
5885 task->thread.pfm_context = NULL;
5886 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5887
5888 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5889}
5890
/*
 * save the PMU state on context switch out. in SMP we never save lazily:
 * the task may be rescheduled on another CPU.
 */
5895void
5896pfm_save_regs(struct task_struct *task)
5897{
5898 pfm_context_t *ctx;
5899 unsigned long flags;
5900 u64 psr;
5901
5902
5903 ctx = PFM_GET_CTX(task);
5904 if (ctx == NULL) return;
5905
/*
 * we always come here with interrupts ALREADY disabled by the scheduler,
 * so we only need to protect against concurrent access, not CPU
 * concurrency.
 */
5911 flags = pfm_protect_ctx_ctxsw(ctx);
5912
5913 if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
5914 struct pt_regs *regs = task_pt_regs(task);
5915
5916 pfm_clear_psr_up();
5917
5918 pfm_force_cleanup(ctx, regs);
5919
5920 BUG_ON(ctx->ctx_smpl_hdr);
5921
5922 pfm_unprotect_ctx_ctxsw(ctx, flags);
5923
5924 pfm_context_free(ctx);
5925 return;
5926 }
5927
/*
 * save current PSR: needed because we modify it
 */
5931 ia64_srlz_d();
5932 psr = pfm_get_psr();
5933
5934 BUG_ON(psr & (IA64_PSR_I));
5935
/*
 * stop monitoring:
 * this is the last instruction which may generate an overflow.
 *
 * we do not need to set psr.sp because it is irrelevant in kernel;
 * it will be restored from ipsr when going back to user level.
 */
5943 pfm_clear_psr_up();
5944
/*
 * keep a copy of psr.up (for reload)
 */
5948 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
5949
/*
 * release ownership of this PMU.
 * PMU interrupts are masked, so nothing can happen.
 */
5955 SET_PMU_OWNER(NULL, NULL);
5956
/*
 * we systematically save the PMDs: we have no guarantee we will be
 * scheduled on this same CPU again
 */
5962 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
5963
/*
 * save pmc0: ia64_srlz_d() done in pfm_save_pmds().
 * we will need it on the restore path to check for pending overflow.
 */
5969 ctx->th_pmcs[0] = ia64_get_pmc(0);
5970
/*
 * unfreeze the PMU if it had pending overflows
 */
5974 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
5975
/*
 * finally, allow context access.
 * interrupts will still be masked after this call.
 */
5980 pfm_unprotect_ctx_ctxsw(ctx, flags);
5981}
5982
5983#else
5984void
5985pfm_save_regs(struct task_struct *task)
5986{
5987 pfm_context_t *ctx;
5988 u64 psr;
5989
5990 ctx = PFM_GET_CTX(task);
5991 if (ctx == NULL) return;
5992
/*
 * save current PSR: needed because we modify it
 */
5996 psr = pfm_get_psr();
5997
5998 BUG_ON(psr & (IA64_PSR_I));
5999
/*
 * stop monitoring:
 * this is the last instruction which may generate an overflow.
 *
 * we do not need to set psr.sp because it is irrelevant in kernel;
 * it will be restored from ipsr when going back to user level.
 */
6007 pfm_clear_psr_up();
6008
/*
 * keep a copy of psr.up (for reload)
 */
6012 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP;
6013}
6014
6015static void
6016pfm_lazy_save_regs (struct task_struct *task)
6017{
6018 pfm_context_t *ctx;
6019 unsigned long flags;
6020
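/*
 * monitoring must already be stopped (psr.up cleared) by the time we
 * lazily save the registers; the BUG_ON below enforces that invariant
 */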
6021 { u64 psr = pfm_get_psr();
6022 BUG_ON(psr & IA64_PSR_UP);
6023 }
6024
6025 ctx = PFM_GET_CTX(task);
6026
/*
 * we need to mask PMU overflow interrupts here to make sure that we
 * maintain pmc0 until we save it. overflow interrupts are treated as
 * spurious when there is no owner.
 *
 * XXX: this may not be strictly necessary
 */
6036 PROTECT_CTX(ctx,flags);
6037
/*
 * release ownership of this PMU.
 * must be done before we save the registers: after this call, any PMU
 * interrupt is treated as spurious.
 */
6045 SET_PMU_OWNER(NULL, NULL);
6046
/*
 * save all the pmds we use
 */
6050 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]);
6051
/*
 * save pmc0: ia64_srlz_d() done in pfm_save_pmds().
 * needed on the restore path to check for pending overflow.
 */
6057 ctx->th_pmcs[0] = ia64_get_pmc(0);
6058
/*
 * unfreeze the PMU if it had pending overflows
 */
6062 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu();
6063
/*
 * now we can unmask PMU interrupts: they will be treated as purely
 * spurious and we will not lose any information
 */
6069 UNPROTECT_CTX(ctx,flags);
6070}
6071#endif
6072
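/*
 * Note the asymmetry between the two configurations: in SMP the PMU state is
 * saved eagerly in pfm_save_regs() because the task may be rescheduled on
 * another CPU; in UP the save is deferred to pfm_lazy_save_regs(), invoked
 * only when another context actually needs the PMU.
 */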
6073#ifdef CONFIG_SMP
/*
 * context switch in path (SMP): interrupts are masked when we come here
 * and the runqueue lock is held
 */
6077void
6078pfm_load_regs (struct task_struct *task)
6079{
6080 pfm_context_t *ctx;
6081 unsigned long pmc_mask = 0UL, pmd_mask = 0UL;
6082 unsigned long flags;
6083 u64 psr, psr_up;
6084 int need_irq_resend;
6085
6086 ctx = PFM_GET_CTX(task);
6087 if (unlikely(ctx == NULL)) return;
6088
6089 BUG_ON(GET_PMU_OWNER());
6090
/*
 * possible on unload
 */
6094 if (unlikely((task->thread.flags & IA64_THREAD_PM_VALID) == 0)) return;
6095
/*
 * we always come here with interrupts ALREADY disabled by the scheduler,
 * so we only need to protect against concurrent access, not CPU
 * concurrency.
 */
6101 flags = pfm_protect_ctx_ctxsw(ctx);
6102 psr = pfm_get_psr();
6103
6104 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6105
6106 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6107 BUG_ON(psr & IA64_PSR_I);
6108
6109 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
6110 struct pt_regs *regs = task_pt_regs(task);
6111
6112 BUG_ON(ctx->ctx_smpl_hdr);
6113
6114 pfm_force_cleanup(ctx, regs);
6115
6116 pfm_unprotect_ctx_ctxsw(ctx, flags);
6117
/*
 * the context is kmalloc'ed: freeing it is fine with interrupts
 * disabled
 */
6121 pfm_context_free(ctx);
6122
6123 return;
6124 }
6125
/*
 * we restore ALL the debug registers to avoid picking up stale state
 */
6130 if (ctx->ctx_fl_using_dbreg) {
6131 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6132 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6133 }
6134
/*
 * retrieve saved psr.up
 */
6137 psr_up = ctx->ctx_saved_psr_up;
6138
/*
 * if we were the last user of the PMU on this CPU, then nothing to do
 * except restore psr.up
 */
6143 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) {
6144
/*
 * retrieve partial reload masks (due to user modifications)
 */
6148 pmc_mask = ctx->ctx_reload_pmcs[0];
6149 pmd_mask = ctx->ctx_reload_pmds[0];
6150
6151 } else {
6152
/*
 * we were not the last user: reload the PMDs. with fastctxsw, only the
 * pmds actually used are restored; otherwise all implemented pmds are
 * restored, to avoid picking up stale values left behind by another
 * context.
 */
6158 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6159
/*
 * ALL accessible PMCs are systematically reloaded; unused registers get
 * their default (reset) values, to avoid picking up stale configuration.
 *
 * PMC0 is never in the mask: it is always restored separately.
 */
6167 pmc_mask = ctx->ctx_all_pmcs[0];
6168 }
6169
/*
 * when the context is MASKED, we restore PMCs with plm=0 and PMDs with
 * stale information, but that is fine: nothing will be captured
 */
6176 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6177 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6178
/*
 * check for a pending overflow at the time the state was saved
 */
6183 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6184
/*
 * reload pmc0 with the overflow information.
 * on McKinley PMUs this directly triggers a PMU interrupt.
 */
6188 ia64_set_pmc(0, ctx->th_pmcs[0]);
6189 ia64_srlz_d();
6190 ctx->th_pmcs[0] = 0UL;
6191
/*
 * will replay the PMU interrupt
 */
6195 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6196
6197 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6198 }
6199
/*
 * we just did a reload, so we reset the partial reload fields
 */
6203 ctx->ctx_reload_pmcs[0] = 0UL;
6204 ctx->ctx_reload_pmds[0] = 0UL;
6205
6206 SET_LAST_CPU(ctx, smp_processor_id());
6207
/*
 * take a new activation number for this CPU's PMU
 */
6211 INC_ACTIVATION();
6212
/*
 * record the activation this context now matches
 */
6215 SET_ACTIVATION(ctx);
6216
6217
/*
 * establish new ownership
 */
6220 SET_PMU_OWNER(task, ctx);
6221
/*
 * restore the psr.up bit: measurement is active again.
 * no PMU interrupt can happen at this point because interrupts are
 * still disabled.
 */
6228 if (likely(psr_up)) pfm_set_psr_up();
6229
/*
 * allow concurrent access to the context
 */
6233 pfm_unprotect_ctx_ctxsw(ctx, flags);
6234}
6235#else
6236
/*
 * reload PMU state for UP kernels: the registers may still be live in
 * the PMU, since saving is done lazily (see pfm_lazy_save_regs())
 */
6240void
6241pfm_load_regs (struct task_struct *task)
6242{
6243 pfm_context_t *ctx;
6244 struct task_struct *owner;
6245 unsigned long pmd_mask, pmc_mask;
6246 u64 psr, psr_up;
6247 int need_irq_resend;
6248
6249 owner = GET_PMU_OWNER();
6250 ctx = PFM_GET_CTX(task);
6251 psr = pfm_get_psr();
6252
6253 BUG_ON(psr & (IA64_PSR_UP|IA64_PSR_PP));
6254 BUG_ON(psr & IA64_PSR_I);
6255
/*
 * we restore ALL the debug registers to avoid picking up stale state.
 *
 * this must be done even when the task is still the owner, because the
 * registers may have been modified via ptrace() (not perfmon) by the
 * previous task.
 */
6264 if (ctx->ctx_fl_using_dbreg) {
6265 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs);
6266 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs);
6267 }
6268
/*
 * retrieve saved psr.up
 */
6272 psr_up = ctx->ctx_saved_psr_up;
6273 need_irq_resend = pmu_conf->flags & PFM_PMU_IRQ_RESEND;
6274
/*
 * short path: our state is still in the PMU, we only need to restore
 * psr.up
 */
6283 if (likely(owner == task)) {
6284 if (likely(psr_up)) pfm_set_psr_up();
6285 return;
6286 }
6287
/*
 * someone else is still using the PMU: first push its state out, then
 * we can install ours. upon return, there is no owner for the PMU.
 */
6294 if (owner) pfm_lazy_save_regs(owner);
6295
/*
 * with fastctxsw, only the pmds actually used are restored; otherwise
 * all implemented pmds are restored, to avoid picking up stale values
 */
6302 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0];
6303
/*
 * ALL accessible PMCs are systematically reloaded; unused registers get
 * their default (reset) values, to avoid picking up stale configuration.
 *
 * PMC0 is never in the mask: it is always restored separately.
 */
6311 pmc_mask = ctx->ctx_all_pmcs[0];
6312
6313 pfm_restore_pmds(ctx->th_pmds, pmd_mask);
6314 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask);
6315
/*
 * check for a pending overflow at the time the state was saved
 */
6320 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) {
6321
/*
 * reload pmc0 with the overflow information.
 * on McKinley PMUs this directly triggers a PMU interrupt.
 */
6325 ia64_set_pmc(0, ctx->th_pmcs[0]);
6326 ia64_srlz_d();
6327
6328 ctx->th_pmcs[0] = 0UL;
6329
/*
 * will replay the PMU interrupt
 */
6333 if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
6334
6335 pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
6336 }
6337
/*
 * establish new ownership
 */
6341 SET_PMU_OWNER(task, ctx);
6342
/*
 * restore the psr.up bit: measurement is active again.
 * no PMU interrupt can happen at this point because interrupts are
 * still disabled.
 */
6349 if (likely(psr_up)) pfm_set_psr_up();
6350}
6351#endif
6352
/*
 * this function assumes monitoring is stopped
 */
6356static void
6357pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6358{
6359 u64 pmc0;
6360 unsigned long mask2, val, pmd_val, ovfl_val;
6361 int i, can_access_pmu = 0;
6362 int is_self;
6363
/*
 * is the caller the task being monitored (or the task which initiated
 * the session for system-wide measurements)?
 */
6368 is_self = ctx->ctx_task == task ? 1 : 0;
6369
/*
 * we can access the PMU if the task is the current owner or, in
 * system-wide mode, if we are running on the CPU the context is bound to
 */
6377 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id());
6378 if (can_access_pmu) {
6379
/*
 * mark the PMU as not owned: an in-flight overflow interrupt will then
 * do nothing. this guarantees that pmc0 contains the final state and
 * gives us full control over overflow processing from this point on.
 */
6387 SET_PMU_OWNER(NULL, NULL);
6388 DPRINT(("releasing ownership\n"));
6389
/*
 * read current overflow status:
 * we are guaranteed to read the final stable state
 */
6395 ia64_srlz_d();
6396 pmc0 = ia64_get_pmc(0);
6397
/*
 * reset the freeze bit: the overflow status information is destroyed
 */
6401 pfm_unfreeze_pmu();
6402 } else {
6403 pmc0 = ctx->th_pmcs[0];
6404
/*
 * clear whatever overflow status bits there were
 */
6407 ctx->th_pmcs[0] = 0;
6408 }
6409 ovfl_val = pmu_conf->ovfl_val;
6410
/*
 * we save all the used pmds, taking care of overflows for the counting
 * PMDs
 */
6416 mask2 = ctx->ctx_used_pmds[0];
6417
6418 DPRINT(("is_self=%d ovfl_val=0x%lx mask2=0x%lx\n", is_self, ovfl_val, mask2));
6419
6420 for (i = 0; mask2; i++, mask2>>=1) {
6421
/* skip non used pmds */
6423 if ((mask2 & 0x1) == 0) continue;
6424
/*
 * can_access_pmu is always true in system-wide mode
 */
6428 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i];
6429
6430 if (PMD_IS_COUNTING(i)) {
6431 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6432 task_pid_nr(task),
6433 i,
6434 ctx->ctx_pmds[i].val,
6435 val & ovfl_val));
6436
/*
 * rebuild the full 64-bit value of the counter
 */
6440 val = ctx->ctx_pmds[i].val + (val & ovfl_val);
6441
/*
 * everything is now in ctx_pmds[]: clear the copy saved by
 * pfm_save_regs() so that pfm_read_pmds() gets the correct value
 */
6447 pmd_val = 0UL;
6448
/*
 * take care of overflow inline
 */
6452 if (pmc0 & (1UL << i)) {
6453 val += 1 + ovfl_val;
6454 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6455 }
6456 }
6457
6458 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6459
6460 if (is_self) ctx->th_pmds[i] = pmd_val;
6461
6462 ctx->ctx_pmds[i].val = val;
6463 }
6464}
6465
6466static struct irqaction perfmon_irqaction = {
6467 .handler = pfm_interrupt_handler,
6468 .flags = IRQF_DISABLED,
6469 .name = "perfmon"
6470};
6471
6472static void
6473pfm_alt_save_pmu_state(void *data)
6474{
6475 struct pt_regs *regs;
6476
6477 regs = task_pt_regs(current);
6478
6479 DPRINT(("called\n"));
6480
/*
 * should not be necessary, but let's not take any risk: make sure
 * monitoring is stopped
 */
6485 pfm_clear_psr_up();
6486 pfm_clear_psr_pp();
6487 ia64_psr(regs)->pp = 0;
6488
/*
 * freeze the PMU: it is now fully under the control of the external
 * agent
 */
6493 pfm_freeze_pmu();
6494
6495 ia64_srlz_d();
6496}
6497
6498void
6499pfm_alt_restore_pmu_state(void *data)
6500{
6501 struct pt_regs *regs;
6502
6503 regs = task_pt_regs(current);
6504
6505 DPRINT(("called\n"));
6506
/*
 * should not be necessary, but let's not take any risk: make sure
 * monitoring is stopped
 */
6511 pfm_clear_psr_up();
6512 pfm_clear_psr_pp();
6513 ia64_psr(regs)->pp = 0;
6514
/*
 * unfreeze the PMU: perfmon runs with the PMU unfrozen at all times
 */
6518 pfm_unfreeze_pmu();
6519
6520 ia64_srlz_d();
6521}
6522
6523int
6524pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6525{
6526 int ret, i;
6527 int reserve_cpu;
6528
/* some sanity checks */
6530 if (hdl == NULL || hdl->handler == NULL) return -EINVAL;
6531
/* do the easy test first */
6533 if (pfm_alt_intr_handler) return -EBUSY;
6534
/* only one install or remove at a time, just fail the others */
6536 if (!spin_trylock(&pfm_alt_install_check)) {
6537 return -EBUSY;
6538 }
6539
/* reserve a system-wide session on each online CPU */
6541 for_each_online_cpu(reserve_cpu) {
6542 ret = pfm_reserve_session(NULL, 1, reserve_cpu);
6543 if (ret) goto cleanup_reserve;
6544 }
6545
/* save the current system-wide PMU state on each CPU */
6547 ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
6548 if (ret) {
6549 DPRINT(("on_each_cpu() failed: %d\n", ret));
6550 goto cleanup_reserve;
6551 }
6552
/* officially change to the alternate interrupt handler */
6554 pfm_alt_intr_handler = hdl;
6555
6556 spin_unlock(&pfm_alt_install_check);
6557
6558 return 0;
6559
6560cleanup_reserve:
6561 for_each_online_cpu(i) {
/* don't unreserve more than we reserved */
6563 if (i >= reserve_cpu) break;
6564
6565 pfm_unreserve_session(NULL, 1, i);
6566 }
6567
6568 spin_unlock(&pfm_alt_install_check);
6569
6570 return ret;
6571}
6572EXPORT_SYMBOL_GPL(pfm_install_alt_pmu_interrupt);
6573
6574int
6575pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
6576{
6577 int i;
6578 int ret;
6579
6580 if (hdl == NULL) return -EINVAL;
6581
/* cannot remove someone else's handler! */
6583 if (pfm_alt_intr_handler != hdl) return -EINVAL;
6584
/* only one install or remove at a time, just fail the others */
6586 if (!spin_trylock(&pfm_alt_install_check)) {
6587 return -EBUSY;
6588 }
6589
6590 pfm_alt_intr_handler = NULL;
6591
6592 ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
6593 if (ret) {
6594 DPRINT(("on_each_cpu() failed: %d\n", ret));
6595 }
6596
6597 for_each_online_cpu(i) {
6598 pfm_unreserve_session(NULL, 1, i);
6599 }
6600
6601 spin_unlock(&pfm_alt_install_check);
6602
6603 return 0;
6604}
6605EXPORT_SYMBOL_GPL(pfm_remove_alt_pmu_interrupt);
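/*
 * Illustrative usage of the alternate handler interface (a sketch, not part
 * of this file; my_desc and my_handler are hypothetical names): an external
 * monitoring agent would do
 *
 *	static pfm_intr_handler_desc_t my_desc = { .handler = my_handler };
 *
 *	if (pfm_install_alt_pmu_interrupt(&my_desc) == 0) {
 *		... PMU frozen, all system-wide sessions reserved ...
 *		pfm_remove_alt_pmu_interrupt(&my_desc);
 *	}
 */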
6606
/*
 * forward declaration: the perfmon pseudo filesystem is set up from
 * pfm_init() below
 */
6610static int init_pfm_fs(void);
6611
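/*
 * pick the PMU description matching this CPU: a configuration may supply
 * its own probe() function; otherwise we match on the CPU family, with
 * 0xff acting as a generic catch-all.
 */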
6612static int __init
6613pfm_probe_pmu(void)
6614{
6615 pmu_config_t **p;
6616 int family;
6617
6618 family = local_cpu_data->family;
6619 p = pmu_confs;
6620
6621 while(*p) {
6622 if ((*p)->probe) {
6623 if ((*p)->probe() == 0) goto found;
6624 } else if ((*p)->pmu_family == family || (*p)->pmu_family == 0xff) {
6625 goto found;
6626 }
6627 p++;
6628 }
6629 return -1;
6630found:
6631 pmu_conf = *p;
6632 return 0;
6633}
6634
6635static const struct file_operations pfm_proc_fops = {
6636 .open = pfm_proc_open,
6637 .read = seq_read,
6638 .llseek = seq_lseek,
6639 .release = seq_release,
6640};
6641
6642int __init
6643pfm_init(void)
6644{
6645 unsigned int n, n_counters, i;
6646
6647 printk("perfmon: version %u.%u IRQ %u\n",
6648 PFM_VERSION_MAJ,
6649 PFM_VERSION_MIN,
6650 IA64_PERFMON_VECTOR);
6651
6652 if (pfm_probe_pmu()) {
6653 printk(KERN_INFO "perfmon: disabled, there is no support for processor family %d\n",
6654 local_cpu_data->family);
6655 return -ENODEV;
6656 }
6657
/*
 * compute the number of implemented PMDs/PMCs from the description
 * tables
 */
6662 n = 0;
6663 for (i=0; PMC_IS_LAST(i) == 0; i++) {
6664 if (PMC_IS_IMPL(i) == 0) continue;
6665 pmu_conf->impl_pmcs[i>>6] |= 1UL << (i&63);
6666 n++;
6667 }
6668 pmu_conf->num_pmcs = n;
6669
6670 n = 0; n_counters = 0;
6671 for (i=0; PMD_IS_LAST(i) == 0; i++) {
6672 if (PMD_IS_IMPL(i) == 0) continue;
6673 pmu_conf->impl_pmds[i>>6] |= 1UL << (i&63);
6674 n++;
6675 if (PMD_IS_COUNTING(i)) n_counters++;
6676 }
6677 pmu_conf->num_pmds = n;
6678 pmu_conf->num_counters = n_counters;
6679
/*
 * sanity checks on the number of debug registers
 */
6683 if (pmu_conf->use_rr_dbregs) {
6684 if (pmu_conf->num_ibrs > IA64_NUM_DBG_REGS) {
6685 printk(KERN_INFO "perfmon: unsupported number of code debug registers (%u)\n", pmu_conf->num_ibrs);
6686 pmu_conf = NULL;
6687 return -1;
6688 }
6689 if (pmu_conf->num_dbrs > IA64_NUM_DBG_REGS) {
6690 printk(KERN_INFO "perfmon: unsupported number of data debug registers (%u)\n", pmu_conf->num_ibrs);
6691 pmu_conf = NULL;
6692 return -1;
6693 }
6694 }
6695
6696 printk("perfmon: %s PMU detected, %u PMCs, %u PMDs, %u counters (%lu bits)\n",
6697 pmu_conf->pmu_name,
6698 pmu_conf->num_pmcs,
6699 pmu_conf->num_pmds,
6700 pmu_conf->num_counters,
6701 ffz(pmu_conf->ovfl_val));
6702
/* sanity check: the register descriptions must fit our tables */
6704 if (pmu_conf->num_pmds >= PFM_NUM_PMD_REGS || pmu_conf->num_pmcs >= PFM_NUM_PMC_REGS) {
6705 printk(KERN_ERR "perfmon: not enough pmc/pmd, perfmon disabled\n");
6706 pmu_conf = NULL;
6707 return -1;
6708 }
6709
/*
 * create /proc/perfmon (mostly for debugging purposes)
 */
6713 perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
6714 if (perfmon_dir == NULL) {
6715 printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
6716 pmu_conf = NULL;
6717 return -1;
6718 }
6719
/*
 * install customized file operations for the /proc/perfmon entry
 */
6722 perfmon_dir->proc_fops = &pfm_proc_fops;
6723
/*
 * create /proc/sys/kernel/perfmon (for debugging purposes)
 */
6727 pfm_sysctl_header = register_sysctl_table(pfm_sysctl_root);
6728
/*
 * initialize all our spinlocks
 */
6732 spin_lock_init(&pfm_sessions.pfs_lock);
6733 spin_lock_init(&pfm_buffer_fmt_lock);
6734
6735 init_pfm_fs();
6736
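/*
 * initialize the per-CPU overflow interrupt cycle minima to ~0UL so that
 * the first measurement always updates them
 */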
6737 for(i=0; i < NR_CPUS; i++) pfm_stats[i].pfm_ovfl_intr_cycles_min = ~0UL;
6738
6739 return 0;
6740}
6741
6742__initcall(pfm_init);
6743
/*
 * this function is called before pfm_init()
 */
6747void
6748pfm_init_percpu (void)
6749{
6750 static int first_time=1;
6751
/*
 * make sure no measurement is active
 * (may inherit programmed PMCs from EFI)
 */
6755 pfm_clear_psr_pp();
6756 pfm_clear_psr_up();
6757
/*
 * we run with the PMU not frozen at all times
 */
6761 pfm_unfreeze_pmu();
6762
6763 if (first_time) {
6764 register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
6765 first_time=0;
6766 }
6767
6768 ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
6769 ia64_srlz_d();
6770}
6771
/*
 * used for debug purposes only
 */
6775void
6776dump_pmu_state(const char *from)
6777{
6778 struct task_struct *task;
6779 struct pt_regs *regs;
6780 pfm_context_t *ctx;
6781 unsigned long psr, dcr, info, flags;
6782 int i, this_cpu;
6783
6784 local_irq_save(flags);
6785
6786 this_cpu = smp_processor_id();
6787 regs = task_pt_regs(current);
6788 info = PFM_CPUINFO_GET();
6789 dcr = ia64_getreg(_IA64_REG_CR_DCR);
6790
6791 if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
6792 local_irq_restore(flags);
6793 return;
6794 }
6795
6796 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6797 this_cpu,
6798 from,
6799 task_pid_nr(current),
6800 regs->cr_iip,
6801 current->comm);
6802
6803 task = GET_PMU_OWNER();
6804 ctx = GET_PMU_CTX();
6805
6806 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6807
6808 psr = pfm_get_psr();
6809
6810 printk("->CPU%d pmc0=0x%lx psr.pp=%d psr.up=%d dcr.pp=%d syst_info=0x%lx user_psr.up=%d user_psr.pp=%d\n",
6811 this_cpu,
6812 ia64_get_pmc(0),
6813 psr & IA64_PSR_PP ? 1 : 0,
6814 psr & IA64_PSR_UP ? 1 : 0,
6815 dcr & IA64_DCR_PP ? 1 : 0,
6816 info,
6817 ia64_psr(regs)->up,
6818 ia64_psr(regs)->pp);
6819
6820 ia64_psr(regs)->up = 0;
6821 ia64_psr(regs)->pp = 0;
6822
6823 for (i=1; PMC_IS_LAST(i) == 0; i++) {
6824 if (PMC_IS_IMPL(i) == 0) continue;
6825 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]);
6826 }
6827
6828 for (i=1; PMD_IS_LAST(i) == 0; i++) {
6829 if (PMD_IS_IMPL(i) == 0) continue;
6830 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]);
6831 }
6832
6833 if (ctx) {
6834 printk("->CPU%d ctx_state=%d vaddr=%p addr=%p fd=%d ctx_task=[%d] saved_psr_up=0x%lx\n",
6835 this_cpu,
6836 ctx->ctx_state,
6837 ctx->ctx_smpl_vaddr,
6838 ctx->ctx_smpl_hdr,
6839 ctx->ctx_msgq_head,
6840 ctx->ctx_msgq_tail,
6841 ctx->ctx_saved_psr_up);
6842 }
6843 local_irq_restore(flags);
6844}
6845
/*
 * called from process.c:copy_thread(). task is the new child.
 */
6849void
6850pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6851{
6852 struct thread_struct *thread;
6853
6854 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6855
6856 thread = &task->thread;
6857
/*
 * cut links inherited from the parent (current)
 */
6861 thread->pfm_context = NULL;
6862
6863 PFM_SET_WORK_PENDING(task, 0);
6864
/*
 * the psr bits are already set properly in copy_thread()
 */
6868}
6869#else
6870asmlinkage long
6871sys_perfmonctl (int fd, int cmd, void *arg, int count)
6872{
6873 return -ENOSYS;
6874}
6875#endif
6876