/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * uninteresting things to an escape code, followed
 * by the relevant data.
 *
 * These buffers are read by the userspace daemon,
 * oprofiled, via the event buffer.
 */
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

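/* Tasks that die while their samples may still sit in un-synced CPU
 * buffers are parked on dying_tasks, later moved to dead_tasks, and
 * only freed after two full sync passes (see process_task_mortuary()).
 * marked_cpus tracks which CPUs have been synced in the current round.
 */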
static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via the RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long flags;
        struct task_struct *task = data;
        spin_lock_irqsave(&task_mortuary, flags);
        list_add(&task->tasks, &dying_tasks);
        spin_unlock_irqrestore(&task_mortuary, flags);
        return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can
 * catch any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
        /* To avoid latency problems, we only process the current CPU,
         * hoping that most samples for the task are on this CPU.
         */
        sync_buffer(raw_smp_processor_id());
        return 0;
}


/* The task is about to try a do_munmap(). We peek at what it is going
 * to unmap, and if it's an executable file mapping, process the samples
 * first, so we don't lose any. This does not have to be exact, it's a
 * quality-of-implementation issue only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
        unsigned long addr = (unsigned long)data;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *mpnt;

        down_read(&mm->mmap_sem);

        mpnt = find_vma(mm, addr);
        if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
                up_read(&mm->mmap_sem);
                /* To avoid latency problems, we only process the
                 * current CPU, hoping that most samples for the
                 * task are on this CPU.
                 */
                sync_buffer(raw_smp_processor_id());
                return 0;
        }

        up_read(&mm->mmap_sem);
        return 0;
}


/* We need to be told about new modules so we don't attribute samples
 * to a previously loaded module, or drop them on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
        if (val != MODULE_STATE_COMING)
                return 0;

        /* FIXME: should we process all CPU buffers ? */
        mutex_lock(&buffer_mutex);
        add_event_entry(ESCAPE_CODE);
        add_event_entry(MODULE_LOADED_CODE);
        mutex_unlock(&buffer_mutex);
#endif
        return 0;
}


static struct notifier_block task_free_nb = {
        .notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
        .notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
        .notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
        .notifier_call = module_load_notify,
};

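/* Called on sync_start() failure and from sync_stop(): run the
 * mortuary twice so that tasks still on dying_tasks make it through
 * dead_tasks and are actually freed, leaving both lists empty.
 */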
static void free_all_tasks(void)
{
        /* make sure we don't leak task structs */
        process_task_mortuary();
        process_task_mortuary();
}
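
/* Hook up the notifiers needed to keep the event buffer consistent
 * (task handoff, task exit, munmap, module load) and start the
 * per-CPU sync work. On failure, everything registered so far is
 * torn down again in reverse order.
 */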
int sync_start(void)
{
        int err;

        if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
                return -ENOMEM;

        err = task_handoff_register(&task_free_nb);
        if (err)
                goto out1;
        err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
        if (err)
                goto out2;
        err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
        if (err)
                goto out3;
        err = register_module_notifier(&module_load_nb);
        if (err)
                goto out4;

        start_cpu_work();

out:
        return err;
out4:
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
        task_handoff_unregister(&task_free_nb);
        free_all_tasks();
out1:
        free_cpumask_var(marked_cpus);
        goto out;
}
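

/* Tear everything down in the reverse order of sync_start() and
 * flush any remaining work before freeing the leftover tasks and
 * the CPU mask.
 */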
void sync_stop(void)
{
        end_cpu_work();
        unregister_module_notifier(&module_load_nb);
        profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
        profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
        task_handoff_unregister(&task_free_nb);
        barrier();                      /* do all of the above first */

        flush_cpu_work();

        free_all_tasks();
        free_cpumask_var(marked_cpus);
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer).
 */
static inline unsigned long fast_get_dcookie(struct path *path)
{
        unsigned long cookie;

        if (path->dentry->d_flags & DCACHE_COOKIE)
                return (unsigned long)path->dentry;
        get_dcookie(path, &cookie);
        return cookie;
}


/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications.
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;

        if (!mm)
                goto out;

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                cookie = fast_get_dcookie(&vma->vm_file->f_path);
                break;
        }

out:
        return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
        unsigned long cookie = NO_COOKIE;
        struct vm_area_struct *vma;

        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

                if (addr < vma->vm_start || addr >= vma->vm_end)
                        continue;

                if (vma->vm_file) {
                        cookie = fast_get_dcookie(&vma->vm_file->f_path);
                        *offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
                                vma->vm_start;
                } else {
                        /* must be an anonymous map */
                        *offset = addr;
                }

                break;
        }

        if (!vma)
                cookie = INVALID_COOKIE;

        return cookie;
}

static unsigned long last_cookie = INVALID_COOKIE;

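/* The global event buffer is a stream of untyped unsigned longs read
 * by the userspace daemon. Plain entries are offset/event sample pairs;
 * anything else is announced by an ESCAPE_CODE entry followed by a code
 * describing the record (CPU switch, task switch, cookie switch, trace
 * begin, ...) and its payload. The helpers below emit those records.
 */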
static void add_cpu_switch(int i)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CPU_SWITCH_CODE);
        add_event_entry(i);
        last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
        add_event_entry(ESCAPE_CODE);
        if (in_kernel)
                add_event_entry(KERNEL_ENTER_SWITCH_CODE);
        else
                add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_SWITCH_CODE);
        add_event_entry(task->pid);
        add_event_entry(cookie);
        /* Another code for daemon back-compat */
        add_event_entry(ESCAPE_CODE);
        add_event_entry(CTX_TGID_CODE);
        add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(COOKIE_SWITCH_CODE);
        add_event_entry(cookie);
}


static void add_trace_begin(void)
{
        add_event_entry(ESCAPE_CODE);
        add_event_entry(TRACE_BEGIN_CODE);
}
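
/* Emit an extended record carried in a cpu_buffer entry: a code, a pc
 * that is translated to a cookie/offset pair when an mm is available,
 * and any remaining payload words (architecture-specific sample data,
 * for instance). The exact payload depends on what the CPU buffer
 * producer stored.
 */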
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
        unsigned long code, pc, val;
        unsigned long cookie;
        off_t offset;

        if (!op_cpu_buffer_get_data(entry, &code))
                return;
        if (!op_cpu_buffer_get_data(entry, &pc))
                return;
        if (!op_cpu_buffer_get_size(entry))
                return;

        if (mm) {
                cookie = lookup_dcookie(mm, pc, &offset);

                if (cookie == NO_COOKIE)
                        offset = pc;
                if (cookie == INVALID_COOKIE) {
                        atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                        offset = pc;
                }
                if (cookie != last_cookie) {
                        add_cookie_switch(cookie);
                        last_cookie = cookie;
                }
        } else
                offset = pc;

        add_event_entry(ESCAPE_CODE);
        add_event_entry(code);
        add_event_entry(offset);        /* offset from the dcookie */

        while (op_cpu_buffer_get_data(entry, &val))
                add_event_entry(val);
}

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
        add_event_entry(offset);
        add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
        unsigned long cookie;
        off_t offset;

        if (in_kernel) {
                add_sample_entry(s->eip, s->event);
                return 1;
        }

        /* add userspace sample */

        if (!mm) {
                atomic_inc(&oprofile_stats.sample_lost_no_mm);
                return 0;
        }

        cookie = lookup_dcookie(mm, s->eip, &offset);

        if (cookie == INVALID_COOKIE) {
                atomic_inc(&oprofile_stats.sample_lost_no_mapping);
                return 0;
        }

        if (cookie != last_cookie) {
                add_cookie_switch(cookie);
                last_cookie = cookie;
        }

        add_sample_entry(offset, s->event);

        return 1;
}
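

/* Drop the mmap_sem taken by take_tasks_mm() and the reference
 * acquired by get_task_mm(). Safe to call with a NULL mm.
 */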
static void release_mm(struct mm_struct *mm)
{
        if (!mm)
                return;
        up_read(&mm->mmap_sem);
        mmput(mm);
}
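

/* Grab a reference to the task's mm and take its mmap_sem for reading,
 * so lookup_dcookie() can walk the VMA list safely. Pairs with
 * release_mm().
 */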
static struct mm_struct *take_tasks_mm(struct task_struct *task)
{
        struct mm_struct *mm = get_task_mm(task);
        if (mm)
                down_read(&mm->mmap_sem);
        return mm;
}


static inline int is_code(unsigned long val)
{
        return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
        unsigned long flags;
        LIST_HEAD(local_dead_tasks);
        struct task_struct *task;
        struct task_struct *ttask;

        spin_lock_irqsave(&task_mortuary, flags);

        list_splice_init(&dead_tasks, &local_dead_tasks);
        list_splice_init(&dying_tasks, &dead_tasks);

        spin_unlock_irqrestore(&task_mortuary, flags);

        list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
                list_del(&task->tasks);
                free_task(task);
        }
}
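

/* Record that this CPU's buffer has been synced. Once every online
 * CPU has been marked, one mortuary pass is made and the mask is
 * cleared for the next round.
 */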
static void mark_done(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, marked_cpus);

        for_each_online_cpu(i) {
                if (!cpumask_test_cpu(i, marked_cpus))
                        return;
        }

        /* All CPUs have been processed at least once,
         * so we can process the mortuary once.
         */
        process_task_mortuary();

        cpumask_clear(marked_cpus);
}


/* Per-pass state of the sync loop: samples are dropped while in
 * sb_buffer_start, because nothing is known about the kernel/user
 * context yet; sb_bt_start marks the entries of a backtrace; and
 * sb_bt_ignore drops the remainder of a backtrace whose first
 * sample could not be mapped.
 */
typedef enum {
        sb_bt_ignore = -2,
        sb_buffer_start,
        sb_bt_start,
        sb_sample_start,
} sync_buffer_state;


/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * a lookup in task->mm->mmap to convert EIP into dcookie/offset
 * pairs.
 */
void sync_buffer(int cpu)
{
        struct mm_struct *mm = NULL;
        struct mm_struct *oldmm;
        unsigned long val;
        struct task_struct *new;
        unsigned long cookie = 0;
        int in_kernel = 1;
        sync_buffer_state state = sb_buffer_start;
        unsigned int i;
        unsigned long available;
        unsigned long flags;
        struct op_entry entry;
        struct op_sample *sample;

        mutex_lock(&buffer_mutex);

        add_cpu_switch(cpu);

        op_cpu_buffer_reset(cpu);
        available = op_cpu_buffer_entries(cpu);

        for (i = 0; i < available; ++i) {
                sample = op_cpu_buffer_read_entry(&entry, cpu);
                if (!sample)
                        break;

                if (is_code(sample->eip)) {
                        flags = sample->event;
                        if (flags & TRACE_BEGIN) {
                                state = sb_bt_start;
                                add_trace_begin();
                        }
                        if (flags & KERNEL_CTX_SWITCH) {
                                /* kernel/userspace switch */
                                in_kernel = flags & IS_KERNEL;
                                if (state == sb_buffer_start)
                                        state = sb_sample_start;
                                add_kernel_ctx_switch(flags & IS_KERNEL);
                        }
                        if (flags & USER_CTX_SWITCH
                            && op_cpu_buffer_get_data(&entry, &val)) {
                                /* userspace context switch */
                                new = (struct task_struct *)val;
                                oldmm = mm;
                                release_mm(oldmm);
                                mm = take_tasks_mm(new);
                                if (mm != oldmm)
                                        cookie = get_exec_dcookie(mm);
                                add_user_ctx_switch(new, cookie);
                        }
                        if (op_cpu_buffer_get_size(&entry))
                                add_data(&entry, mm);
                        continue;
                }

                if (state < sb_bt_start)
                        /* ignore sample */
                        continue;

                if (add_sample(mm, sample, in_kernel))
                        continue;

                /* ignore backtraces if failed to add a sample */
                if (state == sb_bt_start) {
                        state = sb_bt_ignore;
                        atomic_inc(&oprofile_stats.bt_lost_no_mapping);
                }
        }
        release_mm(mm);

        mark_done(cpu);

        mutex_unlock(&buffer_mutex);
}
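
/* The stream produced by one sync pass thus looks like, for example:
 *
 *   ESCAPE_CODE, CPU_SWITCH_CODE, cpu
 *   ESCAPE_CODE, CTX_SWITCH_CODE, pid, cookie,
 *   ESCAPE_CODE, CTX_TGID_CODE, tgid
 *   ESCAPE_CODE, COOKIE_SWITCH_CODE, cookie
 *   offset, event
 *   offset, event
 *   ...
 *
 * which the userspace daemon decodes back into per-binary samples.
 */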


/* This function can be used to add a buffer worth of data directly to
 * the kernel buffer. The buffer is assumed to be a circular buffer.
 * Take the entries from index start and end at index stop, wrapping
 * at max entries.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
                       unsigned int stop, unsigned int max)
{
        int i;

        i = start;

        mutex_lock(&buffer_mutex);
        while (i != stop) {
                add_event_entry(buf[i++]);

                if (i >= max)
                        i = 0;
        }

        mutex_unlock(&buffer_mutex);
}