/*
 *  linux/kernel/profile.c
 *
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 */
#include <linux/export.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
        u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
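
/*
 * A hit table is one page of struct profile_hit entries, used as an
 * open-addressed hash table and probed in groups of PROFILE_GRPSZ
 * slots; see do_profile_hits() below.
 */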

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_var_t prof_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * Each CPU gets two hit tables; cpu_profile_flip is the index (0 or 1)
 * of the one currently collecting hits, while the other is drained by
 * the profile reader.
 */
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP && CONFIG_PROC_FS */

int profile_setup(char *str)
{
        static const char schedstr[] = "schedule";
        static const char sleepstr[] = "sleep";
        static const char kvmstr[] = "kvm";
        int par;

        if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
                force_schedstat_enabled();
                prof_on = SLEEP_PROFILING;
                if (str[strlen(sleepstr)] == ',')
                        str += strlen(sleepstr) + 1;
                if (get_option(&str, &par))
                        prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
                pr_info("kernel sleep profiling enabled (shift: %lu)\n",
                        prof_shift);
#else
                pr_warn("kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
        } else if (!strncmp(str, schedstr, strlen(schedstr))) {
                prof_on = SCHED_PROFILING;
                if (str[strlen(schedstr)] == ',')
                        str += strlen(schedstr) + 1;
                if (get_option(&str, &par))
                        prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
                pr_info("kernel schedule profiling enabled (shift: %lu)\n",
                        prof_shift);
        } else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
                prof_on = KVM_PROFILING;
                if (str[strlen(kvmstr)] == ',')
                        str += strlen(kvmstr) + 1;
                if (get_option(&str, &par))
                        prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
                pr_info("kernel KVM profiling enabled (shift: %lu)\n",
                        prof_shift);
        } else if (get_option(&str, &par)) {
                prof_shift = clamp(par, 0, BITS_PER_LONG - 1);
                prof_on = CPU_PROFILING;
                pr_info("kernel profiling enabled (shift: %lu)\n",
                        prof_shift);
        }
        return 1;
}
__setup("profile=", profile_setup);
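
/*
 * Example "profile=" boot options, as parsed above:
 *
 *      profile=2               CPU profiling, one counter per 4 bytes of text
 *      profile=schedule,5      schedule() profiling, 32-byte buckets
 *      profile=sleep,0         sleep profiling (needs CONFIG_SCHEDSTATS)
 *      profile=kvm,2           profile guest PCs reported by KVM
 */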

int __ref profile_init(void)
{
        int buffer_bytes;
        if (!prof_on)
                return 0;

        /* only text is profiled */
        prof_len = (_etext - _stext) >> prof_shift;
        buffer_bytes = prof_len * sizeof(atomic_t);

        if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(prof_cpu_mask, cpu_possible_mask);

        /* try kmalloc first, then the page allocator, then vmalloc */
        prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
        if (prof_buffer)
                return 0;

        prof_buffer = alloc_pages_exact(buffer_bytes,
                                        GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
        if (prof_buffer)
                return 0;

        prof_buffer = vzalloc(buffer_bytes);
        if (prof_buffer)
                return 0;

        free_cpumask_var(prof_cpu_mask);
        return -ENOMEM;
}

/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
        blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
        int ret;
        ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
        return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
        blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
        return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
        return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
        int err = -EINVAL;

        switch (type) {
        case PROFILE_TASK_EXIT:
                err = blocking_notifier_chain_register(
                                &task_exit_notifier, n);
                break;
        case PROFILE_MUNMAP:
                err = blocking_notifier_chain_register(
                                &munmap_notifier, n);
                break;
        }

        return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
        int err = -EINVAL;

        switch (type) {
        case PROFILE_TASK_EXIT:
                err = blocking_notifier_chain_unregister(
                                &task_exit_notifier, n);
                break;
        case PROFILE_MUNMAP:
                err = blocking_notifier_chain_unregister(
                                &munmap_notifier, n);
                break;
        }

        return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);
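
/*
 * Illustrative use of the notifier API above (my_exit_cb and my_nb are
 * hypothetical, not part of this file):
 *
 *      static int my_exit_cb(struct notifier_block *nb,
 *                            unsigned long val, void *data)
 *      {
 *              struct task_struct *task = data;
 *
 *              pr_debug("task %d exiting\n", task->pid);
 *              return NOTIFY_OK;
 *      }
 *      static struct notifier_block my_nb = { .notifier_call = my_exit_cb };
 *
 *      profile_event_register(PROFILE_TASK_EXIT, &my_nb);
 *      ...
 *      profile_event_unregister(PROFILE_TASK_EXIT, &my_nb);
 */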

#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
 * On SMP, hits are not accounted directly in prof_buffer. Each CPU
 * owns a pair of single-page open-addressed hash tables of pending
 * hits (cpu_profile_hits), and folds a table into the global buffer
 * only when it fills up or when the profile is read. This amortizes
 * the (possibly NUMA-remote) atomic accesses to prof_buffer that
 * would otherwise be made from every timer interrupt.
 *
 * cpu_profile_flip selects the table a CPU currently logs into.
 * profile_flip_buffers() IPIs every CPU to flip to the other table
 * (__profile_flip_buffers) and then drains the now-idle tables into
 * prof_buffer, so the reader never touches a table that interrupt
 * context is still writing.
 */
static void __profile_flip_buffers(void *unused)
{
        int cpu = smp_processor_id();

        per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
        int i, j, cpu;

        mutex_lock(&profile_flip_mutex);
        j = per_cpu(cpu_profile_flip, get_cpu());
        put_cpu();
        on_each_cpu(__profile_flip_buffers, NULL, 1);
        for_each_online_cpu(cpu) {
                struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
                for (i = 0; i < NR_PROFILE_HIT; ++i) {
                        if (!hits[i].hits) {
                                if (hits[i].pc)
                                        hits[i].pc = 0;
                                continue;
                        }
                        atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
                        hits[i].hits = hits[i].pc = 0;
                }
        }
        mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
        int i, cpu;

        mutex_lock(&profile_flip_mutex);
        i = per_cpu(cpu_profile_flip, get_cpu());
        put_cpu();
        on_each_cpu(__profile_flip_buffers, NULL, 1);
        for_each_online_cpu(cpu) {
                struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
                memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
        }
        mutex_unlock(&profile_flip_mutex);
}

static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
        unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
        int i, j, cpu;
        struct profile_hit *hits;

        pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
        i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        cpu = get_cpu();
        hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
        if (!hits) {
                put_cpu();
                return;
        }
        /*
         * We buffer the global profiler buffer into a per-CPU
         * queue and thus reduce the number of global (and possibly
         * NUMA-alien) accesses. The write-queue is self-coalescing:
         */
        local_irq_save(flags);
        do {
                for (j = 0; j < PROFILE_GRPSZ; ++j) {
                        if (hits[i + j].pc == pc) {
                                hits[i + j].hits += nr_hits;
                                goto out;
                        } else if (!hits[i + j].hits) {
                                hits[i + j].pc = pc;
                                hits[i + j].hits = nr_hits;
                                goto out;
                        }
                }
                i = (i + secondary) & (NR_PROFILE_HIT - 1);
        } while (i != primary);

        /*
         * Add the current hit(s) and flush the write-queue out
         * to the global buffer:
         */
        atomic_add(nr_hits, &prof_buffer[pc]);
        for (i = 0; i < NR_PROFILE_HIT; ++i) {
                atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
                hits[i].pc = hits[i].hits = 0;
        }
out:
        local_irq_restore(flags);
        put_cpu();
}

static int profile_cpu_callback(struct notifier_block *info,
                                unsigned long action, void *__cpu)
{
        int node, cpu = (unsigned long)__cpu;
        struct page *page;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                node = cpu_to_mem(cpu);
                per_cpu(cpu_profile_flip, cpu) = 0;
                if (!per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = __alloc_pages_node(node,
                                        GFP_KERNEL | __GFP_ZERO,
                                        0);
                        if (!page)
                                return notifier_from_errno(-ENOMEM);
                        per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
                }
                if (!per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = __alloc_pages_node(node,
                                        GFP_KERNEL | __GFP_ZERO,
                                        0);
                        if (!page)
                                goto out_free;
                        per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
                }
                break;
out_free:
                page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                __free_page(page);
                return notifier_from_errno(-ENOMEM);
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (prof_cpu_mask != NULL)
                        cpumask_set_cpu(cpu, prof_cpu_mask);
                break;
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                if (prof_cpu_mask != NULL)
                        cpumask_clear_cpu(cpu, prof_cpu_mask);
                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
                        per_cpu(cpu_profile_hits, cpu)[0] = NULL;
                        __free_page(page);
                }
                if (per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                        per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                        __free_page(page);
                }
                break;
        }
        return NOTIFY_OK;
}
#else /* !CONFIG_SMP || !CONFIG_PROC_FS */
#define profile_flip_buffers() do { } while (0)
#define profile_discard_flip_buffers() do { } while (0)
#define profile_cpu_callback NULL

/* UP (or no procfs): account hits directly in the global buffer. */
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
        unsigned long pc;

        pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
        atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP || !CONFIG_PROC_FS */

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
        if (prof_on != type || !prof_buffer)
                return;
        do_profile_hits(type, __pc, nr_hits);
}
EXPORT_SYMBOL_GPL(profile_hits);
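
/*
 * Callers hand in the profiling type they serve; e.g. on x86, KVM
 * reports guest RIPs via profile_hit(KVM_PROFILING, ...) on VM-exit
 * (profile_hit() is the nr_hits == 1 wrapper from <linux/profile.h>).
 * Hits are dropped above unless that type was selected at boot.
 */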

/* Called from the timer interrupt to record a hit at the interrupted PC. */
void profile_tick(int type)
{
        struct pt_regs *regs = get_irq_regs();

        if (!user_mode(regs) && prof_cpu_mask != NULL &&
            cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
                profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/uaccess.h>

static int prof_cpu_mask_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%*pb\n", cpumask_pr_args(prof_cpu_mask));
        return 0;
}

static int prof_cpu_mask_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, prof_cpu_mask_proc_show, NULL);
}

static ssize_t prof_cpu_mask_proc_write(struct file *file,
        const char __user *buffer, size_t count, loff_t *pos)
{
        cpumask_var_t new_value;
        int err;

        if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
                return -ENOMEM;

        err = cpumask_parse_user(buffer, count, new_value);
        if (!err) {
                cpumask_copy(prof_cpu_mask, new_value);
                err = count;
        }
        free_cpumask_var(new_value);
        return err;
}

static const struct file_operations prof_cpu_mask_proc_fops = {
        .open = prof_cpu_mask_proc_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = prof_cpu_mask_proc_write,
};
460
461void create_prof_cpu_mask(void)
462{
463
464 proc_create("irq/prof_cpu_mask", 0600, NULL, &prof_cpu_mask_proc_fops);
465}
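
/*
 * Example: limit profiling ticks to CPUs 0-3 (the file takes a hex
 * cpumask, parsed by prof_cpu_mask_proc_write() above):
 *
 *      echo f > /proc/irq/prof_cpu_mask
 */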

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read;
        char *pnt;
        unsigned int sample_step = 1 << prof_shift;

        profile_flip_buffers();
        if (p >= (prof_len+1)*sizeof(unsigned int))
                return 0;
        if (count > (prof_len+1)*sizeof(unsigned int) - p)
                count = (prof_len+1)*sizeof(unsigned int) - p;
        read = 0;

        while (p < sizeof(unsigned int) && count > 0) {
                if (put_user(*((char *)(&sample_step)+p), buf))
                        return -EFAULT;
                buf++; p++; count--; read++;
        }
        pnt = (char *)prof_buffer + p - sizeof(atomic_t);
        if (copy_to_user(buf, (void *)pnt, count))
                return -EFAULT;
        read += count;
        *ppos += read;
        return read;
}
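
/*
 * Layout as seen from userspace (a minimal sketch; readprofile(1) is
 * the real consumer):
 *
 *      unsigned int step;              // first word: 1 << prof_shift
 *      int fd = open("/proc/profile", O_RDONLY);
 *
 *      read(fd, &step, sizeof(step));
 *      // ...followed by prof_len counters (sizeof(atomic_t) each,
 *      // 32-bit on typical configs); counter i covers the text at
 *      // _stext + i * step.
 */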

/*
 * Writing to /proc/profile resets the counters.
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
        extern int setup_profiling_timer(unsigned int multiplier);

        if (count == sizeof(int)) {
                unsigned int multiplier;

                if (copy_from_user(&multiplier, buf, sizeof(int)))
                        return -EFAULT;

                if (setup_profiling_timer(multiplier))
                        return -EINVAL;
        }
#endif
        profile_discard_flip_buffers();
        memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
        return count;
}

static const struct file_operations proc_profile_operations = {
        .read = read_profile,
        .write = write_profile,
        .llseek = default_llseek,
};

#ifdef CONFIG_SMP
static void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
        int cpu;

        for_each_online_cpu(cpu) {
                int node = cpu_to_mem(cpu);
                struct page *page;

                page = __alloc_pages_node(node,
                                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
                                0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[1]
                                = (struct profile_hit *)page_address(page);
                page = __alloc_pages_node(node,
                                GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
                                0);
                if (!page)
                        goto out_cleanup;
                per_cpu(cpu_profile_hits, cpu)[0]
                                = (struct profile_hit *)page_address(page);
        }
        return 0;
out_cleanup:
        /*
         * Disable profiling, then kick every CPU with a no-op IPI:
         * once that returns, no CPU can still be inside the
         * irq-disabled hit path, so the pages are safe to free.
         */
        prof_on = 0;
        smp_mb();
        on_each_cpu(profile_nop, NULL, 1);
        for_each_online_cpu(cpu) {
                struct page *page;

                if (per_cpu(cpu_profile_hits, cpu)[0]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
                        per_cpu(cpu_profile_hits, cpu)[0] = NULL;
                        __free_page(page);
                }
                if (per_cpu(cpu_profile_hits, cpu)[1]) {
                        page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
                        per_cpu(cpu_profile_hits, cpu)[1] = NULL;
                        __free_page(page);
                }
        }
        return -1;
}
#else
#define create_hash_tables() ({ 0; })
#endif

int __ref create_proc_profile(void)
{
        struct proc_dir_entry *entry;
        int err = 0;

        if (!prof_on)
                return 0;

        cpu_notifier_register_begin();

        if (create_hash_tables()) {
                err = -ENOMEM;
                goto out;
        }

        entry = proc_create("profile", S_IWUSR | S_IRUGO,
                            NULL, &proc_profile_operations);
        if (!entry) {
                err = -ENOMEM;
                goto out;
        }
        proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
        __hotcpu_notifier(profile_cpu_callback, 0);

out:
        cpu_notifier_register_done();
        return err;
}
subsys_initcall(create_proc_profile);
#endif /* CONFIG_PROC_FS */