/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* The purpose of this file is to handle SPU event task switching
 * and to record SPU context information into the OProfile
 * event buffer.
 *
 * Additionally, the spu_sync_buffer function is provided as a helper
 * for recording actual SPU program counter samples to the event buffer.
 */
#include <linux/dcookies.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
#include <linux/oprofile.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "pr_util.h"

#define RELEASE_ALL 9999

static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
int spu_prof_num_nodes;

struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
struct delayed_work spu_work;
static unsigned max_spu_buff;

static void spu_buff_add(unsigned long int value, int spu)
{
	/* The spu buffer is a circular buffer.  Add entries to the
	 * head.  Head is the index to store the next value.
	 * The buffer is full when there is one available entry
	 * in the buffer, i.e. head is not equal to tail, but
	 * head + 1 is equal to tail.  The buffer is empty when
	 * head is equal to tail.
	 *
	 * NOTE: Callers must hold buffer_lock.
	 */
	int full = 1;

	if (spu_buff[spu].head >= spu_buff[spu].tail) {
		if ((spu_buff[spu].head - spu_buff[spu].tail)
		    < (max_spu_buff - 1))
			full = 0;

	} else if (spu_buff[spu].tail > spu_buff[spu].head) {
		if ((spu_buff[spu].tail - spu_buff[spu].head)
		    > 1)
			full = 0;
	}

	if (!full) {
		spu_buff[spu].buff[spu_buff[spu].head] = value;
		spu_buff[spu].head++;

		if (spu_buff[spu].head >= max_spu_buff)
			spu_buff[spu].head = 0;
	} else {
		/* The buffer is full; drop the sample and report it
		 * as lost so the overflow is visible to user space,
		 * just as with the per-cpu buffers.
		 */
		oprofile_cpu_buffer_inc_smpl_lost();
	}
}

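/* Copy the contents of each per-SPU buffer to the OProfile
 * kernel buffer.
 */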
void sync_spu_buff(void)
{
	int spu;
	unsigned long flags;
	int curr_head;

	for (spu = 0; spu < num_spu_nodes; spu++) {
		/* Skip SPUs whose buffer failed to allocate. */
		if (spu_buff[spu].buff == NULL)
			continue;

		/* Hold the lock to take a consistent snapshot of the
		 * head; entries between tail and the snapshot can
		 * then be copied safely while new entries may still
		 * be appended at the head.
		 */
		spin_lock_irqsave(&buffer_lock, flags);
		curr_head = spu_buff[spu].head;
		spin_unlock_irqrestore(&buffer_lock, flags);

		/* Transfer the current contents to the kernel buffer.
		 * Data can still be added to the circular buffer
		 * while the copy is in progress.
		 */
		oprofile_put_buff(spu_buff[spu].buff,
				  spu_buff[spu].tail,
				  curr_head, max_spu_buff);

		spin_lock_irqsave(&buffer_lock, flags);
		spu_buff[spu].tail = curr_head;
		spin_unlock_irqrestore(&buffer_lock, flags);
	}
}

static void wq_sync_spu_buff(struct work_struct *work)
{
	/* Move data from the per-SPU buffers to the kernel buffer. */
	sync_spu_buff();

	/* Only reschedule if profiling is still running. */
	if (spu_prof_running)
		schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

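/* Container for caching information about an active SPU task. */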
struct cached_info {
	struct vma_to_fileoffset_map *map;
	struct spu *the_spu;
	struct kref cache_ref;
};

static struct cached_info *spu_info[MAX_NUMNODES * SPUS_PER_NODE];

static void destroy_cached_info(struct kref *kref)
{
	struct cached_info *info;

	info = container_of(kref, struct cached_info, cache_ref);
	vma_map_free(info->map);
	kfree(info);
	module_put(THIS_MODULE);
}

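/* Return the cached_info for the passed SPU number.
 * NOTE: Callers are responsible for obtaining the cache_lock
 * if needed prior to invoking this function.
 */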
static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
{
	struct kref *ref;
	struct cached_info *ret_info;

	if (spu_num >= num_spu_nodes) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: Invalid index %d into spu info cache\n",
		       __func__, __LINE__, spu_num);
		ret_info = NULL;
		goto out;
	}
	if (!spu_info[spu_num] && the_spu) {
		ref = spu_get_profile_private_kref(the_spu->ctx);
		if (ref) {
			spu_info[spu_num] = container_of(ref,
					struct cached_info, cache_ref);
			kref_get(&spu_info[spu_num]->cache_ref);
		}
	}

	ret_info = spu_info[spu_num];
out:
	return ret_info;
}

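/* Look for cached info for the passed SPU.  If not found, create
 * and cache it for the SPU.
 * Returns 0 for success; otherwise, a negative errno value.
 */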
static int
prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
{
	unsigned long flags;
	struct vma_to_fileoffset_map *new_map;
	int retval = 0;
	struct cached_info *info;

	/* We don't bother taking the cache_lock here, since we only
	 * check whether a cached_info already exists.
	 */
	info = get_cached_info(spu, spu->number);

	if (info) {
		pr_debug("Found cached SPU info.\n");
		goto out;
	}

	/* Create cached_info and set spu_info[spu->number] to point
	 * to it.  spu->number is a system-wide value, not a per-node
	 * value.
	 */
	info = kzalloc(sizeof(struct cached_info), GFP_KERNEL);
	if (!info) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: failed to allocate cached_info\n",
		       __func__, __LINE__);
		retval = -ENOMEM;
		goto err_alloc;
	}
	new_map = create_vma_map(spu, objectId);
	if (!new_map) {
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: create vma_map failed\n",
		       __func__, __LINE__);
		retval = -ENOMEM;
		goto err_alloc;
	}

	pr_debug("Created vma_map\n");
	info->map = new_map;
	info->the_spu = spu;
	kref_init(&info->cache_ref);
	spin_lock_irqsave(&cache_lock, flags);
	spu_info[spu->number] = info;
	/* Increment count before passing off the reference to SPUFS. */
	kref_get(&info->cache_ref);

	/* We increment the module refcount since SPUFS is responsible
	 * for the final destruction of the cached_info, and it must
	 * be able to call destroy_cached_info(), which is defined in
	 * this module.  The refcount is dropped again in
	 * destroy_cached_info().
	 */
	try_module_get(THIS_MODULE);
	spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
				     destroy_cached_info);
	spin_unlock_irqrestore(&cache_lock, flags);
	goto out;

err_alloc:
	kfree(info);
out:
	return retval;
}

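/* Release the cached_info for the given SPU index, or for all
 * SPUs when called with RELEASE_ALL.
 * NOTE: The caller is responsible for locking the cache_lock
 * prior to calling this function.
 */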
static int release_cached_info(int spu_index)
{
	int index, end;

	if (spu_index == RELEASE_ALL) {
		end = num_spu_nodes;
		index = 0;
	} else {
		if (spu_index >= num_spu_nodes) {
			printk(KERN_ERR "SPU_PROF: "
			       "%s, line %d: "
			       "Invalid index %d into spu info cache\n",
			       __func__, __LINE__, spu_index);
			goto out;
		}
		end = spu_index + 1;
		index = spu_index;
	}
	for (; index < end; index++) {
		if (spu_info[index]) {
			kref_put(&spu_info[index]->cache_ref,
				 destroy_cached_info);
			spu_info[index] = NULL;
		}
	}

out:
	return 0;
}

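/* Optimization: we can manage without taking the dcookie
 * semaphore because we cannot reach this code without at least
 * one dcookie user still being registered (namely, the reader
 * of the event buffer).
 */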
static inline unsigned long fast_get_dcookie(struct path *path)
{
	unsigned long cookie;

	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}

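/* Look up the dcookie for the task's mm->exe_file, which
 * corresponds loosely to "application name", and determine the
 * offset of the SPU ELF object within it.  A non-zero offset
 * implies an embedded SPU object; otherwise, the SPU binary is a
 * separate file, in which case we retrieve its dcookie instead.
 */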
static unsigned long
get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
			    unsigned long *spu_bin_dcookie,
			    unsigned long spu_ref)
{
	unsigned long app_cookie = 0;
	unsigned int my_offset = 0;
	struct vm_area_struct *vma;
	struct mm_struct *mm = spu->mm;

	if (!mm)
		goto out;

	down_read(&mm->mmap_sem);

	if (mm->exe_file) {
		app_cookie = fast_get_dcookie(&mm->exe_file->f_path);
		pr_debug("got dcookie for %s\n",
			 mm->exe_file->f_dentry->d_name.name);
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
			continue;
		my_offset = spu_ref - vma->vm_start;
		if (!vma->vm_file)
			goto fail_no_image_cookie;

		pr_debug("Found spu ELF at %X(object-id:%lx) for file %s\n",
			 my_offset, spu_ref,
			 vma->vm_file->f_dentry->d_name.name);
		*offsetp = my_offset;
		break;
	}

	/* No vma covering spu_ref was found, so the dcookie for the
	 * SPU binary cannot be determined.
	 */
	if (!vma)
		goto fail_no_image_cookie;

	*spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
	pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name);

	up_read(&mm->mmap_sem);

out:
	return app_cookie;

fail_no_image_cookie:
	up_read(&mm->mmap_sem);

	printk(KERN_ERR "SPU_PROF: "
	       "%s, line %d: Cannot find dcookie for SPU binary\n",
	       __func__, __LINE__);
	goto out;
}

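/* Invoked on an SPU context switch to record the context
 * information (SPU number, PID, TGID, application dcookie, SPU
 * binary dcookie, and offset) in the per-SPU buffer.
 */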
static int process_context_switch(struct spu *spu, unsigned long objectId)
{
	unsigned long flags;
	int retval;
	unsigned int offset = 0;
	unsigned long spu_cookie = 0, app_dcookie;

	retval = prepare_cached_spu_info(spu, objectId);
	if (retval)
		goto out;

	/* Get the dcookie first, because a mutex may be taken in
	 * that code path, so interrupts must not be disabled yet.
	 */
	app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId);
	if (!app_dcookie || !spu_cookie) {
		retval = -ENOENT;
		goto out;
	}

	/* Record context info in the event buffer. */
	spin_lock_irqsave(&buffer_lock, flags);
	spu_buff_add(ESCAPE_CODE, spu->number);
	spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
	spu_buff_add(spu->number, spu->number);
	spu_buff_add(spu->pid, spu->number);
	spu_buff_add(spu->tgid, spu->number);
	spu_buff_add(app_dcookie, spu->number);
	spu_buff_add(spu_cookie, spu->number);
	spu_buff_add(offset, spu->number);

	/* Set flag to indicate SPU PC data can now be written out.
	 * If the SPU program counter data is seen before an SPU
	 * context record is seen, the postprocessing will fail.
	 */
	spu_buff[spu->number].ctx_sw_seen = 1;

	spin_unlock_irqrestore(&buffer_lock, flags);
	smp_wmb();	/* ensure the context-switch record is visible
			 * before any subsequent samples */

out:
	return retval;
}

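/* Invoked on SPU task-switch notifications.  A zero val means the
 * SPU task is ending, so its cached info is released; otherwise
 * val is the object ID of the incoming SPU task, whose context
 * switch is recorded.
 */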
static int spu_active_notify(struct notifier_block *self, unsigned long val,
			     void *data)
{
	int retval;
	unsigned long flags;
	struct spu *the_spu = data;

	pr_debug("SPU event notification arrived\n");
	if (!val) {
		spin_lock_irqsave(&cache_lock, flags);
		retval = release_cached_info(the_spu->number);
		spin_unlock_irqrestore(&cache_lock, flags);
	} else {
		retval = process_context_switch(the_spu, val);
	}
	return retval;
}

static struct notifier_block spu_active = {
	.notifier_call = spu_active_notify,
};

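/* Return one more than the highest Cell node number that has an
 * online CPU, i.e. the number of nodes when node numbering is
 * dense.
 */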
static int number_of_online_nodes(void)
{
	u32 cpu;
	u32 tmp;
	int nodes = 0;

	for_each_online_cpu(cpu) {
		/* Track the highest node number seen so that sparse
		 * node numbering is handled correctly.
		 */
		tmp = cbe_cpu_to_node(cpu) + 1;
		if (tmp > nodes)
			nodes = tmp;
	}
	return nodes;
}

static int oprofile_spu_buff_create(void)
{
	int spu;

	max_spu_buff = oprofile_get_cpu_buffer_size();

	for (spu = 0; spu < num_spu_nodes; spu++) {
		/* Initialize the head/tail indices of the circular
		 * buffer; accesses are serialized by buffer_lock.
		 */
		spu_buff[spu].head = 0;
		spu_buff[spu].tail = 0;

		/* Create a buffer for each SPU.  We can't reliably
		 * create a single buffer for all SPUs due to not
		 * enough contiguous kernel memory.
		 */
		spu_buff[spu].buff = kcalloc(max_spu_buff,
					     sizeof(unsigned long),
					     GFP_KERNEL);

		if (!spu_buff[spu].buff) {
			printk(KERN_ERR "SPU_PROF: "
			       "%s, line %d: oprofile_spu_buff_create "
			       "failed to allocate spu buffer %d.\n",
			       __func__, __LINE__, spu);

			/* Release the SPU buffers allocated so far. */
			while (spu >= 0) {
				kfree(spu_buff[spu].buff);
				spu_buff[spu].buff = NULL;
				spu--;
			}
			return -ENOMEM;
		}
	}
	return 0;
}

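/* The main purpose of this function is to synchronize OProfile
 * with SPUFS by registering to be notified of SPU task switches.
 *
 * NOTE: When profiling SPUs, we must ensure that only
 * spu_sync_start is invoked and not the generic sync_start in
 * drivers/oprofile/oprof.c.  A return value of SKIP_GENERIC_SYNC
 * or SYNC_START_ERROR will accomplish this.
 */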
int spu_sync_start(void)
{
	int spu;
	int ret = SKIP_GENERIC_SYNC;
	int register_ret;
	unsigned long flags = 0;

	spu_prof_num_nodes = number_of_online_nodes();
	num_spu_nodes = spu_prof_num_nodes * SPUS_PER_NODE;
	INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);

	/* Create the buffers used to hold the SPU data before it is
	 * moved to the kernel buffer.
	 */
	ret = oprofile_spu_buff_create();
	if (ret)
		goto out;

	spin_lock_irqsave(&buffer_lock, flags);
	for (spu = 0; spu < num_spu_nodes; spu++) {
		spu_buff_add(ESCAPE_CODE, spu);
		spu_buff_add(SPU_PROFILING_CODE, spu);
		spu_buff_add(num_spu_nodes, spu);
	}
	spin_unlock_irqrestore(&buffer_lock, flags);

	for (spu = 0; spu < num_spu_nodes; spu++) {
		spu_buff[spu].ctx_sw_seen = 0;
		spu_buff[spu].last_guard_val = 0;
	}

	/* Register for SPU events. */
	register_ret = spu_switch_event_register(&spu_active);
	if (register_ret) {
		ret = SYNC_START_ERROR;
		goto out;
	}

	pr_debug("spu_sync_start -- running.\n");
out:
	return ret;
}

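/* Record SPU program counter samples in the per-SPU buffer for
 * later transfer to the OProfile kernel buffer.
 */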
void spu_sync_buffer(int spu_num, unsigned int *samples,
		     int num_samples)
{
	unsigned long long file_offset;
	unsigned long flags;
	int i;
	struct vma_to_fileoffset_map *map;
	struct spu *the_spu;
	unsigned long long spu_num_ll = spu_num;
	unsigned long long spu_num_shifted = spu_num_ll << 32;
	struct cached_info *c_info;

	/* We need to obtain the cache_lock here because it's possible
	 * that, after we get the cached_info, the SPU job
	 * corresponding to it may end, resulting in the destruction
	 * of the cached_info.
	 */
	spin_lock_irqsave(&cache_lock, flags);
	c_info = get_cached_info(NULL, spu_num);
	if (!c_info) {
		/* This legitimately happens when the SPU task ends
		 * before all of its samples are recorded.  No big
		 * deal -- we just drop a few samples.
		 */
		pr_debug("SPU_PROF: No cached SPU context "
			 "for SPU #%d. Dropping samples.\n", spu_num);
		goto out;
	}

	map = c_info->map;
	the_spu = c_info->the_spu;
	spin_lock(&buffer_lock);
	for (i = 0; i < num_samples; i++) {
		unsigned int sample = *(samples + i);
		int grd_val = 0;

		file_offset = 0;
		if (sample == 0)
			continue;
		file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);

		/* If overlays are used by this SPU application, the
		 * guard value is non-zero, indicating which overlay
		 * section is in use.  We need to discard samples
		 * taken while an overlay switch occurs, i.e. when the
		 * guard value changes.
		 */
		if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
			spu_buff[spu_num].last_guard_val = grd_val;
			/* Drop the rest of the samples. */
			break;
		}

		/* We must ensure that the SPU context switch has been
		 * written out before samples for the SPU.  Otherwise,
		 * the SPU context information is not available and
		 * the postprocessing of the SPU PC will fail with no
		 * available anonymous map information.
		 */
		if (spu_buff[spu_num].ctx_sw_seen)
			spu_buff_add((file_offset | spu_num_shifted),
				     spu_num);
	}
	spin_unlock(&buffer_lock);
out:
	spin_unlock_irqrestore(&cache_lock, flags);
}

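/* Stop SPU profiling: unregister from task-switch notifications,
 * flush the per-SPU buffers, and release all cached info.
 */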
int spu_sync_stop(void)
{
	unsigned long flags = 0;
	int ret;
	int k;

	ret = spu_switch_event_unregister(&spu_active);

	if (ret)
		printk(KERN_ERR "SPU_PROF: "
		       "%s, line %d: spu_switch_event_unregister "
		       "returned %d\n",
		       __func__, __LINE__, ret);

	/* Flush any remaining data in the per-SPU buffers. */
	sync_spu_buff();

	spin_lock_irqsave(&cache_lock, flags);
	ret = release_cached_info(RELEASE_ALL);
	spin_unlock_irqrestore(&cache_lock, flags);

	/* Wait for any pending buffer-sync work to complete before
	 * the buffers it operates on are freed below.
	 */
	cancel_delayed_work_sync(&spu_work);

	for (k = 0; k < num_spu_nodes; k++) {
		spu_buff[k].ctx_sw_seen = 0;

		/* spu_buff[k].buff may be NULL if the buffer
		 * allocation failed; kfree(NULL) is safe.
		 */
		kfree(spu_buff[k].buff);
		spu_buff[k].buff = NULL;
	}
	pr_debug("spu_sync_stop -- done.\n");
	return ret;
}