linux/arch/powerpc/oprofile/cell/spu_task_sync.c
/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/* The purpose of this file is to handle SPU event task switching
 * and to record SPU context information into the OProfile
 * event buffer.
 *
 * Additionally, the spu_sync_buffer function is provided as a helper
 * for recording actual SPU program counter samples to the event buffer.
 */
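/* Overview: a SPUFS context-switch notifier (spu_active_notify) records
 * SPU context information, and spu_sync_buffer() records SPU program
 * counter samples, both into per-SPU circular buffers (spu_buff[]).
 * A delayed work item (wq_sync_spu_buff) periodically copies those
 * buffers into the OProfile event buffer via oprofile_put_buff().
 */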
#include <linux/dcookies.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/numa.h>
#include <linux/oprofile.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include "pr_util.h"

#define RELEASE_ALL 9999

static DEFINE_SPINLOCK(buffer_lock);
static DEFINE_SPINLOCK(cache_lock);
static int num_spu_nodes;
int spu_prof_num_nodes;

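/* Per-SPU circular sample buffers, the delayed work item that drains
 * them into the OProfile event buffer, and the size (in entries) of
 * each buffer, taken from the OProfile per-cpu buffer size setting.
 */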
struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
struct delayed_work spu_work;
static unsigned max_spu_buff;

static void spu_buff_add(unsigned long int value, int spu)
{
        /* spu_buff is a circular buffer.  Add entries to the
         * head.  Head is the index to store the next value.
         * The buffer is full when there is one available entry
         * in the queue, i.e. head and tail can't be equal.
         * That way we can tell the difference between the
         * buffer being full versus empty.
         *
         *  ASSUMPTION: the buffer_lock is held when this function
         *              is called to lock the buffer, head and tail.
         */
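        /* Example: with max_spu_buff = 8, head = 7 and tail = 0 the
         * buffer holds seven entries and is considered full, since
         * head - tail == max_spu_buff - 1.  In the wrapped case the
         * buffer is full only when tail - head == 1.
         */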
        int full = 1;

        if (spu_buff[spu].head >= spu_buff[spu].tail) {
                if ((spu_buff[spu].head - spu_buff[spu].tail)
                    < (max_spu_buff - 1))
                        full = 0;

        } else if (spu_buff[spu].tail > spu_buff[spu].head) {
                if ((spu_buff[spu].tail - spu_buff[spu].head)
                    > 1)
                        full = 0;
        }

        if (!full) {
                spu_buff[spu].buff[spu_buff[spu].head] = value;
                spu_buff[spu].head++;

                if (spu_buff[spu].head >= max_spu_buff)
                        spu_buff[spu].head = 0;
        } else {
                /* From the user's perspective make the SPU buffer
                 * size management/overflow look like we are using
                 * per cpu buffers.  The user uses the same
                 * per cpu parameter to adjust the SPU buffer size.
                 * Increment the sample_lost_overflow count to inform
                 * the user the buffer size needs to be increased.
                 */
                oprofile_cpu_buffer_inc_smpl_lost();
        }
}

/* This function copies the per SPU buffers to the
 * OProfile kernel buffer.
 */
void sync_spu_buff(void)
{
        int spu;
        unsigned long flags;
        int curr_head;

        for (spu = 0; spu < num_spu_nodes; spu++) {
                /* In case there was an issue and the buffer didn't
                 * get created, skip it.
                 */
                if (spu_buff[spu].buff == NULL)
                        continue;

                /* Hold the lock to make sure the head/tail
                 * doesn't change while spu_buff_add() is
                 * deciding if the buffer is full or not.
                 * Being a little paranoid.
                 */
                spin_lock_irqsave(&buffer_lock, flags);
                curr_head = spu_buff[spu].head;
                spin_unlock_irqrestore(&buffer_lock, flags);

                /* Transfer the current contents to the kernel buffer.
                 * Data can still be added to the head of the buffer.
                 */
                oprofile_put_buff(spu_buff[spu].buff,
                                  spu_buff[spu].tail,
                                  curr_head, max_spu_buff);

                spin_lock_irqsave(&buffer_lock, flags);
                spu_buff[spu].tail = curr_head;
                spin_unlock_irqrestore(&buffer_lock, flags);
        }
}

static void wq_sync_spu_buff(struct work_struct *work)
{
        /* move data from spu buffers to kernel buffer */
        sync_spu_buff();

        /* only reschedule if profiling is not done */
        if (spu_prof_running)
                schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
}

/* Container for caching information about an active SPU task. */
struct cached_info {
        struct vma_to_fileoffset_map *map;
        struct spu *the_spu;    /* needed to access pointer to local_store */
        struct kref cache_ref;
};

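/* One cached_info pointer per possible SPU, indexed by spu->number.
 * Updates to this array are protected by cache_lock.
 */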
static struct cached_info *spu_info[MAX_NUMNODES * SPUS_PER_NODE];

static void destroy_cached_info(struct kref *kref)
{
        struct cached_info *info;

        info = container_of(kref, struct cached_info, cache_ref);
        vma_map_free(info->map);
        kfree(info);
        module_put(THIS_MODULE);
}

/* Return the cached_info for the passed SPU number.
 * ATTENTION:  Callers are responsible for obtaining the
 *             cache_lock if needed prior to invoking this function.
 */
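/* If no cached_info has been stored locally for this SPU yet, try to
 * retrieve the one previously handed to SPUFS via
 * spu_set_profile_private_kref() and take an extra reference on it.
 */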
static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
{
        struct kref *ref;
        struct cached_info *ret_info;

        if (spu_num >= num_spu_nodes) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: Invalid index %d into spu info cache\n",
                       __func__, __LINE__, spu_num);
                ret_info = NULL;
                goto out;
        }
        if (!spu_info[spu_num] && the_spu) {
                ref = spu_get_profile_private_kref(the_spu->ctx);
                if (ref) {
                        spu_info[spu_num] = container_of(ref, struct cached_info, cache_ref);
                        kref_get(&spu_info[spu_num]->cache_ref);
                }
        }

        ret_info = spu_info[spu_num];
out:
        return ret_info;
}

/* Looks for cached info for the passed spu.  If not found, the
 * cached info is created for the passed spu.
 * Returns 0 for success; otherwise, a negative error code.
 */
static int
prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
{
        unsigned long flags;
        struct vma_to_fileoffset_map *new_map;
        int retval = 0;
        struct cached_info *info;

        /* We won't bother getting cache_lock here since we
         * don't do anything with the cached_info that's returned.
         */
        info = get_cached_info(spu, spu->number);

        if (info) {
                pr_debug("Found cached SPU info.\n");
                goto out;
        }

        /* Create cached_info and set spu_info[spu->number] to point to it.
         * spu->number is a system-wide value, not a per-node value.
         */
        info = kzalloc(sizeof(struct cached_info), GFP_KERNEL);
        if (!info) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: failed to allocate cached_info\n",
                       __func__, __LINE__);
                retval = -ENOMEM;
                goto err_alloc;
        }
        new_map = create_vma_map(spu, objectId);
        if (!new_map) {
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: create vma_map failed\n",
                       __func__, __LINE__);
                retval = -ENOMEM;
                goto err_alloc;
        }

        pr_debug("Created vma_map\n");
        info->map = new_map;
        info->the_spu = spu;
        kref_init(&info->cache_ref);
        spin_lock_irqsave(&cache_lock, flags);
        spu_info[spu->number] = info;
        /* Increment count before passing off ref to SPUFS. */
        kref_get(&info->cache_ref);

        /* We increment the module refcount here since SPUFS is
         * responsible for the final destruction of the cached_info,
         * and it must be able to access the destroy_cached_info()
         * function defined in the OProfile module.  We decrement
         * the module refcount in destroy_cached_info.
         */
        try_module_get(THIS_MODULE);
        spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
                                destroy_cached_info);
        spin_unlock_irqrestore(&cache_lock, flags);
        goto out;

err_alloc:
        kfree(info);
out:
        return retval;
}

/*
 * NOTE:  The caller is responsible for locking the
 *        cache_lock prior to calling this function.
 */
static int release_cached_info(int spu_index)
{
        int index, end;

        if (spu_index == RELEASE_ALL) {
                end = num_spu_nodes;
                index = 0;
        } else {
                if (spu_index >= num_spu_nodes) {
                        printk(KERN_ERR "SPU_PROF: "
                                "%s, line %d: "
                                "Invalid index %d into spu info cache\n",
                                __func__, __LINE__, spu_index);
                        goto out;
                }
                end = spu_index + 1;
                index = spu_index;
        }
        for (; index < end; index++) {
                if (spu_info[index]) {
                        kref_put(&spu_info[index]->cache_ref,
                                 destroy_cached_info);
                        spu_info[index] = NULL;
                }
        }

out:
        return 0;
}

/* The source code for fast_get_dcookie was "borrowed"
 * from drivers/oprofile/buffer_sync.c.
 */

/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer).
 */
static inline unsigned long fast_get_dcookie(struct path *path)
{
        unsigned long cookie;

        if (path->dentry->d_flags & DCACHE_COOKIE)
                return (unsigned long)path->dentry;
        get_dcookie(path, &cookie);
        return cookie;
}

/* Look up the dcookie for the task's first VM_EXECUTABLE mapping,
 * which corresponds loosely to "application name".  Also, determine
 * the offset for the SPU ELF object.  If the computed offset is
 * non-zero, it implies an embedded SPU object; otherwise, it's a
 * separate SPU binary, in which case we retrieve its dcookie.
 * For the embedded case, we must determine if the SPU ELF is embedded
 * in the executable application or another file (i.e., a shared lib).
 * If embedded in a shared lib, we must get the dcookie and return
 * that to the caller.
 */
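/* On success, the application's dcookie is returned, *offsetp holds the
 * offset of the SPU image within the containing mapping (0 for a
 * standalone SPU binary), and *spu_bin_dcookie identifies the file that
 * actually holds the SPU ELF image.
 */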
static unsigned long
get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
                            unsigned long *spu_bin_dcookie,
                            unsigned long spu_ref)
{
        unsigned long app_cookie = 0;
        unsigned int my_offset = 0;
        struct file *app = NULL;
        struct vm_area_struct *vma;
        struct mm_struct *mm = spu->mm;

        if (!mm)
                goto out;

        down_read(&mm->mmap_sem);

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (!vma->vm_file)
                        continue;
                if (!(vma->vm_flags & VM_EXECUTABLE))
                        continue;
                app_cookie = fast_get_dcookie(&vma->vm_file->f_path);
                pr_debug("got dcookie for %s\n",
                         vma->vm_file->f_dentry->d_name.name);
                app = vma->vm_file;
                break;
        }

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
                        continue;
                my_offset = spu_ref - vma->vm_start;
                if (!vma->vm_file)
                        goto fail_no_image_cookie;

                pr_debug("Found spu ELF at %X(object-id:%lx) for file %s\n",
                         my_offset, spu_ref,
                         vma->vm_file->f_dentry->d_name.name);
                *offsetp = my_offset;
                break;
        }

        /* If no mapping contained spu_ref, vma is NULL here and we
         * cannot look up the SPU binary's dcookie.
         */
        if (!vma)
                goto fail_no_image_cookie;

        *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
        pr_debug("got dcookie for %s\n", vma->vm_file->f_dentry->d_name.name);

        up_read(&mm->mmap_sem);

out:
        return app_cookie;

fail_no_image_cookie:
        up_read(&mm->mmap_sem);

        printk(KERN_ERR "SPU_PROF: "
                "%s, line %d: Cannot find dcookie for SPU binary\n",
                __func__, __LINE__);
        goto out;
}

/* This function finds or creates cached context information for the
 * passed SPU and records SPU context information into the OProfile
 * event buffer.
 */
static int process_context_switch(struct spu *spu, unsigned long objectId)
{
        unsigned long flags;
        int retval;
        unsigned int offset = 0;
        unsigned long spu_cookie = 0, app_dcookie;

        retval = prepare_cached_spu_info(spu, objectId);
        if (retval)
                goto out;

        /* Get dcookie first because a mutex_lock is taken in that
         * code path, so interrupts must not be disabled.
         */
        app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId);
        if (!app_dcookie || !spu_cookie) {
                retval = -ENOENT;
                goto out;
        }

        /* Record context info in event buffer */
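        /* The switch record written below consists of: ESCAPE_CODE,
         * SPU_CTX_SWITCH_CODE, the SPU number, the pid and tgid of the
         * owning task, the application dcookie, the dcookie of the file
         * containing the SPU image, and the offset of that image.
         */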
        spin_lock_irqsave(&buffer_lock, flags);
        spu_buff_add(ESCAPE_CODE, spu->number);
        spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
        spu_buff_add(spu->number, spu->number);
        spu_buff_add(spu->pid, spu->number);
        spu_buff_add(spu->tgid, spu->number);
        spu_buff_add(app_dcookie, spu->number);
        spu_buff_add(spu_cookie, spu->number);
        spu_buff_add(offset, spu->number);

        /* Set flag to indicate SPU PC data can now be written out.  If
         * the SPU program counter data is seen before an SPU context
         * record is seen, the postprocessing will fail.
         */
        spu_buff[spu->number].ctx_sw_seen = 1;

        spin_unlock_irqrestore(&buffer_lock, flags);
        smp_wmb();      /* ensure SPU event buffer updates are written out;
                         * we don't want entries intermingled */
out:
        return retval;
}

/*
 * This function is invoked on either a bind_context or unbind_context.
 * If called for an unbind_context, the val arg is 0; otherwise,
 * it is the object-id value for the spu context.
 * The data arg is of type 'struct spu *'.
 */
static int spu_active_notify(struct notifier_block *self, unsigned long val,
                                void *data)
{
        int retval;
        unsigned long flags;
        struct spu *the_spu = data;

        pr_debug("SPU event notification arrived\n");
        if (!val) {
                spin_lock_irqsave(&cache_lock, flags);
                retval = release_cached_info(the_spu->number);
                spin_unlock_irqrestore(&cache_lock, flags);
        } else {
                retval = process_context_switch(the_spu, val);
        }
        return retval;
}

static struct notifier_block spu_active = {
        .notifier_call = spu_active_notify,
};

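/* Determine the number of online Cell nodes, derived from the node ids
 * that cbe_cpu_to_node() reports for the online cpus.
 */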
static int number_of_online_nodes(void)
{
        u32 cpu;
        u32 tmp;
        int nodes = 0;

        for_each_online_cpu(cpu) {
                tmp = cbe_cpu_to_node(cpu) + 1;
                if (tmp > nodes)
                        nodes++;
        }
        return nodes;
}

static int oprofile_spu_buff_create(void)
{
        int spu;

        max_spu_buff = oprofile_get_cpu_buffer_size();

        for (spu = 0; spu < num_spu_nodes; spu++) {
                /* Create circular buffers to store the data in.
                 * Use locks to manage access to the buffers.
                 */
                spu_buff[spu].head = 0;
                spu_buff[spu].tail = 0;

                /*
                 * Create a buffer for each SPU.  Can't reliably
                 * create a single buffer for all spus due to not
                 * enough contiguous kernel memory.
                 */

                spu_buff[spu].buff = kzalloc((max_spu_buff
                                              * sizeof(unsigned long)),
                                             GFP_KERNEL);

                if (!spu_buff[spu].buff) {
                        printk(KERN_ERR "SPU_PROF: "
                               "%s, line %d: oprofile_spu_buff_create "
                               "failed to allocate spu buffer %d.\n",
                               __func__, __LINE__, spu);

                        /* release the spu buffers that have been allocated */
                        while (spu >= 0) {
                                kfree(spu_buff[spu].buff);
                                spu_buff[spu].buff = NULL;
                                spu--;
                        }
                        return -ENOMEM;
                }
        }
        return 0;
}

/* The main purpose of this function is to synchronize
 * OProfile with SPUFS by registering to be notified of
 * SPU task switches.
 *
 * NOTE: When profiling SPUs, we must ensure that only
 * spu_sync_start is invoked and not the generic sync_start
 * in drivers/oprofile/oprof.c.  A return value of
 * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
 * accomplish this.
 */
int spu_sync_start(void)
{
        int spu;
        int ret = SKIP_GENERIC_SYNC;
        int register_ret;
        unsigned long flags = 0;

        spu_prof_num_nodes = number_of_online_nodes();
        num_spu_nodes = spu_prof_num_nodes * SPUS_PER_NODE;
        INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);

        /* Create the per-SPU buffers used to stage the data before it
         * is put into the kernel buffer.
         */
        ret = oprofile_spu_buff_create();
        if (ret)
                goto out;

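        /* The first three entries written to each per-SPU buffer form
         * the profiling header: ESCAPE_CODE, SPU_PROFILING_CODE and the
         * number of SPUs being profiled.
         */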
        spin_lock_irqsave(&buffer_lock, flags);
        for (spu = 0; spu < num_spu_nodes; spu++) {
                spu_buff_add(ESCAPE_CODE, spu);
                spu_buff_add(SPU_PROFILING_CODE, spu);
                spu_buff_add(num_spu_nodes, spu);
        }
        spin_unlock_irqrestore(&buffer_lock, flags);

        for (spu = 0; spu < num_spu_nodes; spu++) {
                spu_buff[spu].ctx_sw_seen = 0;
                spu_buff[spu].last_guard_val = 0;
        }

        /* Register for SPU events */
        register_ret = spu_switch_event_register(&spu_active);
        if (register_ret) {
                ret = SYNC_START_ERROR;
                goto out;
        }

        pr_debug("spu_sync_start -- running.\n");
out:
        return ret;
}

/* Record SPU program counter samples to the oprofile event buffer. */
void spu_sync_buffer(int spu_num, unsigned int *samples,
                     int num_samples)
{
        unsigned long long file_offset;
        unsigned long flags;
        int i;
        struct vma_to_fileoffset_map *map;
        struct spu *the_spu;
        unsigned long long spu_num_ll = spu_num;
        unsigned long long spu_num_shifted = spu_num_ll << 32;
        struct cached_info *c_info;

        /* We need to obtain the cache_lock here because it's
         * possible that after getting the cached_info, the SPU job
         * corresponding to this cached_info may end, thus resulting
         * in the destruction of the cached_info.
         */
        spin_lock_irqsave(&cache_lock, flags);
        c_info = get_cached_info(NULL, spu_num);
        if (!c_info) {
                /* This legitimately happens when the SPU task ends before all
                 * samples are recorded.
                 * No big deal -- so we just drop a few samples.
                 */
                pr_debug("SPU_PROF: No cached SPU context "
                          "for SPU #%d. Dropping samples.\n", spu_num);
                goto out;
        }

        map = c_info->map;
        the_spu = c_info->the_spu;
        spin_lock(&buffer_lock);
        for (i = 0; i < num_samples; i++) {
                unsigned int sample = *(samples+i);
                int grd_val = 0;
                file_offset = 0;
                if (sample == 0)
                        continue;
                file_offset = vma_map_lookup(map, sample, the_spu, &grd_val);

                /* If overlays are used by this SPU application, the guard
                 * value is non-zero, indicating which overlay section is in
                 * use.  We need to discard samples taken during the time
                 * period in which an overlay switch occurs (i.e., when the
                 * guard value changes).
                 */
                if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
                        spu_buff[spu_num].last_guard_val = grd_val;
                        /* Drop the rest of the samples. */
                        break;
                }

                /* We must ensure that the SPU context switch has been written
                 * out before samples for the SPU.  Otherwise, the SPU context
                 * information is not available and the postprocessing of the
                 * SPU PC will fail with no available anonymous map information.
                 */
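                /* Each recorded sample encodes the SPU number in the upper
                 * 32 bits and the file offset of the sampled PC in the
                 * lower 32 bits.
                 */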
                if (spu_buff[spu_num].ctx_sw_seen)
                        spu_buff_add((file_offset | spu_num_shifted),
                                     spu_num);
        }
        spin_unlock(&buffer_lock);
out:
        spin_unlock_irqrestore(&cache_lock, flags);
}

int spu_sync_stop(void)
{
        unsigned long flags = 0;
        int ret;
        int k;

        ret = spu_switch_event_unregister(&spu_active);

        if (ret)
                printk(KERN_ERR "SPU_PROF: "
                       "%s, line %d: spu_switch_event_unregister "
                       "returned %d\n",
                       __func__, __LINE__, ret);

        /* flush any remaining data in the per SPU buffers */
        sync_spu_buff();

        spin_lock_irqsave(&cache_lock, flags);
        ret = release_cached_info(RELEASE_ALL);
        spin_unlock_irqrestore(&cache_lock, flags);

        /* Remove the scheduled work queue item rather than waiting
         * for every queued entry to execute.  Then flush the pending
         * system wide buffer to the event buffer.
         */
        cancel_delayed_work(&spu_work);

        for (k = 0; k < num_spu_nodes; k++) {
                spu_buff[k].ctx_sw_seen = 0;

                /*
                 * spu_buff[k].buff will be NULL if there was a problem
                 * allocating the buffer.  kfree() handles NULL, so the
                 * buffer is freed unconditionally here.
                 */
                kfree(spu_buff[k].buff);
                spu_buff[k].buff = NULL;
        }
        pr_debug("spu_sync_stop -- done.\n");
        return ret;
}