linux/arch/powerpc/oprofile/op_model_cell.c
/*
 * Cell Broadband Engine OProfile Support
 *
 * (C) Copyright IBM Corporation 2006
 *
 * Author: David Erb (djerb@us.ibm.com)
 * Modifications:
 *         Carl Love <carll@us.ibm.com>
 *         Maynard Johnson <maynardj@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/oprofile.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <asm/cell-pmu.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/io.h>
#include <asm/oprofile_impl.h>
#include <asm/processor.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/reg.h>
#include <asm/rtas.h>
#include <asm/system.h>
#include <asm/cell-regs.h>

#include "../platforms/cell/interrupt.h"
#include "cell/pr_util.h"

#define PPU_PROFILING            0
#define SPU_PROFILING_CYCLES     1
#define SPU_PROFILING_EVENTS     2

#define SPU_EVENT_NUM_START      4100
#define SPU_EVENT_NUM_STOP       4399
#define SPU_PROFILE_EVENT_ADDR          4363  /* spu, address trace, decimal */
#define SPU_PROFILE_EVENT_ADDR_MASK_A   0x146 /* sub unit set to zero */
#define SPU_PROFILE_EVENT_ADDR_MASK_B   0x186 /* sub unit set to zero */

#define NUM_SPUS_PER_NODE    8
#define SPU_CYCLES_EVENT_NUM 2  /* event number for SPU_CYCLES */

#define PPU_CYCLES_EVENT_NUM 1  /* event number for CYCLES */
#define PPU_CYCLES_GRP_NUM   1  /* special group number for identifying
                                 * PPU_CYCLES event
                                 */
#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */

#define NUM_THREADS 2         /* number of physical threads in
                               * a physical processor
                               */
#define NUM_DEBUG_BUS_WORDS 4
#define NUM_INPUT_BUS_WORDS 2

#define MAX_SPU_COUNT 0xFFFFFF  /* maximum 24 bit LFSR value */

/* The minimum HW interval timer setting to send a value to the trace
 * buffer is 10 cycles.  To configure the counter to send a value every
 * N cycles, set the counter to 2^32 - 1 - N.
 */
#define NUM_INTERVAL_CYC  (0xFFFFFFFF - 10)
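
/*
 * Worked example (illustrative): with N = 10, the programmed value is
 * 2^32 - 1 - 10 = 0xFFFFFFF5, so the interval timer writes to the
 * trace buffer every 10 cycles, the fastest legal rate per the
 * comment above.
 */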

/*
 * spu_cycle_reset is the number of cycles between samples.
 * This variable is used for SPU profiling and should ONLY be set
 * at the beginning of cell_reg_setup; otherwise, it's read-only.
 */
static unsigned int spu_cycle_reset;
static unsigned int profiling_mode;
static int spu_evnt_phys_spu_indx;

struct pmc_cntrl_data {
        unsigned long vcntr;
        unsigned long evnts;
        unsigned long masks;
        unsigned long enabled;
};

/*
 * ibm,cbe-perftools rtas parameters
 */
struct pm_signal {
        u16 cpu;                /* Processor to modify */
        u16 sub_unit;           /* hw subunit this applies to (if applicable) */
        short int signal_group; /* Signal Group to Enable/Disable */
        u8 bus_word;            /* Enable/Disable on this Trace/Trigger/Event
                                 * Bus Word(s) (bitmask)
                                 */
        u8 bit;                 /* Trigger/Event bit (if applicable) */
};

/*
 * rtas call arguments
 */
enum {
        SUBFUNC_RESET = 1,
        SUBFUNC_ACTIVATE = 2,
        SUBFUNC_DEACTIVATE = 3,

        PASSTHRU_IGNORE = 0,
        PASSTHRU_ENABLE = 1,
        PASSTHRU_DISABLE = 2,
};

struct pm_cntrl {
        u16 enable;
        u16 stop_at_max;
        u16 trace_mode;
        u16 freeze;
        u16 count_mode;
        u16 spu_addr_trace;
        u8  trace_buf_ovflw;
};

static struct {
        u32 group_control;
        u32 debug_bus_control;
        struct pm_cntrl pm_cntrl;
        u32 pm07_cntrl[NR_PHYS_CTRS];
} pm_regs;

#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x) (x & 0x00000001)
#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
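
/*
 * Decode example (illustrative, derived from the macros above): for the
 * address-trace mask SPU_PROFILE_EVENT_ADDR_MASK_A (0x146),
 *   GET_SUB_UNIT(0x146)      = 0x0  (sub unit zero, as noted above)
 *   GET_BUS_WORD(0x146)      = 0x4  (bitmask selecting debug bus word 2)
 *   GET_BUS_TYPE(0x146)      = 0x1
 *   GET_POLARITY(0x146)      = 0x1
 *   GET_INPUT_CONTROL(0x146) = 0x1
 *   GET_COUNT_CYCLES(0x146)  = 0x0
 */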

static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];

/*
 * The CELL profiling code makes rtas calls to set up the debug bus to
 * route the performance signals.  Additionally, SPU profiling requires
 * a second rtas call to set up the hardware to capture the SPU PCs.
 * The EIO error value is returned if the token lookups or the rtas
 * call fail.  EIO is the best choice of the existing error numbers.
 * The probability of an rtas related error is very low, but by
 * returning EIO and printing additional information to dmesg the user
 * will know that OProfile did not start, and dmesg will tell them why.
 * OProfile does not support returning errors on Stop.  This is not a
 * huge issue, since failure to reset the debug bus or stop the SPU PC
 * collection is not a fatal problem.  Chances are that if Stop failed,
 * Start doesn't work either.
 */

/*
 * Interpretation of hdw_thread:
 * 0 - even virtual cpus 0, 2, 4,...
 * 1 - odd virtual cpus 1, 3, 5, ...
 *
 * FIXME: this is strictly wrong, we need to clean this up in a number
 * of places. It works for now. -arnd
 */
static u32 hdw_thread;

static u32 virt_cntr_inter_mask;
static struct timer_list timer_virt_cntr;
static struct timer_list timer_spu_event_swap;

/*
 * pm_signal needs to be global since it is initialized in
 * cell_reg_setup at the time when the necessary information
 * is available.
 */
static struct pm_signal pm_signal[NR_PHYS_CTRS];
static int pm_rtas_token;    /* token for debug bus setup call */
static int spu_rtas_token;   /* token for SPU cycle profiling */

static u32 reset_value[NR_PHYS_CTRS];
static int num_counters;
static int oprofile_running;
static DEFINE_SPINLOCK(cntr_lock);

static u32 ctr_enabled;

static unsigned char input_bus[NUM_INPUT_BUS_WORDS];

/*
 * Firmware interface functions
 */
static int
rtas_ibm_cbe_perftools(int subfunc, int passthru,
                       void *address, unsigned long length)
{
        u64 paddr = __pa(address);

        return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
                         passthru, paddr >> 32, paddr & 0xffffffff, length);
}

static void pm_rtas_reset_signals(u32 node)
{
        int ret;
        struct pm_signal pm_signal_local;

        /*
         * The debug bus is being set to the passthru disable state.
         * However, the FW still expects at least one legal signal routing
         * entry or it will return an error on the arguments.  If we don't
         * supply a valid entry, we must ignore all return values.  Ignoring
         * all return values means we might miss an error we should be
         * concerned about.
         */

        /* fw expects physical cpu #. */
        pm_signal_local.cpu = node;
        pm_signal_local.signal_group = 21;
        pm_signal_local.bus_word = 1;
        pm_signal_local.sub_unit = 0;
        pm_signal_local.bit = 0;

        ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
                                     &pm_signal_local,
                                     sizeof(struct pm_signal));

        if (unlikely(ret))
                /*
                 * Not a fatal error.  The OProfile stop functions do not
                 * support returning an error for failure to stop OProfile.
                 */
                printk(KERN_WARNING "%s: rtas returned: %d\n",
                       __func__, ret);
}

static int pm_rtas_activate_signals(u32 node, u32 count)
{
        int ret;
        int i, j;
        struct pm_signal pm_signal_local[NR_PHYS_CTRS];

        /*
         * There is no debug setup required for the cycles event.
         * Note that only events in the same group can be used.
         * Otherwise, there will be conflicts in correctly routing
         * the signals on the debug bus.  It is the responsibility
         * of the OProfile user tool to check that the events are in
         * the same group.
         */
        i = 0;
        for (j = 0; j < count; j++) {
                if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {

                        /* fw expects physical cpu # */
                        pm_signal_local[i].cpu = node;
                        pm_signal_local[i].signal_group
                                = pm_signal[j].signal_group;
                        pm_signal_local[i].bus_word = pm_signal[j].bus_word;
                        pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
                        pm_signal_local[i].bit = pm_signal[j].bit;
                        i++;
                }
        }

        if (i != 0) {
                ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
                                             pm_signal_local,
                                             i * sizeof(struct pm_signal));

                if (unlikely(ret)) {
                        printk(KERN_WARNING "%s: rtas returned: %d\n",
                               __func__, ret);
                        return -EIO;
                }
        }

        return 0;
}

/*
 * PM Signal functions
 */
static void set_pm_event(u32 ctr, int event, u32 unit_mask)
{
        struct pm_signal *p;
        u32 signal_bit;
        u32 bus_word, bus_type, count_cycles, polarity, input_control;
        int j, i;

        if (event == PPU_CYCLES_EVENT_NUM) {
                /* Special Event: Count all cpu cycles */
                pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
                p = &(pm_signal[ctr]);
                p->signal_group = PPU_CYCLES_GRP_NUM;
                p->bus_word = 1;
                p->sub_unit = 0;
                p->bit = 0;
                goto out;
        } else {
                pm_regs.pm07_cntrl[ctr] = 0;
        }

        bus_word = GET_BUS_WORD(unit_mask);
        bus_type = GET_BUS_TYPE(unit_mask);
        count_cycles = GET_COUNT_CYCLES(unit_mask);
        polarity = GET_POLARITY(unit_mask);
        input_control = GET_INPUT_CONTROL(unit_mask);
        signal_bit = (event % 100);

        p = &(pm_signal[ctr]);

        p->signal_group = event / 100;
        p->bus_word = bus_word;
        p->sub_unit = GET_SUB_UNIT(unit_mask);

        pm_regs.pm07_cntrl[ctr] = 0;
        pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
        pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
        pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);

        /*
         * Some of the islands' signal selection is based on 64 bit words.
         * The debug bus words are 32 bits, and the input words to the
         * performance counters are defined as 32 bits.  We need to convert
         * the 64 bit island specification to the appropriate input bit in
         * a 32 bit word and the corresponding bus word for the performance
         * counter event selection.  See the CELL Performance monitoring
         * signals manual and the Perf cntr hardware descriptions for the
         * details.
         */
        if (input_control == 0) {
                if (signal_bit > 31) {
                        signal_bit -= 32;
                        if (bus_word == 0x3)
                                bus_word = 0x2;
                        else if (bus_word == 0xc)
                                bus_word = 0x8;
                }

                if ((bus_type == 0) && p->signal_group >= 60)
                        bus_type = 2;
                if ((bus_type == 1) && p->signal_group >= 50)
                        bus_type = 0;

                pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
        } else {
                pm_regs.pm07_cntrl[ctr] = 0;
                p->bit = signal_bit;
        }

        for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
                if (bus_word & (1 << i)) {
                        pm_regs.debug_bus_control |=
                                (bus_type << (30 - (2 * i)));

                        for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
                                if (input_bus[j] == 0xff) {
                                        input_bus[j] = i;
                                        pm_regs.group_control |=
                                                (i << (30 - (2 * j)));

                                        break;
                                }
                        }
                }
        }
out:
        ;
}
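
/*
 * Encoding example (illustrative): OProfile event numbers pack the
 * signal group and bit as group * 100 + bit, so for the SPU address
 * trace event SPU_PROFILE_EVENT_ADDR (4363) the code above derives
 * signal_group = 4363 / 100 = 43 and signal_bit = 4363 % 100 = 63.
 */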

static void write_pm_cntrl(int cpu)
{
        /*
         * OProfile will use 32 bit counters, so set bits 7:10 to 0.
         * pm_regs.pm_cntrl is a global.
         */

        u32 val = 0;
        if (pm_regs.pm_cntrl.enable == 1)
                val |= CBE_PM_ENABLE_PERF_MON;

        if (pm_regs.pm_cntrl.stop_at_max == 1)
                val |= CBE_PM_STOP_AT_MAX;

        if (pm_regs.pm_cntrl.trace_mode != 0)
                val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);

        if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
                val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
        if (pm_regs.pm_cntrl.freeze == 1)
                val |= CBE_PM_FREEZE_ALL_CTRS;

        val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);

        /*
         * Routine set_count_mode must be called previously to set
         * the count mode based on the user selection of user and kernel.
         */
        val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
        cbe_write_pm(cpu, pm_control, val);
}

static inline void
set_count_mode(u32 kernel, u32 user)
{
        /*
         * The user must specify user and kernel if they want them. If
         * neither is specified, OProfile will count in hypervisor mode.
         * pm_regs.pm_cntrl is a global.
         */
        if (kernel) {
                if (user)
                        pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
                else
                        pm_regs.pm_cntrl.count_mode =
                                CBE_COUNT_SUPERVISOR_MODE;
        } else {
                if (user)
                        pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
                else
                        pm_regs.pm_cntrl.count_mode =
                                CBE_COUNT_HYPERVISOR_MODE;
        }
}
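
/*
 * Count mode summary (illustrative restatement of the logic above):
 *   kernel user    count_mode
 *     1     1      CBE_COUNT_ALL_MODES
 *     1     0      CBE_COUNT_SUPERVISOR_MODE
 *     0     1      CBE_COUNT_PROBLEM_MODE
 *     0     0      CBE_COUNT_HYPERVISOR_MODE
 */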

static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
{
        pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
        cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
}

/*
 * OProfile is expected to collect data on all CPUs simultaneously.
 * However, there is one set of performance counters per node.  There are
 * two hardware threads or virtual CPUs on each node.  Hence, OProfile must
 * multiplex in time the performance counter collection on the two virtual
 * CPUs.  The multiplexing of the performance counters is done by this
 * virtual counter routine.
 *
 * The pmc_values used below is defined as 'per-cpu' but its use is
 * more akin to 'per-node'.  We need to store two sets of counter
 * values per node -- one for the previous run and one for the next.
 * The per-cpu[NR_PHYS_CTRS] gives us the storage we need.  Each odd/even
 * pair of per-cpu arrays is used for storing the previous and next
 * pmc values for a given node.
 * NOTE: We use the per-cpu variable to improve cache performance.
 *
 * This routine alternates loading the virtual counters for the two
 * virtual CPUs.
 */
static void cell_virtual_cntr(unsigned long data)
{
        int i, prev_hdw_thread, next_hdw_thread;
        u32 cpu;
        unsigned long flags;

        /*
         * Make sure that the interrupt handler and the virt counter are
         * not both playing with the counters on the same node.
         */

        spin_lock_irqsave(&cntr_lock, flags);

        prev_hdw_thread = hdw_thread;

        /* switch the cpu handling the interrupts */
        hdw_thread = 1 ^ hdw_thread;
        next_hdw_thread = hdw_thread;

        pm_regs.group_control = 0;
        pm_regs.debug_bus_control = 0;

        for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
                input_bus[i] = 0xff;

        /*
         * There are some per thread events.  We must do the
         * set event for the thread that is being started.
         */
        for (i = 0; i < num_counters; i++)
                set_pm_event(i,
                        pmc_cntrl[next_hdw_thread][i].evnts,
                        pmc_cntrl[next_hdw_thread][i].masks);

        /*
         * The following is done only once per each node, but
         * we need cpu #, not node #, to pass to the cbe_xxx functions.
         */
        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                /*
                 * stop counters, save counter values, restore counts
                 * for previous thread
                 */
                cbe_disable_pm(cpu);
                cbe_disable_pm_interrupts(cpu);
                for (i = 0; i < num_counters; i++) {
                        per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
                                = cbe_read_ctr(cpu, i);

                        if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
                            == 0xFFFFFFFF)
                                /* If the cntr value is 0xffffffff, we must
                                 * reset that to 0xfffffff0 when the current
                                 * thread is restarted.  This will generate a
                                 * new interrupt and make sure that we never
                                 * restore the counters to the max value.  If
                                 * the counters were restored to the max value,
                                 * they do not increment and no interrupts are
                                 * generated.  Hence no more samples will be
                                 * collected on that cpu.
                                 */
                                cbe_write_ctr(cpu, i, 0xFFFFFFF0);
                        else
                                cbe_write_ctr(cpu, i,
                                              per_cpu(pmc_values,
                                                      cpu +
                                                      next_hdw_thread)[i]);
                }

                /*
                 * Switch to the other thread. Change the interrupt
                 * and control regs to be scheduled on the CPU
                 * corresponding to the thread to execute.
                 */
                for (i = 0; i < num_counters; i++) {
                        if (pmc_cntrl[next_hdw_thread][i].enabled) {
                                /*
                                 * There are some per thread events.
                                 * We must do the set event and enable_ctr
                                 * for each cpu.
                                 */
                                enable_ctr(cpu, i,
                                           pm_regs.pm07_cntrl);
                        } else {
                                cbe_write_pm07_control(cpu, i, 0);
                        }
                }

                /* Enable interrupts on the CPU thread that is starting */
                cbe_enable_pm_interrupts(cpu, next_hdw_thread,
                                         virt_cntr_inter_mask);
                cbe_enable_pm(cpu);
        }

        spin_unlock_irqrestore(&cntr_lock, flags);

        mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
}
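
/*
 * Storage layout example (illustrative): if cpu N is the even hardware
 * thread of a node, then per_cpu(pmc_values, N)[i] holds the saved
 * counts for hardware thread 0 of that node and
 * per_cpu(pmc_values, N + 1)[i] holds the counts for hardware thread 1,
 * which is why the code above indexes with cpu + prev/next_hdw_thread.
 */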

static void start_virt_cntrs(void)
{
        init_timer(&timer_virt_cntr);
        timer_virt_cntr.function = cell_virtual_cntr;
        timer_virt_cntr.data = 0UL;
        timer_virt_cntr.expires = jiffies + HZ / 10;
        add_timer(&timer_virt_cntr);
}

static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
                        struct op_system_config *sys, int num_ctrs)
{
        spu_cycle_reset = ctr[0].count;

        /*
         * Each node will need to make the rtas call to start
         * and stop SPU profiling.  Get the token once and store it.
         */
        spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");

        if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
                printk(KERN_ERR
                       "%s: rtas token ibm,cbe-spu-perftools unknown\n",
                       __func__);
                return -EIO;
        }
        return 0;
}

/* Unfortunately, the hardware will only support event profiling
 * on one SPU per node at a time.  Therefore, we must time slice
 * the profiling across all SPUs in the node.  Note, we do this
 * in parallel for each node.  The following routine is called
 * periodically based on a kernel timer to switch which SPU is
 * being monitored in a round robin fashion.
 */
static void spu_evnt_swap(unsigned long data)
{
        int node;
        int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
        unsigned long flags;
        int cpu;
        int ret;
        u32 interrupt_mask;

        /* enable interrupts on cntr 0 */
        interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);

        hdw_thread = 0;

        /* Make sure the spu event interrupt handler and spu event swap
         * don't access the counters simultaneously.
         */
        spin_lock_irqsave(&cntr_lock, flags);

        cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;

        if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
                spu_evnt_phys_spu_indx = 0;

        pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
        pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
        pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

        /* switch the SPU being profiled on each node */
        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                node = cbe_cpu_to_node(cpu);
                cur_phys_spu = (node * NUM_SPUS_PER_NODE)
                        + cur_spu_evnt_phys_spu_indx;
                nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
                        + spu_evnt_phys_spu_indx;

                /*
                 * stop counters, save counter values, restore counts
                 * for previous physical SPU
                 */
                cbe_disable_pm(cpu);
                cbe_disable_pm_interrupts(cpu);

                spu_pm_cnt[cur_phys_spu] = cbe_read_ctr(cpu, 0);

                /* Restore the previous count for the next spu to sample.
                 * NOTE: due to a hardware issue, the counter will not
                 * start if its value is at max (0xFFFFFFFF).
                 */
                if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
                        cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
                else
                        cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);

                pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

                /* Set up the debug bus to measure the one event and
                 * the two events that route the next SPU's PC onto
                 * the debug bus.
                 */
                ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
                if (ret)
                        printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
                               "SPU event swap\n", __func__);

                /* Clear the trace buffer; we don't want to take the PC
                 * for the previous SPU.
                 */
                cbe_write_pm(cpu, trace_address, 0);

                enable_ctr(cpu, 0, pm_regs.pm07_cntrl);

                /* Enable interrupts on the CPU thread that is starting */
                cbe_enable_pm_interrupts(cpu, hdw_thread,
                                         interrupt_mask);
                cbe_enable_pm(cpu);
        }

        spin_unlock_irqrestore(&cntr_lock, flags);

        /* swap every HZ/25 jiffies, i.e. approximately every 40 ms */
        mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
}

static void start_spu_event_swap(void)
{
        init_timer(&timer_spu_event_swap);
        timer_spu_event_swap.function = spu_evnt_swap;
        timer_spu_event_swap.data = 0UL;
        timer_spu_event_swap.expires = jiffies + HZ / 25;
        add_timer(&timer_spu_event_swap);
}
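
/*
 * Timing sketch (illustrative): with the HZ/25 period above, the
 * profiled SPU changes roughly every 40 ms, so with NUM_SPUS_PER_NODE
 * (8) SPUs a full rotation of a node takes about 320 ms and each SPU
 * is sampled about 1/8 of the time.
 */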

static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
                        struct op_system_config *sys, int num_ctrs)
{
        int i;

        /* routine is called once for all nodes */

        spu_evnt_phys_spu_indx = 0;
        /*
         * For all events except PPU CYCLEs, each node will need to make
         * the rtas cbe-perftools call to setup and reset the debug bus.
         * Make the token lookup call once and store it in the global
         * variable pm_rtas_token.
         */
        pm_rtas_token = rtas_token("ibm,cbe-perftools");

        if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
                printk(KERN_ERR
                       "%s: rtas token ibm,cbe-perftools unknown\n",
                       __func__);
                return -EIO;
        }

        /* Set up the pm_control register settings; the settings will be
         * written per node by the cell_cpu_setup() function.
         */
        pm_regs.pm_cntrl.trace_buf_ovflw = 1;

        /* Use the occurrence trace mode to have the SPU PC saved
         * to the trace buffer.  Occurrence data in the trace buffer
         * is not used.  Bit 2 must be set to store SPU addresses.
         */
        pm_regs.pm_cntrl.trace_mode = 2;

        pm_regs.pm_cntrl.spu_addr_trace = 0x1;  /* using debug bus
                                                   event 2 & 3 */

        /* Set up the debug bus event array with the SPU PC routing events.
         * Note, pm_signal[0] will be filled in by the set_pm_event()
         * call below.
         */
        pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
        pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
        pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
        pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;

        pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
        pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
        pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
        pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;

        /* Set the user selected spu event to profile on;
         * note, only one SPU profiling event is supported.
         */
        num_counters = 1;  /* Only support one SPU event at a time */
        set_pm_event(0, ctr[0].event, ctr[0].unit_mask);

        reset_value[0] = 0xFFFFFFFF - ctr[0].count;

        /* global, used by cell_cpu_setup */
        ctr_enabled |= 1;

        /* Initialize the count for each SPU to the reset value */
        for (i = 0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
                spu_pm_cnt[i] = reset_value[0];

        return 0;
}

static int cell_reg_setup_ppu(struct op_counter_config *ctr,
                        struct op_system_config *sys, int num_ctrs)
{
        /* routine is called once for all nodes */
        int i, j, cpu;

        num_counters = num_ctrs;

        if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
                printk(KERN_ERR
                       "%s: Oprofile, number of specified events " \
                       "exceeds number of physical counters\n",
                       __func__);
                return -EIO;
        }

        set_count_mode(sys->enable_kernel, sys->enable_user);

        /* Setup the thread 0 events */
        for (i = 0; i < num_ctrs; ++i) {

                pmc_cntrl[0][i].evnts = ctr[i].event;
                pmc_cntrl[0][i].masks = ctr[i].unit_mask;
                pmc_cntrl[0][i].enabled = ctr[i].enabled;
                pmc_cntrl[0][i].vcntr = i;

                for_each_possible_cpu(j)
                        per_cpu(pmc_values, j)[i] = 0;
        }

        /*
         * Setup the thread 1 events, map the thread 0 event to the
         * equivalent thread 1 event.
         */
        for (i = 0; i < num_ctrs; ++i) {
                if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
                        pmc_cntrl[1][i].evnts = ctr[i].event + 19;
                else if (ctr[i].event == 2203)
                        pmc_cntrl[1][i].evnts = ctr[i].event;
                else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
                        pmc_cntrl[1][i].evnts = ctr[i].event + 16;
                else
                        pmc_cntrl[1][i].evnts = ctr[i].event;

                pmc_cntrl[1][i].masks = ctr[i].unit_mask;
                pmc_cntrl[1][i].enabled = ctr[i].enabled;
                pmc_cntrl[1][i].vcntr = i;
        }
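
        /*
         * Mapping example (illustrative restatement of the code above):
         * a thread 0 event such as 2100 maps to the thread 1 event
         * 2119 (+19), a group 22 event such as 2200 maps to 2216 (+16),
         * while 2203 and all other events are used unchanged.
         */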

        for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
                input_bus[i] = 0xff;

        /*
         * Our counters count up, and "count" refers to how many events
         * occur before the next interrupt; we interrupt on overflow.
         * So we calculate the starting value which will give us "count"
         * events until overflow.  Then we set the events on the enabled
         * counters.
         */
        for (i = 0; i < num_counters; ++i) {
                /* start with virtual counter set 0 */
                if (pmc_cntrl[0][i].enabled) {
                        /* Using 32bit counters, reset max - count */
                        reset_value[i] = 0xFFFFFFFF - ctr[i].count;
                        set_pm_event(i,
                                     pmc_cntrl[0][i].evnts,
                                     pmc_cntrl[0][i].masks);

                        /* global, used by cell_cpu_setup */
                        ctr_enabled |= (1 << i);
                }
        }
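
        /*
         * Reset value example (illustrative): for a requested count of
         * 100000 events, reset_value = 0xFFFFFFFF - 100000 = 0xFFFE795F,
         * so roughly 100000 events elapse before the 32 bit counter
         * overflows and interrupts.
         */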

        /* initialize the previous counts for the virtual cntrs */
        for_each_online_cpu(cpu)
                for (i = 0; i < num_counters; ++i) {
                        per_cpu(pmc_values, cpu)[i] = reset_value[i];
                }

        return 0;
}


/* This function is called once for all cpus combined */
static int cell_reg_setup(struct op_counter_config *ctr,
                        struct op_system_config *sys, int num_ctrs)
{
        int ret = 0;
        spu_cycle_reset = 0;

        /* Initialize the spu_addr_trace value; it will be reset if
         * doing spu event profiling.
         */
        pm_regs.group_control = 0;
        pm_regs.debug_bus_control = 0;
        pm_regs.pm_cntrl.stop_at_max = 1;
        pm_regs.pm_cntrl.trace_mode = 0;
        pm_regs.pm_cntrl.freeze = 1;
        pm_regs.pm_cntrl.trace_buf_ovflw = 0;
        pm_regs.pm_cntrl.spu_addr_trace = 0;

        /*
         * For all events except PPU CYCLEs, each node will need to make
         * the rtas cbe-perftools call to setup and reset the debug bus.
         * Make the token lookup call once and store it in the global
         * variable pm_rtas_token.
         */
        pm_rtas_token = rtas_token("ibm,cbe-perftools");

        if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
                printk(KERN_ERR
                       "%s: rtas token ibm,cbe-perftools unknown\n",
                       __func__);
                return -EIO;
        }

        if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
                profiling_mode = SPU_PROFILING_CYCLES;
                ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
        } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
                   (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
                profiling_mode = SPU_PROFILING_EVENTS;
                spu_cycle_reset = ctr[0].count;

                /* For SPU event profiling, we need to set up the
                 * pm_signal array with the events to route the
                 * SPU PC before making the FW call.  Note, only
                 * one SPU event for profiling can be specified
                 * at a time.
                 */
                cell_reg_setup_spu_events(ctr, sys, num_ctrs);
        } else {
                profiling_mode = PPU_PROFILING;
                ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
        }

        return ret;
}

/* This function is called once for each cpu */
static int cell_cpu_setup(struct op_counter_config *cntr)
{
        u32 cpu = smp_processor_id();
        u32 num_enabled = 0;
        int i;
        int ret;

        /* Cycle based SPU profiling does not use the performance
         * counters.  The trace array is configured to collect
         * the data.
         */
        if (profiling_mode == SPU_PROFILING_CYCLES)
                return 0;

        /* There is one performance monitor per processor chip (i.e. node),
         * so we only need to perform this function once per node.
         */
        if (cbe_get_hw_thread_id(cpu))
                return 0;

        /* Stop all counters */
        cbe_disable_pm(cpu);
        cbe_disable_pm_interrupts(cpu);

        cbe_write_pm(cpu, pm_start_stop, 0);
        cbe_write_pm(cpu, group_control, pm_regs.group_control);
        cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
        write_pm_cntrl(cpu);

        for (i = 0; i < num_counters; ++i) {
                if (ctr_enabled & (1 << i)) {
                        pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
                        num_enabled++;
                }
        }

        /*
         * pm_rtas_activate_signals() will return -EIO if the FW
         * call fails.
         */
        if (profiling_mode == SPU_PROFILING_EVENTS) {
                /* For SPU event profiling, we also need to set up
                 * the pm interval timer.
                 */
                ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
                                               num_enabled+2);
                /* store the PC from the debug bus to the trace buffer as
                 * often as possible (every 10 cycles)
                 */
                cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
                return ret;
        } else
                return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
                                                num_enabled);
}

#define ENTRIES  303
#define MAXLFSR  0xFFFFFF

/* precomputed table of 24 bit LFSR values */
static int initial_lfsr[] = {
 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
};

/*
 * The hardware uses an LFSR counting sequence to determine when to capture
 * the SPU PCs.  An LFSR sequence is like a pseudo random number sequence
 * where each number occurs once in the sequence but the sequence is not in
 * numerical order. The SPU PC capture is done when the LFSR sequence reaches
 * the last value in the sequence.  Hence the user specified value N
 * corresponds to the LFSR number that is N from the end of the sequence.
 *
 * To avoid the time to compute the LFSR, a lookup table is used.  The 24 bit
 * LFSR sequence is broken into four ranges.  The spacing of the precomputed
 * values is adjusted in each range so the error between the user specified
 * number (N) of events between samples and the actual number of events based
 * on the precomputed value will be less than about 6.2%.  Note, if the user
 * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
 * This is to prevent the loss of samples because the trace buffer is full.
 *
 *      User specified N                 Step between       Index in
 *                                     precomputed values  precomputed
 *                                                            table
 * 0              to 2^16-1                  ----               0
 * 2^16           to 2^16+2^19-1             2^12            1 to 128
 * 2^16+2^19      to 2^16+2^19+2^22-1        2^15          129 to 256
 * 2^16+2^19+2^22 to 2^24-1                  2^18          257 to 302
 *
 * For example, the LFSR values in the second range are computed for 2^16,
 * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
 * 1, 2,..., 127, 128.
 *
 * The 24 bit LFSR value for the nth number in the sequence can be
 * calculated using the following code:
 *
 * #define size 24
 * int calculate_lfsr(int n)
 * {
 *      int i;
 *      unsigned int newlfsr0;
 *      unsigned int lfsr = 0xFFFFFF;
 *      unsigned int howmany = n;
 *
 *      for (i = 2; i < howmany + 2; i++) {
 *              newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
 *              ((lfsr >> (size - 1 - 1)) & 1) ^
 *              (((lfsr >> (size - 1 - 6)) & 1) ^
 *              ((lfsr >> (size - 1 - 23)) & 1)));
 *
 *              lfsr >>= 1;
 *              lfsr = lfsr | (newlfsr0 << (size - 1));
 *      }
 *      return lfsr;
 * }
 */

#define V2_16  (0x1 << 16)
#define V2_19  (0x1 << 19)
#define V2_22  (0x1 << 22)

static int calculate_lfsr(int n)
{
        /*
         * The ranges and steps are in powers of 2 so the calculations
         * can be done using shifts rather than divides.
         */
        int index;

        if ((n >> 16) == 0)
                index = 0;
        else if (((n - V2_16) >> 19) == 0)
                index = ((n - V2_16) >> 12) + 1;
        else if (((n - V2_16 - V2_19) >> 22) == 0)
                index = ((n - V2_16 - V2_19) >> 15) + 1 + 128;
        else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
                index = ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256;
        else
                index = ENTRIES-1;

        /* make sure index is valid */
        if ((index >= ENTRIES) || (index < 0))
                index = ENTRIES-1;

        return initial_lfsr[index];
}
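
/*
 * Worked example (illustrative): calculate_lfsr(100000).  Since
 * 100000 - 2^16 = 34464 and 34464 >> 19 == 0, the second range applies
 * and index = (34464 >> 12) + 1 = 8 + 1 = 9, so the precomputed value
 * initial_lfsr[9] (7458917) is returned.
 */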

static int pm_rtas_activate_spu_profiling(u32 node)
{
        int ret, i;
        struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];

        /*
         * Set up the rtas call to configure the debug bus to
         * route the SPU PCs.  Set up the pm_signal for each SPU.
         */
        for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
                pm_signal_local[i].cpu = node;
                pm_signal_local[i].signal_group = 41;
                /* spu i on word (i/2) */
                pm_signal_local[i].bus_word = 1 << (i / 2);
                /* spu i */
                pm_signal_local[i].sub_unit = i;
                pm_signal_local[i].bit = 63;
        }

        ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
                                     PASSTHRU_ENABLE, pm_signal_local,
                                     (ARRAY_SIZE(pm_signal_local)
                                      * sizeof(struct pm_signal)));

        if (unlikely(ret)) {
                printk(KERN_WARNING "%s: rtas returned: %d\n",
                       __func__, ret);
                return -EIO;
        }

        return 0;
}
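
/*
 * Routing sketch (illustrative): the loop above pairs SPUs onto debug
 * bus words, since bus_word is the bitmask 1 << (i / 2):
 *   SPUs 0,1 -> bus_word 0x1,  SPUs 2,3 -> bus_word 0x2,
 *   SPUs 4,5 -> bus_word 0x4,  SPUs 6,7 -> bus_word 0x8.
 */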

#ifdef CONFIG_CPU_FREQ
static int
oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
{
        int ret = 0;
        struct cpufreq_freqs *frq = data;

        if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
            (val == CPUFREQ_POSTCHANGE && frq->old > frq->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE))
                set_spu_profiling_frequency(frq->new, spu_cycle_reset);
        return ret;
}

static struct notifier_block cpu_freq_notifier_block = {
        .notifier_call  = oprof_cpufreq_notify
};
#endif

/*
 * Note the generic OProfile stop calls do not support returning
 * an error on stop.  Hence, we will not return an error if the FW
 * calls fail on stop.  Failure to reset the debug bus is not an issue.
 * Failure to disable the SPU profiling is not an issue.  The FW calls
 * to enable the performance counters and debug bus will work even if
 * the hardware was not cleanly reset.
 */
static void cell_global_stop_spu_cycles(void)
{
        int subfunc, rtn_value;
        unsigned int lfsr_value;
        int cpu;

        oprofile_running = 0;
        smp_wmb();

#ifdef CONFIG_CPU_FREQ
        cpufreq_unregister_notifier(&cpu_freq_notifier_block,
                                    CPUFREQ_TRANSITION_NOTIFIER);
#endif

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                subfunc = 3;    /*
                                 * 2 - activate SPU tracing,
                                 * 3 - deactivate
                                 */
                lfsr_value = 0x8f100000;

                rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
                                      subfunc, cbe_cpu_to_node(cpu),
                                      lfsr_value);

                if (unlikely(rtn_value != 0)) {
                        printk(KERN_ERR
                               "%s: rtas call ibm,cbe-spu-perftools " \
                               "failed, return = %d\n",
                               __func__, rtn_value);
                }

                /* Deactivate the signals */
                pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
        }

        stop_spu_profiling_cycles();
}

static void cell_global_stop_spu_events(void)
{
        int cpu;

        oprofile_running = 0;

        stop_spu_profiling_events();
        smp_wmb();

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                cbe_sync_irq(cbe_cpu_to_node(cpu));
                /* Stop the counters */
                cbe_disable_pm(cpu);
                cbe_write_pm07_control(cpu, 0, 0);

                /* Deactivate the signals */
                pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

                /* Deactivate interrupts */
                cbe_disable_pm_interrupts(cpu);
        }
        del_timer_sync(&timer_spu_event_swap);
}

static void cell_global_stop_ppu(void)
{
        int cpu;

        /*
         * This routine will be called once for the system.
         * There is one performance monitor per node, so we
         * only need to perform this function once per node.
         */
        del_timer_sync(&timer_virt_cntr);
        oprofile_running = 0;
        smp_wmb();

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                cbe_sync_irq(cbe_cpu_to_node(cpu));
                /* Stop the counters */
                cbe_disable_pm(cpu);

                /* Deactivate the signals */
                pm_rtas_reset_signals(cbe_cpu_to_node(cpu));

                /* Deactivate interrupts */
                cbe_disable_pm_interrupts(cpu);
        }
}

static void cell_global_stop(void)
{
        if (profiling_mode == PPU_PROFILING)
                cell_global_stop_ppu();
        else if (profiling_mode == SPU_PROFILING_EVENTS)
                cell_global_stop_spu_events();
        else
                cell_global_stop_spu_cycles();
}

static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
{
        int subfunc;
        unsigned int lfsr_value;
        int cpu;
        int ret;
        int rtas_error;
        unsigned int cpu_khzfreq = 0;

        /* SPU profiling is time based and depends on the cpu frequency,
         * so if configured with the CPU_FREQ option, we should detect
         * frequency changes and react accordingly.
         */
#ifdef CONFIG_CPU_FREQ
        ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
        if (ret < 0)
                /* this is not a fatal error */
                printk(KERN_ERR "CPU freq change registration failed: %d\n",
                       ret);
        else
                cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
#endif

        set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;

                /*
                 * Setup SPU cycle-based profiling.
                 * Set perf_mon_control bit 0 to a zero before
                 * enabling spu collection hardware.
                 */
                cbe_write_pm(cpu, pm_control, 0);

                if (spu_cycle_reset > MAX_SPU_COUNT)
                        /* use largest possible value */
                        lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
                else
                        lfsr_value = calculate_lfsr(spu_cycle_reset);

                /* must use a non-zero value; zero disables data collection */
                if (lfsr_value == 0)
                        lfsr_value = calculate_lfsr(1);

                lfsr_value = lfsr_value << 8; /* shift lfsr to correct
                                               * register location
                                               */

                /* debug bus setup */
                ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));

                if (unlikely(ret)) {
                        rtas_error = ret;
                        goto out;
                }

                subfunc = 2;    /* 2 - activate SPU tracing, 3 - deactivate */

                /* start profiling */
                ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
                                cbe_cpu_to_node(cpu), lfsr_value);

                if (unlikely(ret != 0)) {
                        printk(KERN_ERR
                               "%s: rtas call ibm,cbe-spu-perftools failed, " \
                               "return = %d\n", __func__, ret);
                        rtas_error = -EIO;
                        goto out;
                }
        }

        rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
        if (rtas_error)
                goto out_stop;

        oprofile_running = 1;
        return 0;

out_stop:
        cell_global_stop_spu_cycles();  /* clean up the PMU/debug bus */
out:
        return rtas_error;
}
1341
1342static int cell_global_start_spu_events(struct op_counter_config *ctr)
1343{
1344        int cpu;
1345        u32 interrupt_mask = 0;
1346        int rtn = 0;
1347
1348        hdw_thread = 0;
1349
1350        /* spu event profiling, uses the performance counters to generate
1351         * an interrupt.  The hardware is setup to store the SPU program
1352         * counter into the trace array.  The occurrence mode is used to
1353         * enable storing data to the trace buffer.  The bits are set
1354         * to send/store the SPU address in the trace buffer.  The debug
1355         * bus must be setup to route the SPU program counter onto the
1356         * debug bus.  The occurrence data in the trace buffer is not used.
1357         */
1358
1359        /* This routine gets called once for the system.
1360         * There is one performance monitor per node, so we
1361         * only need to perform this function once per node.
1362         */
1363
1364        for_each_online_cpu(cpu) {
1365                if (cbe_get_hw_thread_id(cpu))
1366                        continue;
1367
1368                /*
1369                 * Setup SPU event-based profiling.
1370                 * Set perf_mon_control bit 0 to a zero before
1371                 * enabling spu collection hardware.
1372                 *
1373                 * Only support one SPU event on one SPU per node.
1374                 */
1375                if (ctr_enabled & 1) {
1376                        cbe_write_ctr(cpu, 0, reset_value[0]);
1377                        enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
1378                        interrupt_mask |=
1379                                CBE_PM_CTR_OVERFLOW_INTR(0);
1380                } else {
1381                        /* Disable counter */
1382                        cbe_write_pm07_control(cpu, 0, 0);
1383                }
1384
1385                cbe_get_and_clear_pm_interrupts(cpu);
1386                cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
1387                cbe_enable_pm(cpu);
1388
1389                /* clear the trace buffer */
1390                cbe_write_pm(cpu, trace_address, 0);
1391        }
1392
1393        /* Start the timer to time slice collecting the event profile
1394         * on each of the SPUs.  Note, can collect profile on one SPU
1395         * per node at a time.
1396         */
1397        start_spu_event_swap();
1398        start_spu_profiling_events();
1399        oprofile_running = 1;
1400        smp_wmb();
1401
1402        return 0;
1403}
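
/*
 * Hedged sketch (illustration only): the per-node loops in this file all
 * follow the same pattern -- visit every online cpu but act only on the
 * first hardware thread of each node, since there is one PMU per node.
 * A hypothetical helper capturing the pattern:
 */
static inline void example_for_each_pmu_cpu(void (*fn)(int cpu))
{
        int cpu;

        for_each_online_cpu(cpu) {
                if (cbe_get_hw_thread_id(cpu))
                        continue;       /* skip the second hw thread */
                fn(cpu);
        }
}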
1404
1405static int cell_global_start_ppu(struct op_counter_config *ctr)
1406{
1407        u32 cpu, i;
1408        u32 interrupt_mask = 0;
1409
1410        /* This routine is called once for the system.
1411         * There is one performance monitor per node, so we
1412         * only need to perform this setup once per node.
1413         */
1414        for_each_online_cpu(cpu) {
1415                if (cbe_get_hw_thread_id(cpu))
1416                        continue;
1417
1418                interrupt_mask = 0;
1419
1420                for (i = 0; i < num_counters; ++i) {
1421                        if (ctr_enabled & (1 << i)) {
1422                                cbe_write_ctr(cpu, i, reset_value[i]);
1423                                enable_ctr(cpu, i, pm_regs.pm07_cntrl);
1424                                interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
1425                        } else {
1426                                /* Disable counter */
1427                                cbe_write_pm07_control(cpu, i, 0);
1428                        }
1429                }
1430
1431                cbe_get_and_clear_pm_interrupts(cpu);
1432                cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
1433                cbe_enable_pm(cpu);
1434        }
1435
1436        virt_cntr_inter_mask = interrupt_mask;
1437        oprofile_running = 1;
1438        smp_wmb();
1439
1440        /*
1441         * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
1442         * executed, which manipulates the PMU.  We start the "virtual counter"
1443         * here so that we do not need to synchronize access to the PMU in
1444         * the above for-loop.
1445         */
1446        start_virt_cntrs();
1447
1448        return 0;
1449}
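
/*
 * Hedged sketch: the loop above pairs each enabled counter with its
 * overflow-interrupt bit.  Assuming only that CBE_PM_CTR_OVERFLOW_INTR(i)
 * (from asm/cell-pmu.h) yields a distinct bit per counter, the mask
 * construction is:
 */
static inline u32 example_build_overflow_mask(u32 enabled, int n_ctrs)
{
        u32 mask = 0;
        int i;

        for (i = 0; i < n_ctrs; i++)
                if (enabled & (1 << i))
                        mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
        return mask;
}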
1450
1451static int cell_global_start(struct op_counter_config *ctr)
1452{
1453        if (profiling_mode == SPU_PROFILING_CYCLES)
1454                return cell_global_start_spu_cycles(ctr);
1455        else if (profiling_mode == SPU_PROFILING_EVENTS)
1456                return cell_global_start_spu_events(ctr);
1457        else
1458                return cell_global_start_ppu(ctr);
1459}
1460
1461
1462/* The SPU interrupt handler
1463 *
1464 * SPU event profiling works as follows:
1465 * pm_signal[0] holds the one SPU event to be measured.  It is routed on
1466 * the debug bus using word 0 or 1.  pm_signal[1] and
1467 * pm_signal[2] contain the events needed to route the SPU program
1468 * counter for the selected SPU onto the debug bus using words 2 and 3.
1469 * The pm_interval register is set up to write the SPU PC value into the
1470 * trace buffer at the maximum rate possible.  The trace buffer is configured
1471 * to store the PCs, wrapping when it is full.  The performance counter is
1472 * initialized to the max hardware count minus the number of events, N,
1473 * between samples.  Once N events have occurred, a HW counter overflow
1474 * occurs, generating a HW counter interrupt which also stops the
1475 * writing of the SPU PC values to the trace buffer.  Hence the last PC
1476 * written to the trace buffer is the SPU PC that we want.  Unfortunately,
1477 * we have to read from the beginning of the trace buffer to get to the
1478 * last value written.  We just hope the PPU has nothing better to do than
1479 * service this interrupt.  The PC for the specific SPU being profiled is
1480 * extracted from the trace buffer, processed and stored.  The trace buffer
1481 * is cleared, interrupts are cleared, and the counter is reset to max - N.
1482 * A kernel timer periodically calls the routine spu_evnt_swap() to switch
1483 * to the next physical SPU in the node to profile, in round-robin order,
1484 * so that data is collected for all SPUs on the node.  This does mean
1485 * we need a relatively small value of N to collect enough samples on
1486 * each SPU, since each SPU is only being profiled 1/8 of the time.
1487 * It may also be necessary to use a longer sample collection period.
1488 */
1489static void cell_handle_interrupt_spu(struct pt_regs *regs,
1490                                      struct op_counter_config *ctr)
1491{
1492        u32 cpu;
1493        u64 trace_entry;
1494        u32 interrupt_mask;
1495        u64 trace_buffer[2];
1496        u64 last_trace_buffer;
1497        u32 sample;
1498        u32 trace_addr;
1499        unsigned long sample_array_lock_flags;
1500        int spu_num;
1501        unsigned long flags;
1502
1503        /* Make sure the SPU event interrupt handler and the SPU event
1504         * swap routine don't access the counters simultaneously.
1505         */
1506        cpu = smp_processor_id();
1507        spin_lock_irqsave(&cntr_lock, flags);
1508
1510        cbe_disable_pm(cpu);
1511
1512        interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
1513
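        /* Recognizable debug values; overwritten when trace data is read */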
1514        sample = 0xABCDEF;
1515        trace_entry = 0xfedcba;
1516        last_trace_buffer = 0xdeadbeaf;
1517
1518        if ((oprofile_running == 1) && (interrupt_mask != 0)) {
1519                /* disable writes to trace buff */
1520                cbe_write_pm(cpu, pm_interval, 0);
1521
1522                /* only have one perf cntr being used, cntr 0 */
1523                if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
1524                    && ctr[0].enabled)
1525                        /* The SPU PC values will be read from the
1526                         * trace buffer; reset the counter.
1527                         */
1528                        cbe_write_ctr(cpu, 0, reset_value[0]);
1530
1531                trace_addr = cbe_read_pm(cpu, trace_address);
1532
1533                while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
1534                        /* There is data in the trace buffer to process.
1535                         * Read the buffer until we reach the last
1536                         * entry; that is the value we want.
1537                         */
1538
1539                        cbe_read_trace_buffer(cpu, trace_buffer);
1540                        trace_addr = cbe_read_pm(cpu, trace_address);
1541                }
1542
1543                /* SPU Address 16 bit count format for 128 bit
1544                 * HW trace buffer is used for the SPU PC storage
1545                 *    HDR bits          0:15
1546                 *    SPU Addr 0 bits   16:31
1547                 *    SPU Addr 1 bits   32:47
1548                 *    unused bits       48:127
1549                 *
1550                 * HDR: bit4 = 1 SPU Address 0 valid
1551                 * HDR: bit5 = 1 SPU Address 1 valid
1552                 *  - unfortunately, the valid bits don't seem to work
1553                 *
1554                 * Note trace_buffer[0] holds bits 0:63 of the HW
1555                 * trace buffer, trace_buffer[1] holds bits 64:127;
1556                 * a decoding sketch follows this function.  */
1557
1558                trace_entry = trace_buffer[0]
1559                        & 0x00000000FFFF0000;
1560
1561                /* only the top 16 bits of the 18-bit SPU PC address
1562                 * are stored in the trace buffer, hence shift right
1563                 * by 16 - 2 = 14 bits */
1564                sample = trace_entry >> 14;
1565                last_trace_buffer = trace_buffer[0];
1566
1567                spu_num = spu_evnt_phys_spu_indx
1568                        + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
1569
1570                /* make sure only one process at a time is calling
1571                 * spu_sync_buffer()
1572                 */
1573                spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
1574                                  sample_array_lock_flags);
1575                spu_sync_buffer(spu_num, &sample, 1);
1576                spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
1577                                       sample_array_lock_flags);
1578
1579                smp_wmb();    /* ensure SPU event buffer updates are
1580                               * written; don't want events intermingled */
1581
1582                /* The counters were frozen by the interrupt.
1583                 * Reenable the interrupt and restart the counters.
1584                 */
1585                cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1586                cbe_enable_pm_interrupts(cpu, hdw_thread,
1587                                         virt_cntr_inter_mask);
1588
1589                /* clear the trace buffer, re-enable writes to trace buff */
1590                cbe_write_pm(cpu, trace_address, 0);
1591                cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
1592
1593                /* Writes to the various performance counters only write
1594                 * to a latch.  The new values (interrupt setting bits, reset
1595                 * counter value, etc.) are not copied to the actual registers
1596                 * until the performance monitor is enabled.  In order to get
1597                 * this to work as desired, the performance monitor needs to
1598                 * be disabled while writing to the latches.  This is a
1599                 * HW design issue.
1600                 */
1601                write_pm_cntrl(cpu);
1602                cbe_enable_pm(cpu);
1603        }
1604        spin_unlock_irqrestore(&cntr_lock, flags);
1605}
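
/*
 * Hedged sketch: decoding a 128-bit trace-buffer entry in the "SPU
 * Address 16 bit count" format described above.  trace[0] holds IBM
 * bits 0:63 (bit 0 = most significant), so in C the HDR sits in the
 * top 16 bits of trace[0], followed by the two SPU address fields.
 * The struct and names are illustrative only.
 */
struct example_spu_trace_entry {
        u16 hdr;                /* IBM bits 0:15  */
        u16 spu_addr0;          /* IBM bits 16:31 */
        u16 spu_addr1;          /* IBM bits 32:47 */
};

static inline struct example_spu_trace_entry
example_decode_trace(const u64 trace[2])
{
        struct example_spu_trace_entry e;

        e.hdr       = (trace[0] >> 48) & 0xffff;
        e.spu_addr0 = (trace[0] >> 32) & 0xffff;
        e.spu_addr1 = (trace[0] >> 16) & 0xffff;
        return e;
}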
1606
1607static void cell_handle_interrupt_ppu(struct pt_regs *regs,
1608                                      struct op_counter_config *ctr)
1609{
1610        u32 cpu;
1611        u64 pc;
1612        int is_kernel;
1613        unsigned long flags = 0;
1614        u32 interrupt_mask;
1615        int i;
1616
1617        cpu = smp_processor_id();
1618
1619        /*
1620         * Need to make sure the interrupt handler and the virt counter
1621         * routine are not running at the same time. See the
1622         * cell_virtual_cntr() routine for additional comments.
1623         */
1624        spin_lock_irqsave(&cntr_lock, flags);
1625
1626        /*
1627         * Need to disable and reenable the performance counters
1628         * to get the desired behavior from the hardware.  This
1629         * is hardware specific.
1630         */
1631
1632        cbe_disable_pm(cpu);
1633
1634        interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
1635
1636        /*
1637         * If the interrupt mask has been cleared, then the virt cntr
1638         * has cleared the interrupt.  When the thread that generated
1639         * the interrupt is restored, the data count will be restored to
1640         * 0xffffff0 to cause the interrupt to be regenerated.
1641         */
1642
1643        if ((oprofile_running == 1) && (interrupt_mask != 0)) {
1644                pc = regs->nip;
1645                is_kernel = is_kernel_addr(pc);
1646
1647                for (i = 0; i < num_counters; ++i) {
1648                        if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
1649                            && ctr[i].enabled) {
1650                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
1651                                cbe_write_ctr(cpu, i, reset_value[i]);
1652                        }
1653                }
1654
1655                /*
1656                 * The counters were frozen by the interrupt.
1657                 * Reenable the interrupt and restart the counters.
1658                 * If there was a race between the interrupt handler and
1659                 * the virtual counter routine, the virtual counter
1660                 * routine may have cleared the interrupts.  Hence we must
1661                 * use virt_cntr_inter_mask to re-enable the interrupts.
1662                 */
1663                cbe_enable_pm_interrupts(cpu, hdw_thread,
1664                                         virt_cntr_inter_mask);
1665
1666                /*
1667                 * Writes to the various performance counters only write
1668                 * to a latch.  The new values (interrupt setting bits, reset
1669                 * counter value, etc.) are not copied to the actual registers
1670                 * until the performance monitor is enabled.  In order to get
1671                 * this to work as desired, the performance monitor needs to
1672                 * be disabled while writing to the latches.  This is a
1673                 * HW design issue.
1674                 */
1675                cbe_enable_pm(cpu);
1676        }
1677        spin_unlock_irqrestore(&cntr_lock, flags);
1678}
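
/*
 * Hedged sketch: the latch behaviour described above forces a fixed
 * disable/write/enable sequence on every PMU update.  A hypothetical
 * wrapper making the sequence explicit (the real handlers inline it):
 */
static inline void example_write_ctr_latched(u32 cpu, u32 ctr_num, u32 value)
{
        cbe_disable_pm(cpu);            /* writes now go to the latch */
        cbe_write_ctr(cpu, ctr_num, value);
        cbe_enable_pm(cpu);             /* latch contents reach the HW regs */
}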
1679
1680static void cell_handle_interrupt(struct pt_regs *regs,
1681                                  struct op_counter_config *ctr)
1682{
1683        if (profiling_mode == PPU_PROFILING)
1684                cell_handle_interrupt_ppu(regs, ctr);
1685        else
1686                cell_handle_interrupt_spu(regs, ctr);
1687}
1688
1689/*
1690 * This function is called from the generic OProfile
1691 * driver.  When profiling PPUs, we need to do the
1692 * generic sync start; otherwise, do spu_sync_start.
1693 */
1694static int cell_sync_start(void)
1695{
1696        if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1697            (profiling_mode == SPU_PROFILING_EVENTS))
1698                return spu_sync_start();
1699        else
1700                return DO_GENERIC_SYNC;
1701}
1702
1703static int cell_sync_stop(void)
1704{
1705        if ((profiling_mode == SPU_PROFILING_CYCLES) ||
1706            (profiling_mode == SPU_PROFILING_EVENTS))
1707                return spu_sync_stop();
1708        else
1709                return 1;
1710}
1711
1712struct op_powerpc_model op_model_cell = {
1713        .reg_setup = cell_reg_setup,
1714        .cpu_setup = cell_cpu_setup,
1715        .global_start = cell_global_start,
1716        .global_stop = cell_global_stop,
1717        .sync_start = cell_sync_start,
1718        .sync_stop = cell_sync_stop,
1719        .handle_interrupt = cell_handle_interrupt,
1720};
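
/*
 * Hedged note: op_model_cell is not used directly.  The generic powerpc
 * oprofile layer (arch/powerpc/oprofile/common.c) selects it when the
 * cpu table reports PPC_OPROFILE_CELL; roughly (paraphrased, not
 * verbatim):
 *
 *      case PPC_OPROFILE_CELL:
 *              model = &op_model_cell;
 *              ops->sync_start = model->sync_start;
 *              ops->sync_stop = model->sync_stop;
 *              break;
 */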
1721