linux/arch/mips/oprofile/op_model_mipsxx.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004, 05, 06 by Ralf Baechle
 * Copyright (C) 2005 by MIPS Technologies, Inc.
 */
#include <linux/cpumask.h>
#include <linux/oprofile.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <asm/irq_regs.h>
#include <asm/time.h>

#include "op_impl.h"

#define M_PERFCTL_EXL                   (1UL      <<  0)
#define M_PERFCTL_KERNEL                (1UL      <<  1)
#define M_PERFCTL_SUPERVISOR            (1UL      <<  2)
#define M_PERFCTL_USER                  (1UL      <<  3)
#define M_PERFCTL_INTERRUPT_ENABLE      (1UL      <<  4)
#define M_PERFCTL_EVENT(event)          (((event) & 0x3ff)  << 5)
#define M_PERFCTL_VPEID(vpe)            ((vpe)    << 16)
#define M_PERFCTL_MT_EN(filter)         ((filter) << 20)
#define    M_TC_EN_ALL                  M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE                  M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC                   M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)            ((tcid)   << 22)
#define M_PERFCTL_WIDE                  (1UL      << 30)
#define M_PERFCTL_MORE                  (1UL      << 31)

#define M_COUNTER_OVERFLOW              (1UL      << 31)

/* Netlogic XLR specific, count events in all threads in a core */
#define M_PERFCTL_COUNT_ALL_THREADS     (1UL      << 13)
static int (*save_perf_irq)(void);
static int perfcount_irq;

/*
 * XLR has only one set of counters per core. Designate the
 * first hardware thread in the core for setup and init.
 * Skip CPUs with non-zero hardware thread id (4 hwt per core)
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c)    ((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c)    0
#endif
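/*
 * Illustration of the XLR case above (follows from the 4-hwt-per-core note):
 * a logical CPU whose physical id has non-zero low two bits is a sibling
 * hardware thread of a core whose first thread already owns the shared
 * counter set, so oprofile_skip_cpu() makes it bail out of the per-CPU
 * setup/start/stop callbacks below and only the first thread of each core
 * touches the counters.
 */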

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;
#define WHAT            (M_TC_EN_VPE | \
                         M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
#define vpe_id()        (cpu_has_mipsmt_pertccounters ? \
                        0 : cpu_data[smp_processor_id()].vpe_id)

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is currently no reasonable interface to obtain
 * the number of VPEs used by Linux, and on the 34K this number is fixed to
 * two anyway, so we hardcode a few things here for the moment.  The way
 * it's done here ensures that an oprofile VSMP kernel will also run
 * correctly on a lesser core like a 24K, or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
        if (num_possible_cpus() > 1)
                return 1;

        return 0;
}

#else

#define WHAT            0
#define vpe_id()        0

static inline unsigned int vpe_shift(void)
{
        return 0;
}

#endif

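/*
 * Worked example for the helpers below (using the hardcoded two-VPE
 * assumption above): on a 34K with four per-core counters and more than one
 * possible CPU, vpe_shift() is 1, so counters_total_to_per_cpu(4) == 2 and
 * counters_per_cpu_to_total(2) == 4.  With maxcpus=1 or a non-MT core the
 * shift is 0 and both conversions are the identity.
 */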
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
        return counters >> vpe_shift();
}

static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
        return counters << vpe_shift();
}

#define __define_perf_accessors(r, n, np)                               \
                                                                        \
static inline unsigned int r_c0_ ## r ## n(void)                        \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                return read_c0_ ## r ## n();                            \
        case 1:                                                         \
                return read_c0_ ## r ## np();                           \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return 0;                                                       \
}                                                                       \
                                                                        \
static inline void w_c0_ ## r ## n(unsigned int value)                  \
{                                                                       \
        unsigned int cpu = vpe_id();                                    \
                                                                        \
        switch (cpu) {                                                  \
        case 0:                                                         \
                write_c0_ ## r ## n(value);                             \
                return;                                                 \
        case 1:                                                         \
                write_c0_ ## r ## np(value);                            \
                return;                                                 \
        default:                                                        \
                BUG();                                                  \
        }                                                               \
        return;                                                         \
}                                                                       \

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
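/*
 * Note on the pairings above (tied to the two-VPE assumption): vpe_id()
 * selects which physical register an accessor touches.  On VPE 0,
 * r_c0_perfcntr0()/w_c0_perfctrl0() operate on counter/control pair 0,
 * while on VPE 1 the very same accessors operate on pair 2 (and 1 maps to
 * 3, 2 to 0, 3 to 1), so each VPE gets its own half of the per-core
 * counters without clashing with the other VPE.
 */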

struct op_mips_model op_model_mipsxx_ops;

static struct mipsxx_register_config {
        unsigned int control[4];
        unsigned int counter[4];
} reg;

/* Compute all of the registers in preparation for enabling profiling.  */

static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        int i;

        /* Compute the performance counter control word.  */
        for (i = 0; i < counters; i++) {
                reg.control[i] = 0;
                reg.counter[i] = 0;

                if (!ctr[i].enabled)
                        continue;

                reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
                                 M_PERFCTL_INTERRUPT_ENABLE;
                if (ctr[i].kernel)
                        reg.control[i] |= M_PERFCTL_KERNEL;
                if (ctr[i].user)
                        reg.control[i] |= M_PERFCTL_USER;
                if (ctr[i].exl)
                        reg.control[i] |= M_PERFCTL_EXL;
                if (boot_cpu_type() == CPU_XLR)
                        reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
                reg.counter[i] = 0x80000000 - ctr[i].count;
        }
}
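/*
 * Worked example of the preload above (illustrative numbers only): for a
 * sample period of ctr[i].count == 100000 the counter is preloaded with
 * 0x80000000 - 100000 == 0x7ffe7960, so bit 31 (M_COUNTER_OVERFLOW) becomes
 * set after exactly 100000 events and the overflow interrupt delivers one
 * sample per period.
 */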

/* Program all of the registers in preparation for enabling profiling.  */

static void mipsxx_cpu_setup(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        if (oprofile_skip_cpu(smp_processor_id()))
                return;

        /*
         * Each case deliberately falls through: selecting N counters also
         * programs counters N-1 down to 0.  The start/stop/reset helpers
         * below rely on the same cascade.
         */
        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(reg.counter[3]);
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(reg.counter[2]);
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(reg.counter[1]);
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(reg.counter[0]);
        }
}

/* Start all counters on current CPU */
static void mipsxx_cpu_start(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        if (oprofile_skip_cpu(smp_processor_id()))
                return;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(WHAT | reg.control[3]);
        case 3:
                w_c0_perfctrl2(WHAT | reg.control[2]);
        case 2:
                w_c0_perfctrl1(WHAT | reg.control[1]);
        case 1:
                w_c0_perfctrl0(WHAT | reg.control[0]);
        }
}

/* Stop all counters on current CPU */
static void mipsxx_cpu_stop(void *args)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;

        if (oprofile_skip_cpu(smp_processor_id()))
                return;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
        case 3:
                w_c0_perfctrl2(0);
        case 2:
                w_c0_perfctrl1(0);
        case 1:
                w_c0_perfctrl0(0);
        }
}

static int mipsxx_perfcount_handler(void)
{
        unsigned int counters = op_model_mipsxx_ops.num_counters;
        unsigned int control;
        unsigned int counter;
        int handled = IRQ_NONE;

        if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
                return handled;

        switch (counters) {
#define HANDLE_COUNTER(n)                                               \
        case n + 1:                                                     \
                control = r_c0_perfctrl ## n();                         \
                counter = r_c0_perfcntr ## n();                         \
                if ((control & M_PERFCTL_INTERRUPT_ENABLE) &&           \
                    (counter & M_COUNTER_OVERFLOW)) {                   \
                        oprofile_add_sample(get_irq_regs(), n);         \
                        w_c0_perfcntr ## n(reg.counter[n]);             \
                        handled = IRQ_HANDLED;                          \
                }
        HANDLE_COUNTER(3)
        HANDLE_COUNTER(2)
        HANDLE_COUNTER(1)
        HANDLE_COUNTER(0)
        }

        return handled;
}
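/*
 * HANDLE_COUNTER(n) above expands to a "case n + 1:" label, so with four
 * counters control enters at case 4 and falls through, checking counters
 * 3, 2, 1 and 0 in turn.  A counter contributes a sample only when both
 * its interrupt-enable and overflow bits are set, and it is then reloaded
 * from reg.counter[] to start the next sample period.
 */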

static inline int __n_counters(void)
{
        if (!cpu_has_perf)
                return 0;
        if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
                return 1;
        if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
                return 2;
        if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
                return 3;

        return 4;
}
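/*
 * The probe above walks the "more counters" chain: M_PERFCTL_MORE (bit 31)
 * in a control register indicates that another counter/control pair
 * follows, so counting stops at the first register with the bit clear, or
 * at the maximum of four handled by this driver.
 */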

static inline int n_counters(void)
{
        int counters;

        switch (current_cpu_type()) {
        case CPU_R10000:
                counters = 2;
                break;

        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
                counters = 4;
                break;

        default:
                counters = __n_counters();
        }

        return counters;
}

static void reset_counters(void *arg)
{
        int counters = (int)(long)arg;

        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
                w_c0_perfcntr3(0);
        case 3:
                w_c0_perfctrl2(0);
                w_c0_perfcntr2(0);
        case 2:
                w_c0_perfctrl1(0);
                w_c0_perfcntr1(0);
        case 1:
                w_c0_perfctrl0(0);
                w_c0_perfcntr0(0);
        }
}

static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
{
        return mipsxx_perfcount_handler();
}

static int __init mipsxx_init(void)
{
        int counters;

        counters = n_counters();
        if (counters == 0) {
                printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
                return -ENODEV;
        }

#ifdef CONFIG_MIPS_MT_SMP
        /* Per-TC performance counters are indicated by Config7 bit 19. */
        cpu_has_mipsmt_pertccounters = read_c0_config7() & (1 << 19);
        if (!cpu_has_mipsmt_pertccounters)
                counters = counters_total_to_per_cpu(counters);
#endif
        on_each_cpu(reset_counters, (void *)(long)counters, 1);

        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
        case CPU_M14KC:
                op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
                break;

        case CPU_M14KEC:
                op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
                break;

        case CPU_20KC:
                op_model_mipsxx_ops.cpu_type = "mips/20K";
                break;

        case CPU_24K:
                op_model_mipsxx_ops.cpu_type = "mips/24K";
                break;

        case CPU_25KF:
                op_model_mipsxx_ops.cpu_type = "mips/25K";
                break;

        case CPU_1004K:
        case CPU_34K:
                op_model_mipsxx_ops.cpu_type = "mips/34K";
                break;

        case CPU_1074K:
        case CPU_74K:
                op_model_mipsxx_ops.cpu_type = "mips/74K";
                break;

        case CPU_INTERAPTIV:
                op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
                break;

        case CPU_PROAPTIV:
                op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
                break;

        case CPU_P5600:
                op_model_mipsxx_ops.cpu_type = "mips/P5600";
                break;

        case CPU_I6400:
                op_model_mipsxx_ops.cpu_type = "mips/I6400";
                break;

        case CPU_M5150:
                op_model_mipsxx_ops.cpu_type = "mips/M5150";
                break;

        case CPU_5KC:
                op_model_mipsxx_ops.cpu_type = "mips/5K";
                break;

        case CPU_R10000:
                if ((current_cpu_data.processor_id & 0xff) == 0x20)
                        op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
                else
                        op_model_mipsxx_ops.cpu_type = "mips/r10000";
                break;

        case CPU_R12000:
        case CPU_R14000:
                op_model_mipsxx_ops.cpu_type = "mips/r12000";
                break;

        case CPU_R16000:
                op_model_mipsxx_ops.cpu_type = "mips/r16000";
                break;

        case CPU_SB1:
        case CPU_SB1A:
                op_model_mipsxx_ops.cpu_type = "mips/sb1";
                break;

        case CPU_LOONGSON1:
                op_model_mipsxx_ops.cpu_type = "mips/loongson1";
                break;

        case CPU_XLR:
                op_model_mipsxx_ops.cpu_type = "mips/xlr";
                break;

        default:
                printk(KERN_ERR "Profiling unsupported for this CPU\n");

                return -ENODEV;
        }

        save_perf_irq = perf_irq;
        perf_irq = mipsxx_perfcount_handler;

        /*
         * Prefer a platform-provided performance counter IRQ; otherwise
         * derive one from cp0_perfcount_irq.  If neither is available,
         * no interrupt is requested here and the perf_irq hook installed
         * above is relied upon instead.
         */
        if (get_c0_perfcount_int)
                perfcount_irq = get_c0_perfcount_int();
        else if (cp0_perfcount_irq >= 0)
                perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        else
                perfcount_irq = -1;

        if (perfcount_irq >= 0)
                return request_irq(perfcount_irq, mipsxx_perfcount_int,
                                   IRQF_PERCPU | IRQF_NOBALANCING |
                                   IRQF_NO_THREAD | IRQF_NO_SUSPEND |
                                   IRQF_SHARED,
                                   "Perfcounter", save_perf_irq);

        return 0;
}

static void mipsxx_exit(void)
{
        int counters = op_model_mipsxx_ops.num_counters;

        if (perfcount_irq >= 0)
                free_irq(perfcount_irq, save_perf_irq);

        counters = counters_per_cpu_to_total(counters);
        on_each_cpu(reset_counters, (void *)(long)counters, 1);

        perf_irq = save_perf_irq;
}

struct op_mips_model op_model_mipsxx_ops = {
        .reg_setup      = mipsxx_reg_setup,
        .cpu_setup      = mipsxx_cpu_setup,
        .init           = mipsxx_init,
        .exit           = mipsxx_exit,
        .cpu_start      = mipsxx_cpu_start,
        .cpu_stop       = mipsxx_cpu_stop,
};