linux/arch/mips/sibyte/common/sb_tbprof.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2007 MIPS Technologies, Inc.
 *    written by Ralf Baechle <ralf@linux-mips.org>
 */

#undef DEBUG

#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <asm/io.h>
#include <asm/sibyte/sb1250.h>

#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_scd.h>
#include <asm/sibyte/bcm1480_int.h>
#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_scd.h>
#include <asm/sibyte/sb1250_int.h>
#else
#error invalid SiByte configuration
#endif

#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
#undef K_INT_TRACE_FREEZE
#define K_INT_TRACE_FREEZE K_BCM1480_INT_TRACE_FREEZE
#undef K_INT_PERF_CNT
#define K_INT_PERF_CNT K_BCM1480_INT_PERF_CNT
#endif

#include <linux/uaccess.h>

#define SBPROF_TB_MAJOR 240

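/*
 * One trace-buffer snapshot: the SCD trace buffer holds 256 bundles,
 * each read out as three hi/lo pairs of 64-bit words (see the read-out
 * loop in sbprof_tb_intr), so a sample is 6*256 doublewords (12 KiB).
 */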
typedef u64 tb_sample_t[6*256];

enum open_status {
        SB_CLOSED,
        SB_OPENING,
        SB_OPEN
};

struct sbprof_tb {
        wait_queue_head_t       tb_sync;
        wait_queue_head_t       tb_read;
        struct mutex            lock;
        enum open_status        open;
        tb_sample_t             *sbprof_tbbuf;
        int                     next_tb_sample;

        volatile int            tb_enable;
        volatile int            tb_armed;
};

static struct sbprof_tb sbp;

#define MAX_SAMPLE_BYTES (24*1024*1024)
#define MAX_TBSAMPLE_BYTES (12*1024*1024)

#define MAX_SAMPLES (MAX_SAMPLE_BYTES/sizeof(u_int32_t))
#define TB_SAMPLE_SIZE (sizeof(tb_sample_t))
#define MAX_TB_SAMPLES (MAX_TBSAMPLE_BYTES/TB_SAMPLE_SIZE)
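
/*
 * TB_SAMPLE_SIZE is 6*256*8 = 12288 bytes, so the 12 MiB sample buffer
 * holds MAX_TB_SAMPLES = 12*1024*1024 / 12288 = 1024 samples.
 */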

/* ioctls */
#define SBPROF_ZBSTART          _IOW('s', 0, int)
#define SBPROF_ZBSTOP           _IOW('s', 1, int)
#define SBPROF_ZBWAITFULL       _IOW('s', 2, int)

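/*
 * Rough userspace usage sketch (illustrative; assumes the device node
 * appears as /dev/tb, matching the "tb" device created in
 * sbprof_tb_init):
 *
 *      int fd = open("/dev/tb", O_RDONLY);
 *      int full;
 *
 *      ioctl(fd, SBPROF_ZBSTART, 0);           start sampling
 *      ioctl(fd, SBPROF_ZBWAITFULL, &full);    wait until the buffer fills
 *      read(fd, buf, len);                     drain collected samples
 *      ioctl(fd, SBPROF_ZBSTOP, 0);            stop sampling
 *      close(fd);
 */
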
/*
 * Routines for using the 40-bit SCD cycle counter
 *
 * The client is responsible for either handling interrupts or making
 * sure the cycle counter never saturates, e.g., by doing
 * zclk_timer_init(0) at least every 2^40 - 1 ZCLKs.
 */

/*
 * Configures SCD counter 0 to count ZCLKs starting from val;
 * configures SCD counters 1, 2 and 3 to count nothing.
 * Must not be called while gathering ZBbus profiles.
 */

#define zclk_timer_init(val) \
  __asm__ __volatile__ (".set push;" \
                        ".set mips64;" \
                        "la   $8, 0xb00204c0;" /* SCD perf_cnt_cfg */ \
                        "sd   %0, 0x10($8);"   /* write val to counter0 */ \
                        "sd   %1, 0($8);"      /* config counter0 for zclks */ \
                        ".set pop" \
                        : /* no outputs */ \
                                                     /* enable, counter0 */ \
                        : /* inputs */ "r"(val), "r" ((1ULL << 33) | 1ULL) \
                        : /* modifies */ "$8" )


/* Reads SCD counter 0 and puts the result in val
   (an unsigned long long). */
#define zclk_get(val) \
  __asm__ __volatile__ (".set push;" \
                        ".set mips64;" \
                        "la   $8, 0xb00204c0;" /* SCD perf_cnt_cfg */ \
                        "ld   %0, 0x10($8);"   /* read counter0 into val */ \
                        ".set pop" \
                        : /* outputs */ "=r"(val) \
                        : /* inputs */ \
                        : /* modifies */ "$8" )
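
/*
 * Usage sketch for the two macros above (illustrative): time a region
 * of interest in ZCLKs, valid while the region lasts less than 2^40
 * cycles:
 *
 *      unsigned long long t0, t1;
 *
 *      zclk_timer_init(0);     restart counter 0 from zero
 *      zclk_get(t0);
 *      ... region being timed ...
 *      zclk_get(t1);
 *      t1 - t0 is the elapsed ZCLK count
 */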

#define DEVNAME "sb_tbprof"

#define TB_FULL (sbp.next_tb_sample == MAX_TB_SAMPLES)

/*
 * Support for ZBbus sampling using the trace buffer
 *
 * We use the SCD performance counter interrupt, caused by a Zclk counter
 * overflow, to trigger the start of tracing.
 *
 * We set the trace buffer to sample everything and freeze on
 * overflow.
 *
 * We map the interrupt for trace_buffer_freeze to handle it on CPU 0.
 */

static u64 tb_period;

static void arm_tb(void)
{
        u64 scdperfcnt;
        u64 next = (1ULL << 40) - tb_period;
        u64 tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;

        /*
         * Generate an SCD_PERFCNT interrupt in TB_PERIOD Zclks to
         * trigger start of trace.  XXX vary sampling period
         */
        __raw_writeq(0, IOADDR(A_SCD_PERF_CNT_1));
        scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));

        /*
         * Unfortunately, in Pass 2 we must clear all counters to knock down
         * a previous interrupt request.  This means that bus profiling
         * requires ALL of the SCD perf counters.
         */
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
        __raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
                                                /* keep counters 0,2,3,4,5,6,7 as is */
                     V_SPC_CFG_SRC1(1),         /* counter 1 counts cycles */
                     IOADDR(A_BCM1480_SCD_PERF_CNT_CFG0));
        __raw_writeq(
                     M_SPC_CFG_ENABLE |         /* enable counting */
                     M_SPC_CFG_CLEAR |          /* clear all counters */
                     V_SPC_CFG_SRC1(1),         /* counter 1 counts cycles */
                     IOADDR(A_BCM1480_SCD_PERF_CNT_CFG1));
#else
        __raw_writeq((scdperfcnt & ~M_SPC_CFG_SRC1) |
                                                /* keep counters 0,2,3 as is */
                     M_SPC_CFG_ENABLE |         /* enable counting */
                     M_SPC_CFG_CLEAR |          /* clear all counters */
                     V_SPC_CFG_SRC1(1),         /* counter 1 counts cycles */
                     IOADDR(A_SCD_PERF_CNT_CFG));
#endif
        __raw_writeq(next, IOADDR(A_SCD_PERF_CNT_1));
        /* Reset the trace buffer */
        __raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
#if 0 && defined(M_SCD_TRACE_CFG_FORCECNT)
        /* XXXKW may want to expose control to the data-collector */
        tb_options |= M_SCD_TRACE_CFG_FORCECNT;
#endif
        __raw_writeq(tb_options, IOADDR(A_SCD_TRACE_CFG));
        sbp.tb_armed = 1;
}

static irqreturn_t sbprof_tb_intr(int irq, void *dev_id)
{
        int i;

        pr_debug(DEVNAME ": tb_intr\n");

        if (sbp.next_tb_sample < MAX_TB_SAMPLES) {
                /* XXX should use XKPHYS to make writes bypass L2 */
                u64 *p = sbp.sbprof_tbbuf[sbp.next_tb_sample++];
                /* Read out trace */
                __raw_writeq(M_SCD_TRACE_CFG_START_READ,
                             IOADDR(A_SCD_TRACE_CFG));
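                /* Make sure the read-out command has reached the SCD
                   before we start draining the trace FIFO */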
                __asm__ __volatile__ ("sync" : : : "memory");
                /* Loop runs backwards because bundles are read out in reverse order */
                for (i = 256 * 6; i > 0; i -= 6) {
                        /* Subscripts decrease to put bundle in the order */
                        /*   t0 lo, t0 hi, t1 lo, t1 hi, t2 lo, t2 hi */
                        p[i - 1] = __raw_readq(IOADDR(A_SCD_TRACE_READ)); /* read t2 hi */
                        p[i - 2] = __raw_readq(IOADDR(A_SCD_TRACE_READ)); /* read t2 lo */
                        p[i - 3] = __raw_readq(IOADDR(A_SCD_TRACE_READ)); /* read t1 hi */
                        p[i - 4] = __raw_readq(IOADDR(A_SCD_TRACE_READ)); /* read t1 lo */
                        p[i - 5] = __raw_readq(IOADDR(A_SCD_TRACE_READ)); /* read t0 hi */
                        p[i - 6] = __raw_readq(IOADDR(A_SCD_TRACE_READ)); /* read t0 lo */
                }
                if (!sbp.tb_enable) {
                        pr_debug(DEVNAME ": tb_intr shutdown\n");
                        __raw_writeq(M_SCD_TRACE_CFG_RESET,
                                     IOADDR(A_SCD_TRACE_CFG));
                        sbp.tb_armed = 0;
                        wake_up_interruptible(&sbp.tb_sync);
                } else {
                        /* knock down current interrupt and get another one later */
                        arm_tb();
                }
        } else {
                /* No more trace buffer samples */
                pr_debug(DEVNAME ": tb_intr full\n");
                __raw_writeq(M_SCD_TRACE_CFG_RESET, IOADDR(A_SCD_TRACE_CFG));
                sbp.tb_armed = 0;
                if (!sbp.tb_enable)
                        wake_up_interruptible(&sbp.tb_sync);
                wake_up_interruptible(&sbp.tb_read);
        }
        return IRQ_HANDLED;
}

static irqreturn_t sbprof_pc_intr(int irq, void *dev_id)
{
        printk(KERN_WARNING DEVNAME ": unexpected pc_intr\n");
        return IRQ_NONE;
}

/*
 * Requires: Already called zclk_timer_init with a value that won't
 *           saturate 40 bits.  No subsequent use of SCD performance counters
 *           or trace buffer.
 */

static int sbprof_zbprof_start(struct file *filp)
{
        u64 scdperfcnt;
        int err;

        if (xchg(&sbp.tb_enable, 1))
                return -EBUSY;

        pr_debug(DEVNAME ": starting\n");

        sbp.next_tb_sample = 0;
        filp->f_pos = 0;

        err = request_irq(K_INT_TRACE_FREEZE, sbprof_tb_intr, 0,
                          DEVNAME " trace freeze", &sbp);
        if (err) {
                sbp.tb_enable = 0;
                return -EBUSY;
        }

        /* Make sure there isn't a perf-cnt interrupt waiting */
        scdperfcnt = __raw_readq(IOADDR(A_SCD_PERF_CNT_CFG));
        /* Clear the counters and set counter 1 (SRC_1) to count cycles */
        __raw_writeq((scdperfcnt & ~(M_SPC_CFG_SRC1 | M_SPC_CFG_ENABLE)) |
                     M_SPC_CFG_ENABLE | M_SPC_CFG_CLEAR | V_SPC_CFG_SRC1(1),
                     IOADDR(A_SCD_PERF_CNT_CFG));

        /*
         * We grab this interrupt to prevent others from trying to use
         * it, even though we don't want to service the interrupts
         * (they only feed into the trace-on-interrupt mechanism)
         */
        if (request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0,
                        DEVNAME " scd perfcnt", &sbp)) {
                free_irq(K_INT_TRACE_FREEZE, &sbp);
                sbp.tb_enable = 0;
                return -EBUSY;
        }

        /*
         * I need the core to mask these, but the interrupt mapper to
         * pass them through.  I am exploiting my knowledge that
         * cp0_status masks out IP[5]. krw
         */
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
        __raw_writeq(K_BCM1480_INT_MAP_I3,
                     IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_MAP_BASE_L) +
                            ((K_BCM1480_INT_PERF_CNT & 0x3f) << 3)));
#else
        __raw_writeq(K_INT_MAP_I3,
                     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_MAP_BASE) +
                            (K_INT_PERF_CNT << 3)));
#endif

        /* Initialize address traps */
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_0));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_1));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_2));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_UP_3));

        __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_0));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_1));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_2));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_DOWN_3));

        __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_0));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_1));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_2));
        __raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));

        /* Initialize Trace Event 0-7 */
        __raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0)); /* when interrupt */
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_3));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_4));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_5));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_6));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_7));

        /* Initialize Trace Sequence 0-7 */
        /* Start on event 0 (interrupt) */
        __raw_writeq(V_SCD_TRSEQ_FUNC_START | 0x0fff,
                     IOADDR(A_SCD_TRACE_SEQUENCE_0));
        /* dsamp when d used | asamp when a used */
        __raw_writeq(M_SCD_TRSEQ_ASAMPLE | M_SCD_TRSEQ_DSAMPLE |
                     K_SCD_TRSEQ_TRIGGER_ALL,
                     IOADDR(A_SCD_TRACE_SEQUENCE_1));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_2));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_3));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_4));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_5));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_6));
        __raw_writeq(0, IOADDR(A_SCD_TRACE_SEQUENCE_7));

        /* Now indicate the PERF_CNT interrupt as a trace-relevant interrupt */
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
        __raw_writeq(1ULL << (K_BCM1480_INT_PERF_CNT & 0x3f),
                     IOADDR(A_BCM1480_IMR_REGISTER(0, R_BCM1480_IMR_INTERRUPT_TRACE_L)));
#else
        __raw_writeq(1ULL << K_INT_PERF_CNT,
                     IOADDR(A_IMR_REGISTER(0, R_IMR_INTERRUPT_TRACE)));
#endif
        arm_tb();

        pr_debug(DEVNAME ": done starting\n");

        return 0;
}

static int sbprof_zbprof_stop(void)
{
        int err = 0;

        pr_debug(DEVNAME ": stopping\n");

        if (sbp.tb_enable) {
                /*
                 * The handler only disarms once it sees tb_enable go to
                 * zero, so the disable must happen before we sleep.
                 */
                sbp.tb_enable = 0;
                /*
                 * XXXKW there is a window here where the intr handler may run,
                 * see the disable, and do the wake_up before this sleep
                 * happens.
                 */
                pr_debug(DEVNAME ": wait for disarm\n");
                err = wait_event_interruptible(sbp.tb_sync, !sbp.tb_armed);
                pr_debug(DEVNAME ": disarm complete, stat %d\n", err);

                if (err)
                        return err;

                free_irq(K_INT_TRACE_FREEZE, &sbp);
                free_irq(K_INT_PERF_CNT, &sbp);
        }

        pr_debug(DEVNAME ": done stopping\n");

        return err;
}

static int sbprof_tb_open(struct inode *inode, struct file *filp)
{
        int minor;

        minor = iminor(inode);
        if (minor != 0)
                return -ENODEV;

        if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
                return -EBUSY;

        memset(&sbp, 0, sizeof(struct sbprof_tb));
        sbp.sbprof_tbbuf = vzalloc(MAX_TBSAMPLE_BYTES);
        if (!sbp.sbprof_tbbuf) {
                sbp.open = SB_CLOSED;
                wmb();
                return -ENOMEM;
        }

        init_waitqueue_head(&sbp.tb_sync);
        init_waitqueue_head(&sbp.tb_read);
        mutex_init(&sbp.lock);

        sbp.open = SB_OPEN;
        wmb();

        return 0;
}

static int sbprof_tb_release(struct inode *inode, struct file *filp)
{
        int minor;

        minor = iminor(inode);
        if (minor != 0 || sbp.open != SB_OPEN)
                return -ENODEV;

        mutex_lock(&sbp.lock);

        if (sbp.tb_armed || sbp.tb_enable)
                sbprof_zbprof_stop();

        vfree(sbp.sbprof_tbbuf);
        sbp.open = SB_CLOSED;
        wmb();

        mutex_unlock(&sbp.lock);

        return 0;
}

static ssize_t sbprof_tb_read(struct file *filp, char __user *buf,
                              size_t size, loff_t *offp)
{
        int cur_sample, sample_off, cur_count, sample_left;
        char *src;
        int count = 0;
        char __user *dest = buf;
        long cur_off = *offp;

        if (!access_ok(buf, size))
                return -EFAULT;

        mutex_lock(&sbp.lock);

        count = 0;
        cur_sample = cur_off / TB_SAMPLE_SIZE;
        sample_off = cur_off % TB_SAMPLE_SIZE;
        sample_left = TB_SAMPLE_SIZE - sample_off;

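        /*
         * The file offset maps linearly onto the sample buffer:
         * sample index = offset / TB_SAMPLE_SIZE, byte within that
         * sample = offset % TB_SAMPLE_SIZE.  Copy out (partial) samples
         * until the request is satisfied or we run out of collected
         * samples.
         */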
        while (size && (cur_sample < sbp.next_tb_sample)) {
                int err;

                cur_count = size < sample_left ? size : sample_left;
                src = (char *)(((long)sbp.sbprof_tbbuf[cur_sample]) + sample_off);
                err = __copy_to_user(dest, src, cur_count);
                if (err) {
                        *offp = cur_off + cur_count - err;
                        mutex_unlock(&sbp.lock);
                        return err;
                }
                pr_debug(DEVNAME ": read from sample %d, %d bytes\n",
                         cur_sample, cur_count);
                size -= cur_count;
                sample_left -= cur_count;
                if (!sample_left) {
                        cur_sample++;
                        sample_off = 0;
                        sample_left = TB_SAMPLE_SIZE;
                } else {
                        sample_off += cur_count;
                }
                cur_off += cur_count;
                dest += cur_count;
                count += cur_count;
        }
        *offp = cur_off;
        mutex_unlock(&sbp.lock);

        return count;
}

static long sbprof_tb_ioctl(struct file *filp,
                            unsigned int command,
                            unsigned long arg)
{
        int err = 0;

        switch (command) {
        case SBPROF_ZBSTART:
                mutex_lock(&sbp.lock);
                err = sbprof_zbprof_start(filp);
                mutex_unlock(&sbp.lock);
                break;

        case SBPROF_ZBSTOP:
                mutex_lock(&sbp.lock);
                err = sbprof_zbprof_stop();
                mutex_unlock(&sbp.lock);
                break;

        case SBPROF_ZBWAITFULL: {
                err = wait_event_interruptible(sbp.tb_read, TB_FULL);
                if (err)
                        break;

                err = put_user(TB_FULL, (int __user *) arg);
                break;
        }

        default:
                err = -EINVAL;
                break;
        }

        return err;
}

static const struct file_operations sbprof_tb_fops = {
        .owner          = THIS_MODULE,
        .open           = sbprof_tb_open,
        .release        = sbprof_tb_release,
        .read           = sbprof_tb_read,
        .unlocked_ioctl = sbprof_tb_ioctl,
        .compat_ioctl   = sbprof_tb_ioctl,
        .mmap           = NULL,
        .llseek         = default_llseek,
};

static struct class *tb_class;
static struct device *tb_dev;

static int __init sbprof_tb_init(void)
{
        struct device *dev;
        struct class *tbc;
        int err;

        if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) {
                printk(KERN_WARNING DEVNAME ": initialization failed (dev %d)\n",
                       SBPROF_TB_MAJOR);
                return -EIO;
        }

        tbc = class_create(THIS_MODULE, "sb_tracebuffer");
        if (IS_ERR(tbc)) {
                err = PTR_ERR(tbc);
                goto out_chrdev;
        }

        tb_class = tbc;

        dev = device_create(tbc, NULL, MKDEV(SBPROF_TB_MAJOR, 0), NULL, "tb");
        if (IS_ERR(dev)) {
                err = PTR_ERR(dev);
                goto out_class;
        }
        tb_dev = dev;

        sbp.open = SB_CLOSED;
        wmb();
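        /* Assuming zbbus_mhz is the ZBbus clock in MHz (ZCLKs per
           microsecond), the default period below is 10 ms of ZBbus time */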
        tb_period = zbbus_mhz * 10000LL;
        pr_info(DEVNAME ": initialized - tb_period = %lld\n",
                (long long) tb_period);
        return 0;

out_class:
        class_destroy(tb_class);
out_chrdev:
        unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);

        return err;
}

static void __exit sbprof_tb_cleanup(void)
{
        device_destroy(tb_class, MKDEV(SBPROF_TB_MAJOR, 0));
        unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME);
        class_destroy(tb_class);
}

module_init(sbprof_tb_init);
module_exit(sbprof_tb_cleanup);

MODULE_ALIAS_CHARDEV_MAJOR(SBPROF_TB_MAJOR);
MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
MODULE_LICENSE("GPL");