linux/drivers/clocksource/sh_cmt.c
// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - CMT
 *
 *  Copyright (C) 2008 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

struct sh_cmt_device;

/*
 * The CMT comes in 5 different identified flavours, depending not only on the
 * SoC but also on the particular instance. The following table lists the main
 * characteristics of those flavours.
 *
 *                      16B     32B     32B-F   48B     R-Car Gen2
 * -----------------------------------------------------------------------------
 * Channels             2       1/4     1       6       2/8
 * Control Width        16      16      16      16      32
 * Counter Width        16      32      32      32/48   32/48
 * Shared Start/Stop    Y       Y       Y       Y       N
 *
 * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register
 * located in the channel registers block. All other versions have a shared
 * start/stop register located in the global space.
 *
 * Channels are indexed from 0 to N-1 in the documentation. The channel index
 * determines the start/stop bit position in the control register and the
 * channel registers block address. Some CMT instances have a subset of
 * channels available, in which case the index in the documentation doesn't
 * match the "real" index as implemented in hardware. This is for instance the
 * case with CMT0 on r8a7740, which is a 32-bit variant with a single channel
 * numbered 0 in the documentation but using start/stop bit 5 and having its
 * registers block at 0x60.
 *
 * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit
 * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable.
 */
  62
  63enum sh_cmt_model {
  64        SH_CMT_16BIT,
  65        SH_CMT_32BIT,
  66        SH_CMT_48BIT,
  67        SH_CMT0_RCAR_GEN2,
  68        SH_CMT1_RCAR_GEN2,
  69};
  70
  71struct sh_cmt_info {
  72        enum sh_cmt_model model;
  73
  74        unsigned int channels_mask;
  75
  76        unsigned long width; /* 16 or 32 bit version of hardware block */
  77        u32 overflow_bit;
  78        u32 clear_bits;
  79
  80        /* callbacks for CMSTR and CMCSR access */
  81        u32 (*read_control)(void __iomem *base, unsigned long offs);
  82        void (*write_control)(void __iomem *base, unsigned long offs,
  83                              u32 value);
  84
  85        /* callbacks for CMCNT and CMCOR access */
  86        u32 (*read_count)(void __iomem *base, unsigned long offs);
  87        void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
  88};
  89
  90struct sh_cmt_channel {
  91        struct sh_cmt_device *cmt;
  92
  93        unsigned int index;     /* Index in the documentation */
  94        unsigned int hwidx;     /* Real hardware index */
  95
  96        void __iomem *iostart;
  97        void __iomem *ioctrl;
  98
  99        unsigned int timer_bit;
 100        unsigned long flags;
 101        u32 match_value;
 102        u32 next_match_value;
 103        u32 max_match_value;
 104        raw_spinlock_t lock;
 105        struct clock_event_device ced;
 106        struct clocksource cs;
 107        u64 total_cycles;
 108        bool cs_enabled;
 109};
 110
 111struct sh_cmt_device {
 112        struct platform_device *pdev;
 113
 114        const struct sh_cmt_info *info;
 115
 116        void __iomem *mapbase;
 117        struct clk *clk;
 118        unsigned long rate;
 119
 120        raw_spinlock_t lock; /* Protect the shared start/stop register */
 121
 122        struct sh_cmt_channel *channels;
 123        unsigned int num_channels;
 124        unsigned int hw_channels;
 125
 126        bool has_clockevent;
 127        bool has_clocksource;
 128};
 129
 130#define SH_CMT16_CMCSR_CMF              (1 << 7)
 131#define SH_CMT16_CMCSR_CMIE             (1 << 6)
 132#define SH_CMT16_CMCSR_CKS8             (0 << 0)
 133#define SH_CMT16_CMCSR_CKS32            (1 << 0)
 134#define SH_CMT16_CMCSR_CKS128           (2 << 0)
 135#define SH_CMT16_CMCSR_CKS512           (3 << 0)
 136#define SH_CMT16_CMCSR_CKS_MASK         (3 << 0)
 137
 138#define SH_CMT32_CMCSR_CMF              (1 << 15)
 139#define SH_CMT32_CMCSR_OVF              (1 << 14)
 140#define SH_CMT32_CMCSR_WRFLG            (1 << 13)
 141#define SH_CMT32_CMCSR_STTF             (1 << 12)
 142#define SH_CMT32_CMCSR_STPF             (1 << 11)
 143#define SH_CMT32_CMCSR_SSIE             (1 << 10)
 144#define SH_CMT32_CMCSR_CMS              (1 << 9)
 145#define SH_CMT32_CMCSR_CMM              (1 << 8)
 146#define SH_CMT32_CMCSR_CMTOUT_IE        (1 << 7)
 147#define SH_CMT32_CMCSR_CMR_NONE         (0 << 4)
 148#define SH_CMT32_CMCSR_CMR_DMA          (1 << 4)
 149#define SH_CMT32_CMCSR_CMR_IRQ          (2 << 4)
 150#define SH_CMT32_CMCSR_CMR_MASK         (3 << 4)
 151#define SH_CMT32_CMCSR_DBGIVD           (1 << 3)
 152#define SH_CMT32_CMCSR_CKS_RCLK8        (4 << 0)
 153#define SH_CMT32_CMCSR_CKS_RCLK32       (5 << 0)
 154#define SH_CMT32_CMCSR_CKS_RCLK128      (6 << 0)
 155#define SH_CMT32_CMCSR_CKS_RCLK1        (7 << 0)
 156#define SH_CMT32_CMCSR_CKS_MASK         (7 << 0)
 157
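/*
 * Register offsets passed to the accessors below are register indices, not
 * byte offsets: the 16-bit helpers scale by 2 bytes per register, the 32-bit
 * helpers by 4 bytes per register.
 */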
static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
        return ioread16(base + (offs << 1));
}

static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
        return ioread32(base + (offs << 2));
}

static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
        iowrite16(value, base + (offs << 1));
}

static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
        iowrite32(value, base + (offs << 2));
}

static const struct sh_cmt_info sh_cmt_info[] = {
        [SH_CMT_16BIT] = {
                .model = SH_CMT_16BIT,
                .width = 16,
                .overflow_bit = SH_CMT16_CMCSR_CMF,
                .clear_bits = ~SH_CMT16_CMCSR_CMF,
                .read_control = sh_cmt_read16,
                .write_control = sh_cmt_write16,
                .read_count = sh_cmt_read16,
                .write_count = sh_cmt_write16,
        },
        [SH_CMT_32BIT] = {
                .model = SH_CMT_32BIT,
                .width = 32,
                .overflow_bit = SH_CMT32_CMCSR_CMF,
                .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
                .read_control = sh_cmt_read16,
                .write_control = sh_cmt_write16,
                .read_count = sh_cmt_read32,
                .write_count = sh_cmt_write32,
        },
        [SH_CMT_48BIT] = {
                .model = SH_CMT_48BIT,
                .channels_mask = 0x3f,
                .width = 32,
                .overflow_bit = SH_CMT32_CMCSR_CMF,
                .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
                .read_control = sh_cmt_read32,
                .write_control = sh_cmt_write32,
                .read_count = sh_cmt_read32,
                .write_count = sh_cmt_write32,
        },
        [SH_CMT0_RCAR_GEN2] = {
                .model = SH_CMT0_RCAR_GEN2,
                .channels_mask = 0x60,
                .width = 32,
                .overflow_bit = SH_CMT32_CMCSR_CMF,
                .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
                .read_control = sh_cmt_read32,
                .write_control = sh_cmt_write32,
                .read_count = sh_cmt_read32,
                .write_count = sh_cmt_write32,
        },
        [SH_CMT1_RCAR_GEN2] = {
                .model = SH_CMT1_RCAR_GEN2,
                .channels_mask = 0xff,
                .width = 32,
                .overflow_bit = SH_CMT32_CMCSR_CMF,
                .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF),
                .read_control = sh_cmt_read32,
                .write_control = sh_cmt_write32,
                .read_count = sh_cmt_read32,
                .write_count = sh_cmt_write32,
        },
};

#define CMCSR 0 /* channel register: compare match control/status */
#define CMCNT 1 /* channel register: compare match counter */
#define CMCOR 2 /* channel register: compare match constant (match value) */

static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
        if (ch->iostart)
                return ch->cmt->info->read_control(ch->iostart, 0);
        else
                return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
        if (ch->iostart)
                ch->cmt->info->write_control(ch->iostart, 0, value);
        else
                ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}

static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
        return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
        ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}

static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
        return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
        ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
        ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}

static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
        u32 v1, v2, v3;
        u32 o1, o2;

        o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;

        /* Make sure the timer value is stable. Stolen from acpi_pm.c */
        do {
                o2 = o1;
                v1 = sh_cmt_read_cmcnt(ch);
                v2 = sh_cmt_read_cmcnt(ch);
                v3 = sh_cmt_read_cmcnt(ch);
                o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
        } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
                          || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

        *has_wrapped = o1;
        return v2;
}

static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
        unsigned long flags;
        u32 value;

        /* start stop register shared by multiple timer channels */
        raw_spin_lock_irqsave(&ch->cmt->lock, flags);
        value = sh_cmt_read_cmstr(ch);

        if (start)
                value |= 1 << ch->timer_bit;
        else
                value &= ~(1 << ch->timer_bit);

        sh_cmt_write_cmstr(ch, value);
        raw_spin_unlock_irqrestore(&ch->cmt->lock, flags);
}

static int sh_cmt_enable(struct sh_cmt_channel *ch)
{
        int k, ret;

        pm_runtime_get_sync(&ch->cmt->pdev->dev);
        dev_pm_syscore_device(&ch->cmt->pdev->dev, true);

        /* enable clock */
        ret = clk_enable(ch->cmt->clk);
        if (ret) {
                dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
                        ch->index);
                goto err0;
        }

        /* make sure channel is disabled */
        sh_cmt_start_stop_ch(ch, 0);

        /* configure channel, periodic mode and maximum timeout */
        if (ch->cmt->info->width == 16) {
                sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE |
                                   SH_CMT16_CMCSR_CKS512);
        } else {
                sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM |
                                   SH_CMT32_CMCSR_CMTOUT_IE |
                                   SH_CMT32_CMCSR_CMR_IRQ |
                                   SH_CMT32_CMCSR_CKS_RCLK8);
        }

        sh_cmt_write_cmcor(ch, 0xffffffff);
        sh_cmt_write_cmcnt(ch, 0);

        /*
         * According to the sh73a0 user's manual, as CMCNT can be operated
         * only by the RCLK (Pseudo 32 kHz), there's one restriction on
         * modifying the CMCNT register: two RCLK cycles are necessary before
         * this register can be read, or before any modification of the value
         * it holds is reflected in the LSI's actual operation.
         *
         * While at it, we're supposed to clear out the CMCNT as of this
         * moment, so make sure it's processed properly here.  This will
         * take RCLKx2 at maximum.
         */
        for (k = 0; k < 100; k++) {
                if (!sh_cmt_read_cmcnt(ch))
                        break;
                udelay(1);
        }

        if (sh_cmt_read_cmcnt(ch)) {
                dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
                        ch->index);
                ret = -ETIMEDOUT;
                goto err1;
        }

        /* enable channel */
        sh_cmt_start_stop_ch(ch, 1);
        return 0;
 err1:
        /* stop clock */
        clk_disable(ch->cmt->clk);

 err0:
        return ret;
}

static void sh_cmt_disable(struct sh_cmt_channel *ch)
{
        /* disable channel */
        sh_cmt_start_stop_ch(ch, 0);

        /* disable interrupts in CMT block */
        sh_cmt_write_cmcsr(ch, 0);

        /* stop clock */
        clk_disable(ch->cmt->clk);

        dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
        pm_runtime_put(&ch->cmt->pdev->dev);
}

/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
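
/*
 * FLAG_CLOCKEVENT and FLAG_CLOCKSOURCE track which roles a channel is
 * currently enabled for. FLAG_REPROGRAM requests that the match value be
 * reprogrammed, FLAG_SKIPEVENT tells the interrupt handler not to deliver
 * the next compare match to the clock event handler, and FLAG_IRQCONTEXT is
 * set while sh_cmt_interrupt() runs so that set_next_event() only records
 * the new delta instead of reprogramming the hardware itself.
 */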

static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
                                              int absolute)
{
        u32 value = ch->next_match_value;
        u32 new_match;
        u32 delay = 0;
        u32 now = 0;
        u32 has_wrapped;

        now = sh_cmt_get_counter(ch, &has_wrapped);
        ch->flags |= FLAG_REPROGRAM; /* force reprogram */

        if (has_wrapped) {
                /* we're competing with the interrupt handler.
                 *  -> let the interrupt handler reprogram the timer.
                 *  -> interrupt number two handles the event.
                 */
                ch->flags |= FLAG_SKIPEVENT;
                return;
        }

        if (absolute)
                now = 0;

        do {
                /* reprogram the timer hardware,
                 * but don't save the new match value yet.
                 */
                new_match = now + value + delay;
                if (new_match > ch->max_match_value)
                        new_match = ch->max_match_value;

                sh_cmt_write_cmcor(ch, new_match);

                now = sh_cmt_get_counter(ch, &has_wrapped);
                if (has_wrapped && (new_match > ch->match_value)) {
                        /* we are changing to a greater match value,
                         * so this wrap must be caused by the counter
                         * matching the old value.
                         * -> first interrupt reprograms the timer.
                         * -> interrupt number two handles the event.
                         */
                        ch->flags |= FLAG_SKIPEVENT;
                        break;
                }

                if (has_wrapped) {
                        /* we are changing to a smaller match value,
                         * so the wrap must be caused by the counter
                         * matching the new value.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        ch->match_value = new_match;
                        break;
                }

                /* be safe: verify hardware settings */
                if (now < new_match) {
                        /* timer value is below match value, all good.
                         * this makes sure we won't miss any match events.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        ch->match_value = new_match;
                        break;
                }

                /* the counter has reached a value greater
                 * than our new match value. and since the
                 * has_wrapped flag isn't set we must have
                 * programmed a too close event.
                 * -> increase delay and retry.
                 */
                if (delay)
                        delay <<= 1;
                else
                        delay = 1;

                if (!delay)
                        dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n",
                                 ch->index);

        } while (delay);
}

static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
        if (delta > ch->max_match_value)
                dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n",
                         ch->index);

        ch->next_match_value = delta;
        sh_cmt_clock_event_program_verify(ch, 0);
}

static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&ch->lock, flags);
        __sh_cmt_set_next(ch, delta);
        raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
        struct sh_cmt_channel *ch = dev_id;

        /* clear flags */
        sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) &
                           ch->cmt->info->clear_bits);

        /* update the clock source counter to begin with, if enabled;
         * the wrap flag should be cleared by the timer-specific
         * isr before we end up here.
         */
        if (ch->flags & FLAG_CLOCKSOURCE)
                ch->total_cycles += ch->match_value + 1;

        if (!(ch->flags & FLAG_REPROGRAM))
                ch->next_match_value = ch->max_match_value;

        ch->flags |= FLAG_IRQCONTEXT;

        if (ch->flags & FLAG_CLOCKEVENT) {
                if (!(ch->flags & FLAG_SKIPEVENT)) {
                        if (clockevent_state_oneshot(&ch->ced)) {
                                ch->next_match_value = ch->max_match_value;
                                ch->flags |= FLAG_REPROGRAM;
                        }

                        ch->ced.event_handler(&ch->ced);
                }
        }

        ch->flags &= ~FLAG_SKIPEVENT;

        if (ch->flags & FLAG_REPROGRAM) {
                ch->flags &= ~FLAG_REPROGRAM;
                sh_cmt_clock_event_program_verify(ch, 1);

                if (ch->flags & FLAG_CLOCKEVENT)
                        if ((clockevent_state_shutdown(&ch->ced))
                            || (ch->match_value == ch->next_match_value))
                                ch->flags &= ~FLAG_REPROGRAM;
        }

        ch->flags &= ~FLAG_IRQCONTEXT;

        return IRQ_HANDLED;
}

static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
{
        int ret = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&ch->lock, flags);

        if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                ret = sh_cmt_enable(ch);

        if (ret)
                goto out;
        ch->flags |= flag;

        /* setup timeout if no clockevent */
        if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(ch, ch->max_match_value);
 out:
        raw_spin_unlock_irqrestore(&ch->lock, flags);

        return ret;
}

static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
{
        unsigned long flags;
        unsigned long f;

        raw_spin_lock_irqsave(&ch->lock, flags);

        f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
        ch->flags &= ~flag;

        if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                sh_cmt_disable(ch);

        /* adjust the timeout to maximum if only clocksource left */
        if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
                __sh_cmt_set_next(ch, ch->max_match_value);

        raw_spin_unlock_irqrestore(&ch->lock, flags);
}

static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
{
        return container_of(cs, struct sh_cmt_channel, cs);
}

static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
        unsigned long flags;
        u32 has_wrapped;
        u64 value;
        u32 raw;

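        /*
         * total_cycles accumulates whole compare match periods in the
         * interrupt handler; add the current counter value on top of it,
         * compensating by one full period if the counter has wrapped but
         * the interrupt has not been handled yet.
         */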
        raw_spin_lock_irqsave(&ch->lock, flags);
        value = ch->total_cycles;
        raw = sh_cmt_get_counter(ch, &has_wrapped);

        if (unlikely(has_wrapped))
                raw += ch->match_value + 1;
        raw_spin_unlock_irqrestore(&ch->lock, flags);

        return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
        int ret;
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

        WARN_ON(ch->cs_enabled);

        ch->total_cycles = 0;

        ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
        if (!ret)
                ch->cs_enabled = true;

        return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

        WARN_ON(!ch->cs_enabled);

        sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
        ch->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

        if (!ch->cs_enabled)
                return;

        sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
        struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);

        if (!ch->cs_enabled)
                return;

        pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
        sh_cmt_start(ch, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
                                       const char *name)
{
        struct clocksource *cs = &ch->cs;

        cs->name = name;
        cs->rating = 125;
        cs->read = sh_cmt_clocksource_read;
        cs->enable = sh_cmt_clocksource_enable;
        cs->disable = sh_cmt_clocksource_disable;
        cs->suspend = sh_cmt_clocksource_suspend;
        cs->resume = sh_cmt_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
                 ch->index);

        clocksource_register_hz(cs, ch->cmt->rate);
        return 0;
}

static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_cmt_channel, ced);
}

static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
        sh_cmt_start(ch, FLAG_CLOCKEVENT);

        if (periodic)
                sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
        else
                sh_cmt_set_next(ch, ch->max_match_value);
}

static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
        struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

        sh_cmt_stop(ch, FLAG_CLOCKEVENT);
        return 0;
}

static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
                                        int periodic)
{
        struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

        /* deal with old setting first */
        if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
                sh_cmt_stop(ch, FLAG_CLOCKEVENT);

        dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
                 ch->index, periodic ? "periodic" : "oneshot");
        sh_cmt_clock_event_start(ch, periodic);
        return 0;
}

static int sh_cmt_clock_event_set_oneshot(struct clock_event_device *ced)
{
        return sh_cmt_clock_event_set_state(ced, 0);
}

static int sh_cmt_clock_event_set_periodic(struct clock_event_device *ced)
{
        return sh_cmt_clock_event_set_state(ced, 1);
}

static int sh_cmt_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

        BUG_ON(!clockevent_state_oneshot(ced));
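        /*
         * When called from within the interrupt handler only record the new
         * delta; the handler reprograms the hardware itself through
         * sh_cmt_clock_event_program_verify(). Otherwise program the new
         * match value immediately.
         */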
        if (likely(ch->flags & FLAG_IRQCONTEXT))
                ch->next_match_value = delta - 1;
        else
                sh_cmt_set_next(ch, delta - 1);

        return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
        struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

        pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
        clk_unprepare(ch->cmt->clk);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
        struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);

        clk_prepare(ch->cmt->clk);
        pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
}

static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch,
                                      const char *name)
{
        struct clock_event_device *ced = &ch->ced;
        int irq;
        int ret;

        irq = platform_get_irq(ch->cmt->pdev, ch->index);
        if (irq < 0)
                return irq;

        ret = request_irq(irq, sh_cmt_interrupt,
                          IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
                          dev_name(&ch->cmt->pdev->dev), ch);
        if (ret) {
                dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n",
                        ch->index, irq);
                return ret;
        }

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = 125;
        ced->cpumask = cpu_possible_mask;
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_state_shutdown = sh_cmt_clock_event_shutdown;
        ced->set_state_periodic = sh_cmt_clock_event_set_periodic;
        ced->set_state_oneshot = sh_cmt_clock_event_set_oneshot;
        ced->suspend = sh_cmt_clock_event_suspend;
        ced->resume = sh_cmt_clock_event_resume;

        /* TODO: calculate good shift from rate and counter bit width */
        ced->shift = 32;
        ced->mult = div_sc(ch->cmt->rate, NSEC_PER_SEC, ced->shift);
        ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced);
        ced->max_delta_ticks = ch->max_match_value;
        ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
        ced->min_delta_ticks = 0x1f;

        dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n",
                 ch->index);
        clockevents_register_device(ced);

        return 0;
}

static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name,
                           bool clockevent, bool clocksource)
{
        int ret;

        if (clockevent) {
                ch->cmt->has_clockevent = true;
                ret = sh_cmt_register_clockevent(ch, name);
                if (ret < 0)
                        return ret;
        }

        if (clocksource) {
                ch->cmt->has_clocksource = true;
                sh_cmt_register_clocksource(ch, name);
        }

        return 0;
}

static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index,
                                unsigned int hwidx, bool clockevent,
                                bool clocksource, struct sh_cmt_device *cmt)
{
        int ret;

        /* Skip unused channels. */
        if (!clockevent && !clocksource)
                return 0;

        ch->cmt = cmt;
        ch->index = index;
        ch->hwidx = hwidx;
        ch->timer_bit = hwidx;

        /*
         * Compute the address of the channel control register block. For the
         * timers with a per-channel start/stop register, compute its address
         * as well.
         */
        switch (cmt->info->model) {
        case SH_CMT_16BIT:
                ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6;
                break;
        case SH_CMT_32BIT:
        case SH_CMT_48BIT:
                ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10;
                break;
        case SH_CMT0_RCAR_GEN2:
        case SH_CMT1_RCAR_GEN2:
                ch->iostart = cmt->mapbase + ch->hwidx * 0x100;
                ch->ioctrl = ch->iostart + 0x10;
                ch->timer_bit = 0;
                break;
        }

        if (cmt->info->width == (sizeof(ch->max_match_value) * 8))
                ch->max_match_value = ~0;
        else
                ch->max_match_value = (1 << cmt->info->width) - 1;

        ch->match_value = ch->max_match_value;
        raw_spin_lock_init(&ch->lock);

        ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev),
                              clockevent, clocksource);
        if (ret) {
                dev_err(&cmt->pdev->dev, "ch%u: registration failed\n",
                        ch->index);
                return ret;
        }
        ch->cs_enabled = false;

        return 0;
}

static int sh_cmt_map_memory(struct sh_cmt_device *cmt)
{
        struct resource *mem;

        mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0);
        if (!mem) {
                dev_err(&cmt->pdev->dev, "failed to get I/O memory\n");
                return -ENXIO;
        }

        cmt->mapbase = ioremap(mem->start, resource_size(mem));
        if (cmt->mapbase == NULL) {
                dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n");
                return -ENXIO;
        }

        return 0;
}

static const struct platform_device_id sh_cmt_id_table[] = {
        { "sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] },
        { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] },
        { }
};
MODULE_DEVICE_TABLE(platform, sh_cmt_id_table);

static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
        {
                /* deprecated, preserved for backward compatibility */
                .compatible = "renesas,cmt-48",
                .data = &sh_cmt_info[SH_CMT_48BIT]
        },
        {
                /* deprecated, preserved for backward compatibility */
                .compatible = "renesas,cmt-48-gen2",
                .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
        },
        {
                .compatible = "renesas,r8a7740-cmt1",
                .data = &sh_cmt_info[SH_CMT_48BIT]
        },
        {
                .compatible = "renesas,sh73a0-cmt1",
                .data = &sh_cmt_info[SH_CMT_48BIT]
        },
        {
                .compatible = "renesas,rcar-gen2-cmt0",
                .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
        },
        {
                .compatible = "renesas,rcar-gen2-cmt1",
                .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
        },
        {
                .compatible = "renesas,rcar-gen3-cmt0",
                .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
        },
        {
                .compatible = "renesas,rcar-gen3-cmt1",
                .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
        },
        { }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);

static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
{
        unsigned int mask;
        unsigned int i;
        int ret;

        cmt->pdev = pdev;
        raw_spin_lock_init(&cmt->lock);

        if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
                cmt->info = of_device_get_match_data(&pdev->dev);
                cmt->hw_channels = cmt->info->channels_mask;
        } else if (pdev->dev.platform_data) {
                struct sh_timer_config *cfg = pdev->dev.platform_data;
                const struct platform_device_id *id = pdev->id_entry;

                cmt->info = (const struct sh_cmt_info *)id->driver_data;
                cmt->hw_channels = cfg->channels_mask;
        } else {
                dev_err(&cmt->pdev->dev, "missing platform data\n");
                return -ENXIO;
        }

        /* Get hold of clock. */
        cmt->clk = clk_get(&cmt->pdev->dev, "fck");
        if (IS_ERR(cmt->clk)) {
                dev_err(&cmt->pdev->dev, "cannot get clock\n");
                return PTR_ERR(cmt->clk);
        }

        ret = clk_prepare(cmt->clk);
        if (ret < 0)
                goto err_clk_put;

        /* Determine clock rate. */
        ret = clk_enable(cmt->clk);
        if (ret < 0)
                goto err_clk_unprepare;

        if (cmt->info->width == 16)
                cmt->rate = clk_get_rate(cmt->clk) / 512;
        else
                cmt->rate = clk_get_rate(cmt->clk) / 8;

        clk_disable(cmt->clk);

        /* Map the memory resource(s). */
        ret = sh_cmt_map_memory(cmt);
        if (ret < 0)
                goto err_clk_unprepare;

        /* Allocate and setup the channels. */
        cmt->num_channels = hweight8(cmt->hw_channels);
        cmt->channels = kcalloc(cmt->num_channels, sizeof(*cmt->channels),
                                GFP_KERNEL);
        if (cmt->channels == NULL) {
                ret = -ENOMEM;
                goto err_unmap;
        }

        /*
         * Use the first channel as a clock event device and the second channel
         * as a clock source. If only one channel is available use it for both.
         */
        for (i = 0, mask = cmt->hw_channels; i < cmt->num_channels; ++i) {
                unsigned int hwidx = ffs(mask) - 1;
                bool clocksource = i == 1 || cmt->num_channels == 1;
                bool clockevent = i == 0;

                ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx,
                                           clockevent, clocksource, cmt);
                if (ret < 0)
                        goto err_unmap;

                mask &= ~(1 << hwidx);
        }

        platform_set_drvdata(pdev, cmt);

        return 0;

err_unmap:
        kfree(cmt->channels);
        iounmap(cmt->mapbase);
err_clk_unprepare:
        clk_unprepare(cmt->clk);
err_clk_put:
        clk_put(cmt->clk);
        return ret;
}

static int sh_cmt_probe(struct platform_device *pdev)
{
        struct sh_cmt_device *cmt = platform_get_drvdata(pdev);
        int ret;

        if (!is_sh_early_platform_device(pdev)) {
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
        }

        if (cmt) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                goto out;
        }

        cmt = kzalloc(sizeof(*cmt), GFP_KERNEL);
        if (cmt == NULL)
                return -ENOMEM;

        ret = sh_cmt_setup(cmt, pdev);
        if (ret) {
                kfree(cmt);
                pm_runtime_idle(&pdev->dev);
                return ret;
        }
        if (is_sh_early_platform_device(pdev))
                return 0;

 out:
        if (cmt->has_clockevent || cmt->has_clocksource)
                pm_runtime_irq_safe(&pdev->dev);
        else
                pm_runtime_idle(&pdev->dev);

        return 0;
}

static int sh_cmt_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
        .probe          = sh_cmt_probe,
        .remove         = sh_cmt_remove,
        .driver         = {
                .name   = "sh_cmt",
                .of_match_table = of_match_ptr(sh_cmt_of_table),
        },
        .id_table       = sh_cmt_id_table,
};

static int __init sh_cmt_init(void)
{
        return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
        platform_driver_unregister(&sh_cmt_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_cmt_device_driver);
#endif

subsys_initcall(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");