linux/drivers/clocksource/timer-mp-csky.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/sched_clock.h>
#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <asm/reg_ops.h>

#include "timer-of.h"

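/*
 * Per-cpu private timer registers, accessed with mfcr/mtcr. The roles
 * below are inferred from how this driver uses them: CCVR is the
 * free-running counter value, CTLR starts/stops the timer, LVR takes
 * the next-event delta and TSR is the pending status cleared in the
 * interrupt handler.
 */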
#define PTIM_CCVR       "cr<3, 14>"
#define PTIM_CTLR       "cr<0, 14>"
#define PTIM_LVR        "cr<6, 14>"
#define PTIM_TSR        "cr<1, 14>"

static int csky_mptimer_irq;

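/*
 * clock_event_device callbacks: the next-event delta is programmed into
 * LVR, and the timer is started/stopped by writing 1/0 to CTLR.
 */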
static int csky_mptimer_set_next_event(unsigned long delta,
                                       struct clock_event_device *ce)
{
        mtcr(PTIM_LVR, delta);

        return 0;
}

static int csky_mptimer_shutdown(struct clock_event_device *ce)
{
        mtcr(PTIM_CTLR, 0);

        return 0;
}

static int csky_mptimer_oneshot(struct clock_event_device *ce)
{
        mtcr(PTIM_CTLR, 1);

        return 0;
}

static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce)
{
        mtcr(PTIM_CTLR, 0);

        return 0;
}

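/*
 * One timer_of per cpu: each core has a private timer, so the clockevent
 * is percpu/oneshot and only the clock is taken from the DT node
 * (TIMER_OF_CLOCK).
 */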
static DEFINE_PER_CPU(struct timer_of, csky_to) = {
        .flags                                  = TIMER_OF_CLOCK,
        .clkevt = {
                .rating                         = 300,
                .features                       = CLOCK_EVT_FEAT_PERCPU |
                                                  CLOCK_EVT_FEAT_ONESHOT,
                .set_state_shutdown             = csky_mptimer_shutdown,
                .set_state_oneshot              = csky_mptimer_oneshot,
                .set_state_oneshot_stopped      = csky_mptimer_oneshot_stopped,
                .set_next_event                 = csky_mptimer_set_next_event,
        },
};

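/*
 * Timer interrupt: acknowledge the private timer by clearing TSR, then
 * forward the tick to this cpu's clockevent handler.
 */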
static irqreturn_t csky_timer_interrupt(int irq, void *dev)
{
        struct timer_of *to = this_cpu_ptr(&csky_to);

        mtcr(PTIM_TSR, 0);

        to->clkevt.event_handler(&to->clkevt);

        return IRQ_HANDLED;
}

/*
 * per-cpu clock event: cpu hotplug start/stop callbacks
 */
static int csky_mptimer_starting_cpu(unsigned int cpu)
{
        struct timer_of *to = per_cpu_ptr(&csky_to, cpu);

        to->clkevt.cpumask = cpumask_of(cpu);

        enable_percpu_irq(csky_mptimer_irq, 0);

        clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
                                        2, ULONG_MAX);

        return 0;
}

static int csky_mptimer_dying_cpu(unsigned int cpu)
{
        disable_percpu_irq(csky_mptimer_irq);

        return 0;
}

/*
 * clock source
 */
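/*
 * Both sched_clock and the registered clocksource read the 32-bit
 * free-running CCVR counter directly.
 */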
static u64 notrace sched_clock_read(void)
{
        return (u64)mfcr(PTIM_CCVR);
}

static u64 clksrc_read(struct clocksource *c)
{
        return (u64)mfcr(PTIM_CCVR);
}

struct clocksource csky_clocksource = {
        .name   = "csky",
        .rating = 400,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .read   = clksrc_read,
};

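/*
 * Probe: map the percpu irq from the DT node, request it once for all
 * cpus, initialise every cpu's timer_of, then register the clocksource,
 * sched_clock and the cpu hotplug callbacks.
 */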
static int __init csky_mptimer_init(struct device_node *np)
{
        int ret, cpu, cpu_rollback;
        struct timer_of *to = NULL;

        /*
         * csky_mptimer is designed for C-SKY SMP multi-processors, and
         * every core has its own private irq and regs for clkevt and
         * clksrc.
         *
         * The regs are accessed with the mfcr/mtcr instructions rather
         * than through an mmio mapping, so no mmio address is needed in
         * the dts; only the clk and the irq number must be given (see
         * the example node sketched at the end of this file).
         *
         * The mptimer uses a private irq whose number is the same on
         * every core, so request_percpu_irq() is called here instead of
         * letting timer_of_init() request it.
         */
        csky_mptimer_irq = irq_of_parse_and_map(np, 0);
        if (csky_mptimer_irq <= 0)
                return -EINVAL;

        ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
                                 "csky_mp_timer", &csky_to);
        if (ret)
                return -EINVAL;

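        /*
         * Parse the clock rate for each possible cpu's timer_of; on
         * failure, unwind the cpus initialised so far.
         */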
        for_each_possible_cpu(cpu) {
                to = per_cpu_ptr(&csky_to, cpu);
                ret = timer_of_init(np, to);
                if (ret)
                        goto rollback;
        }

        clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
        sched_clock_register(sched_clock_read, 32, timer_of_rate(to));

        ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
                                "clockevents/csky/timer:starting",
                                csky_mptimer_starting_cpu,
                                csky_mptimer_dying_cpu);
        if (ret)
                return -EINVAL;

        return 0;

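/*
 * Undo timer_of_init() for the cpus that were initialised before the
 * failing one.
 */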
rollback:
        for_each_possible_cpu(cpu_rollback) {
                if (cpu_rollback == cpu)
                        break;

                to = per_cpu_ptr(&csky_to, cpu_rollback);
                timer_of_cleanup(to);
        }
        return -EINVAL;
}
TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init);
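
/*
 * Sketch only: a device tree node this driver could bind against. The
 * compatible string matches the TIMER_OF_DECLARE above; the clock and
 * interrupt specifiers are illustrative and depend on the SoC's
 * interrupt controller and clock tree.
 *
 *      timer: timer {
 *              compatible = "csky,mptimer";
 *              clocks = <&apb_clk>;
 *              interrupts = <16>;
 *              interrupt-parent = <&intc>;
 *      };
 */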