linux/arch/x86/platform/uv/uv_time.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI RTC clock/timer routines.
 *
 *  Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 *  Copyright (c) Dimitri Sivanich
 */
#include <linux/clockchips.h>
#include <linux/slab.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/cpu.h>

#define RTC_NAME                "sgi_rtc"

static u64 uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
static int uv_rtc_shutdown(struct clock_event_device *evt);

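/*
 * Rating 299 places this just below the TSC clocksource (normally rated
 * 300), so the RTC is selected only when the TSC is unusable.
 */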
static struct clocksource clocksource_uv = {
        .name           = RTC_NAME,
        .rating         = 299,
        .read           = uv_read_rtc,
        .mask           = (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

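/*
 * Template for the per-cpu clockevent; mult is computed at init time
 * from sn_rtc_cycles_per_second (see uv_rtc_setup_clock()).
 */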
static struct clock_event_device clock_event_device_uv = {
        .name                   = RTC_NAME,
        .features               = CLOCK_EVT_FEAT_ONESHOT,
        .shift                  = 20,
        .rating                 = 400,
        .irq                    = -1,
        .set_next_event         = uv_rtc_next_event,
        .set_state_shutdown     = uv_rtc_shutdown,
        .event_handler          = NULL,
};

static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);

/*
 * There is one of these allocated per node.  The trailing cpu[] array is
 * a variable-length member; uv_rtc_allocate_timers() sizes each node's
 * allocation for all possible cpus on that blade.
 */
struct uv_rtc_timer_head {
        spinlock_t      lock;
        /* next cpu waiting for timer, local node relative: */
        int             next_cpu;
        /* number of cpus on this node: */
        int             ncpus;
        struct {
                int     lcpu;           /* systemwide logical cpu number */
                u64     expires;        /* next timer expiration for this cpu */
        } cpu[1];
};

/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head         **blade_info __read_mostly;

static int                              uv_rtc_evt_enable;

/*
 * Hardware interface routines
 */

/* Send an RTC IPI to the specified cpu, which may be on another node */
static void uv_rtc_send_IPI(int cpu)
{
        unsigned long apicid, val;
        int pnode;

        apicid = cpu_physical_id(cpu);
        pnode = uv_apicid_to_pnode(apicid);
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
              (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
              (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);

        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
        if (is_uvx_hub())
                return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
                        UVXH_EVENT_OCCURRED2_RTC_1_MASK;
        return 0;
}

/* Set up the interrupt and return non-zero if early expiration has occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
        u64 val;
        unsigned long apicid = cpu_physical_id(cpu);
        int pnode = uv_cpu_to_pnode(cpu);

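        /*
         * Mask the RTC1 interrupt and park the comparator before
         * clearing any stale RTC_1 event and reprogramming.
         */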
        uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
                UVH_RTC1_INT_CONFIG_M_MASK);
        uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

        uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
                              UVXH_EVENT_OCCURRED2_RTC_1_MASK);

        val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
                ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);

        /* Set configuration */
        uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
        /* Initialize comparator value */
        uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);

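        /*
         * If the RTC has already passed the requested expiration time,
         * the comparator may never fire; report early expiration unless
         * the interrupt is in fact already pending.
         */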
        if (uv_read_rtc(NULL) <= expires)
                return 0;

        return !uv_intr_pending(pnode);
}

/*
 * Per-cpu timer tracking routines
 */

static __init void uv_rtc_deallocate_timers(void)
{
        int bid;

        for_each_possible_blade(bid) {
                kfree(blade_info[bid]);
        }
        kfree(blade_info);
}

/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
        int cpu;

        blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
        if (!blade_info)
                return -ENOMEM;

        for_each_present_cpu(cpu) {
                int nid = cpu_to_node(cpu);
                int bid = uv_cpu_to_blade_id(cpu);
                int bcpu = uv_cpu_blade_processor_id(cpu);
                struct uv_rtc_timer_head *head = blade_info[bid];

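                /*
                 * First cpu seen on this blade: allocate the timer head
                 * on the blade's own node, with room for one {lcpu,
                 * expires} entry (2 * sizeof(u64)) per possible cpu.
                 */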
                if (!head) {
                        head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
                                (uv_blade_nr_possible_cpus(bid) *
                                        2 * sizeof(u64)),
                                GFP_KERNEL, nid);
                        if (!head) {
                                uv_rtc_deallocate_timers();
                                return -ENOMEM;
                        }
                        spin_lock_init(&head->lock);
                        head->ncpus = uv_blade_nr_possible_cpus(bid);
                        head->next_cpu = -1;
                        blade_info[bid] = head;
                }

                head->cpu[bcpu].lcpu = cpu;
                head->cpu[bcpu].expires = ULLONG_MAX;
        }

        return 0;
}

/* Find and set the next expiring timer. */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
        u64 lowest = ULLONG_MAX;
        int c, bcpu = -1;

        head->next_cpu = -1;
        for (c = 0; c < head->ncpus; c++) {
                u64 exp = head->cpu[c].expires;
                if (exp < lowest) {
                        bcpu = c;
                        lowest = exp;
                }
        }
        if (bcpu >= 0) {
                head->next_cpu = bcpu;
                c = head->cpu[bcpu].lcpu;
                if (uv_setup_intr(c, lowest))
                        /* If we didn't set it up in time, trigger */
                        uv_rtc_send_IPI(c);
        } else {
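                /* No timers armed on this blade: mask the RTC1 interrupt */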
                uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
                        UVH_RTC1_INT_CONFIG_M_MASK);
        }
}

/*
 * Set expiration time for current cpu.
 *
 * Returns -ETIME if we missed the expiration time.
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
        int pnode = uv_cpu_to_pnode(cpu);
        int bid = uv_cpu_to_blade_id(cpu);
        struct uv_rtc_timer_head *head = blade_info[bid];
        int bcpu = uv_cpu_blade_processor_id(cpu);
        u64 *t = &head->cpu[bcpu].expires;
        unsigned long flags;
        int next_cpu;

        spin_lock_irqsave(&head->lock, flags);

        next_cpu = head->next_cpu;
        *t = expires;

        /* Will this one be next to go off? */
        if (next_cpu < 0 || bcpu == next_cpu ||
                        expires < head->cpu[next_cpu].expires) {
                head->next_cpu = bcpu;
                if (uv_setup_intr(cpu, expires)) {
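                        /* Already expired: clear our slot, arm the next waiter */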
                        *t = ULLONG_MAX;
                        uv_rtc_find_next_timer(head, pnode);
                        spin_unlock_irqrestore(&head->lock, flags);
                        return -ETIME;
                }
        }

        spin_unlock_irqrestore(&head->lock, flags);
        return 0;
}

/*
 * Unset expiration time for current cpu.
 *
 * Returns 1 if this timer was pending.
 */
static int uv_rtc_unset_timer(int cpu, int force)
{
        int pnode = uv_cpu_to_pnode(cpu);
        int bid = uv_cpu_to_blade_id(cpu);
        struct uv_rtc_timer_head *head = blade_info[bid];
        int bcpu = uv_cpu_blade_processor_id(cpu);
        u64 *t = &head->cpu[bcpu].expires;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&head->lock, flags);

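        /* Pending means this cpu was next to fire and its time has passed */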
        if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
                rc = 1;

        if (rc) {
                *t = ULLONG_MAX;
                /* Was the hardware set up for this timer? */
                if (head->next_cpu == bcpu)
                        uv_rtc_find_next_timer(head, pnode);
        }

        spin_unlock_irqrestore(&head->lock, flags);

        return rc;
}

/*
 * Kernel interface routines.
 */

/*
 * Read the RTC.
 *
 * Starting with HUB rev 2.0, the UV RTC register is replicated across all
 * cachelines of its own page.  This allows faster simultaneous reads
 * from a given socket.
 */
static u64 uv_read_rtc(struct clocksource *cs)
{
        unsigned long offset;

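        /* Rev 1 hubs have a single RTC copy; later hubs replicate it per cacheline */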
        if (uv_get_min_hub_revision_id() == 1)
                offset = 0;
        else
                offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

        return (u64)uv_read_local_mmr(UVH_RTC | offset);
}

/*
 * Program the next event, relative to now
 */
static int uv_rtc_next_event(unsigned long delta,
                             struct clock_event_device *ced)
{
        int ced_cpu = cpumask_first(ced->cpumask);

        return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
}

/*
 * Shut down the RTC timer
 */
static int uv_rtc_shutdown(struct clock_event_device *evt)
{
        int ced_cpu = cpumask_first(evt->cpumask);

        uv_rtc_unset_timer(ced_cpu, 1);
        return 0;
}

static void uv_rtc_interrupt(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);

        if (!ced || !ced->event_handler)
                return;

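        /* Only invoke the handler if our timer was actually pending */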
        if (uv_rtc_unset_timer(cpu, 0) != 1)
                return;

        ced->event_handler(ced);
}

static int __init uv_enable_evt_rtc(char *str)
{
        uv_rtc_evt_enable = 1;

        return 1;
}
__setup("uvrtcevt", uv_enable_evt_rtc);

static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
        struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);

        *ced = clock_event_device_uv;
        ced->cpumask = cpumask_of(smp_processor_id());
        clockevents_register_device(ced);
}

static __init int uv_rtc_setup_clock(void)
{
        int rc;

        if (!is_uv_system())
                return -ENODEV;

        rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
        if (rc)
                printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
        else
                printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
                        sn_rtc_cycles_per_second/(unsigned long)1E6);

        if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
                return rc;

        /* Set up and register clockevents */
        rc = uv_rtc_allocate_timers();
        if (rc)
                goto error;

        x86_platform_ipi_callback = uv_rtc_interrupt;

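        /* Scale factors for converting nanoseconds to RTC ticks and back */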
        clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
                                NSEC_PER_SEC, clock_event_device_uv.shift);

        clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
                                                sn_rtc_cycles_per_second;
        clock_event_device_uv.min_delta_ticks = 1;

        clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
                                (NSEC_PER_SEC / sn_rtc_cycles_per_second);
        clock_event_device_uv.max_delta_ticks = clocksource_uv.mask;

        rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
        if (rc) {
                x86_platform_ipi_callback = NULL;
                uv_rtc_deallocate_timers();
                goto error;
        }

        printk(KERN_INFO "UV RTC clockevents registered\n");

        return 0;

error:
        clocksource_unregister(&clocksource_uv);
        printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);

        return rc;
}
arch_initcall(uv_rtc_setup_clock);