linux/arch/x86/platform/uv/uv_time.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SGI RTC clock/timer routines.
 *
 *  Copyright (c) 2009-2013 Silicon Graphics, Inc.  All Rights Reserved.
 *  Copyright (c) Dimitri Sivanich
 */
#include <linux/clockchips.h>
#include <linux/slab.h>

#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/bios.h>
#include <asm/uv/uv.h>
#include <asm/apic.h>
#include <asm/cpu.h>

#define RTC_NAME                "sgi_rtc"

static u64 uv_read_rtc(struct clocksource *cs);
static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
static int uv_rtc_shutdown(struct clock_event_device *evt);

static struct clocksource clocksource_uv = {
        .name           = RTC_NAME,
        .rating         = 299,
        .read           = uv_read_rtc,
        .mask           = (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct clock_event_device clock_event_device_uv = {
        .name                   = RTC_NAME,
        .features               = CLOCK_EVT_FEAT_ONESHOT,
        .shift                  = 20,
        .rating                 = 400,
        .irq                    = -1,
        .set_next_event         = uv_rtc_next_event,
        .set_state_shutdown     = uv_rtc_shutdown,
        .event_handler          = NULL,
};

static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);

/* There is one of these allocated per node */
struct uv_rtc_timer_head {
        spinlock_t      lock;
        /* next cpu waiting for timer, local node relative: */
        int             next_cpu;
        /* number of cpus on this node: */
        int             ncpus;
        struct {
                int     lcpu;           /* systemwide logical cpu number */
                u64     expires;        /* next timer expiration for this cpu */
        } cpu[1];               /* sized per blade at allocation time */
};

/*
 * Access to uv_rtc_timer_head via blade id.
 */
static struct uv_rtc_timer_head         **blade_info __read_mostly;

static int                              uv_rtc_evt_enable;

/*
 * Hardware interface routines
 */

/* Send IPIs to another node */
static void uv_rtc_send_IPI(int cpu)
{
        unsigned long apicid, val;
        int pnode;

        apicid = cpu_physical_id(cpu);
        pnode = uv_apicid_to_pnode(apicid);
        apicid |= uv_apicid_hibits;
        val = (1UL << UVH_IPI_INT_SEND_SHFT) |
              (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
              (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);

        uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

/* Check for an RTC interrupt pending */
static int uv_intr_pending(int pnode)
{
        if (is_uv1_hub())
                return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
                        UV1H_EVENT_OCCURRED0_RTC1_MASK;
        else if (is_uvx_hub())
                return uv_read_global_mmr64(pnode, UVXH_EVENT_OCCURRED2) &
                        UVXH_EVENT_OCCURRED2_RTC_1_MASK;
        return 0;
}

/* Set up the interrupt and return non-zero if early expiration occurred. */
static int uv_setup_intr(int cpu, u64 expires)
{
        u64 val;
        unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
        int pnode = uv_cpu_to_pnode(cpu);

        uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
                UVH_RTC1_INT_CONFIG_M_MASK);
        uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);

        if (is_uv1_hub())
                uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
                                UV1H_EVENT_OCCURRED0_RTC1_MASK);
        else
                uv_write_global_mmr64(pnode, UVXH_EVENT_OCCURRED2_ALIAS,
                                UVXH_EVENT_OCCURRED2_RTC_1_MASK);

        val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
                ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);

        /* Set configuration */
        uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
        /* Initialize comparator value */
        uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);

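        /*
         * If the RTC has not yet passed 'expires', the comparator was armed
         * in time.  Otherwise report a miss, unless an RTC interrupt is
         * already pending anyway.
         */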
        if (uv_read_rtc(NULL) <= expires)
                return 0;

        return !uv_intr_pending(pnode);
}

/*
 * Per-cpu timer tracking routines
 */

static __init void uv_rtc_deallocate_timers(void)
{
        int bid;

        for_each_possible_blade(bid) {
                kfree(blade_info[bid]);
        }
        kfree(blade_info);
}

/* Allocate per-node list of cpu timer expiration times. */
static __init int uv_rtc_allocate_timers(void)
{
        int cpu;

        blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
        if (!blade_info)
                return -ENOMEM;

        for_each_present_cpu(cpu) {
                int nid = cpu_to_node(cpu);
                int bid = uv_cpu_to_blade_id(cpu);
                int bcpu = uv_cpu_blade_processor_id(cpu);
                struct uv_rtc_timer_head *head = blade_info[bid];

                if (!head) {
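                        /*
                         * One trailing cpu[] slot per possible cpu on this
                         * blade; each slot is an int plus a u64, which pads
                         * out to two 64-bit words.
                         */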
                        head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
                                (uv_blade_nr_possible_cpus(bid) *
                                        2 * sizeof(u64)),
                                GFP_KERNEL, nid);
                        if (!head) {
                                uv_rtc_deallocate_timers();
                                return -ENOMEM;
                        }
                        spin_lock_init(&head->lock);
                        head->ncpus = uv_blade_nr_possible_cpus(bid);
                        head->next_cpu = -1;
                        blade_info[bid] = head;
                }

                head->cpu[bcpu].lcpu = cpu;
                head->cpu[bcpu].expires = ULLONG_MAX;
        }

        return 0;
}

/* Find and set the next expiring timer.  */
static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
{
        u64 lowest = ULLONG_MAX;
        int c, bcpu = -1;

        head->next_cpu = -1;
        for (c = 0; c < head->ncpus; c++) {
                u64 exp = head->cpu[c].expires;
                if (exp < lowest) {
                        bcpu = c;
                        lowest = exp;
                }
        }
        if (bcpu >= 0) {
                head->next_cpu = bcpu;
                c = head->cpu[bcpu].lcpu;
                if (uv_setup_intr(c, lowest))
                        /* If we didn't set it up in time, trigger */
                        uv_rtc_send_IPI(c);
        } else {
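                /* No timers pending on this node: mask the RTC1 interrupt. */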
                uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
                        UVH_RTC1_INT_CONFIG_M_MASK);
        }
}

/*
 * Set expiration time for current cpu.
 *
 * Returns -ETIME if we missed the expiration time.
 */
static int uv_rtc_set_timer(int cpu, u64 expires)
{
        int pnode = uv_cpu_to_pnode(cpu);
        int bid = uv_cpu_to_blade_id(cpu);
        struct uv_rtc_timer_head *head = blade_info[bid];
        int bcpu = uv_cpu_blade_processor_id(cpu);
        u64 *t = &head->cpu[bcpu].expires;
        unsigned long flags;
        int next_cpu;

        spin_lock_irqsave(&head->lock, flags);

        next_cpu = head->next_cpu;
        *t = expires;

        /* Will this one be next to go off? */
        if (next_cpu < 0 || bcpu == next_cpu ||
                        expires < head->cpu[next_cpu].expires) {
                head->next_cpu = bcpu;
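                /*
                 * Program the hardware; on a miss, invalidate this cpu's
                 * slot, fall back to the next pending timer and return
                 * -ETIME so the caller knows the expiry was already in the
                 * past.
                 */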
                if (uv_setup_intr(cpu, expires)) {
                        *t = ULLONG_MAX;
                        uv_rtc_find_next_timer(head, pnode);
                        spin_unlock_irqrestore(&head->lock, flags);
                        return -ETIME;
                }
        }

        spin_unlock_irqrestore(&head->lock, flags);
        return 0;
}

/*
 * Unset expiration time for current cpu.
 *
 * Returns 1 if this timer was pending.
 */
static int uv_rtc_unset_timer(int cpu, int force)
{
        int pnode = uv_cpu_to_pnode(cpu);
        int bid = uv_cpu_to_blade_id(cpu);
        struct uv_rtc_timer_head *head = blade_info[bid];
        int bcpu = uv_cpu_blade_processor_id(cpu);
        u64 *t = &head->cpu[bcpu].expires;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&head->lock, flags);

        if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
                rc = 1;

        if (rc) {
                *t = ULLONG_MAX;
                /* Was the hardware setup for this timer? */
                if (head->next_cpu == bcpu)
                        uv_rtc_find_next_timer(head, pnode);
        }

        spin_unlock_irqrestore(&head->lock, flags);

        return rc;
}


/*
 * Kernel interface routines.
 */

/*
 * Read the RTC.
 *
 * Starting with HUB rev 2.0, the UV RTC register is replicated across all
 * cachelines of its own page.  This allows faster simultaneous reads
 * from a given socket.
 */
static u64 uv_read_rtc(struct clocksource *cs)
{
        unsigned long offset;

        if (uv_get_min_hub_revision_id() == 1)
                offset = 0;
        else
                offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;

        return (u64)uv_read_local_mmr(UVH_RTC | offset);
}

/*
 * Program the next event, relative to now
 */
static int uv_rtc_next_event(unsigned long delta,
                             struct clock_event_device *ced)
{
        int ced_cpu = cpumask_first(ced->cpumask);

        return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
}

/*
 * Shutdown the RTC timer
 */
static int uv_rtc_shutdown(struct clock_event_device *evt)
{
        int ced_cpu = cpumask_first(evt->cpumask);

        uv_rtc_unset_timer(ced_cpu, 1);
        return 0;
}

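/*
 * RTC expiry handler, run from the X86_PLATFORM_IPI vector once
 * x86_platform_ipi_callback has been pointed at it by uv_rtc_setup_clock().
 */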
static void uv_rtc_interrupt(void)
{
        int cpu = smp_processor_id();
        struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);

        if (!ced || !ced->event_handler)
                return;

        if (uv_rtc_unset_timer(cpu, 0) != 1)
                return;

        ced->event_handler(ced);
}

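/*
 * Clockevent registration is opt-in: it only happens when "uvrtcevt" is
 * passed on the kernel command line (checked in uv_rtc_setup_clock()).
 */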
static int __init uv_enable_evt_rtc(char *str)
{
        uv_rtc_evt_enable = 1;

        return 1;
}
__setup("uvrtcevt", uv_enable_evt_rtc);

static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
        struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);

        *ced = clock_event_device_uv;
        ced->cpumask = cpumask_of(smp_processor_id());
        clockevents_register_device(ced);
}

static __init int uv_rtc_setup_clock(void)
{
        int rc;

        if (!is_uv_system())
                return -ENODEV;

        rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
        if (rc)
                printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
        else
                printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
                        sn_rtc_cycles_per_second/(unsigned long)1E6);

        if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
                return rc;

        /* Setup and register clockevents */
        rc = uv_rtc_allocate_timers();
        if (rc)
                goto error;

        x86_platform_ipi_callback = uv_rtc_interrupt;

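        /* Derive mult from the RTC frequency for the fixed .shift of 20. */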
        clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
                                NSEC_PER_SEC, clock_event_device_uv.shift);

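        /*
         * One RTC tick is the smallest programmable delta; the clocksource
         * mask bounds the largest.
         */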
        clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
                                                sn_rtc_cycles_per_second;
        clock_event_device_uv.min_delta_ticks = 1;

        clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
                                (NSEC_PER_SEC / sn_rtc_cycles_per_second);
        clock_event_device_uv.max_delta_ticks = clocksource_uv.mask;

        rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
        if (rc) {
                x86_platform_ipi_callback = NULL;
                uv_rtc_deallocate_timers();
                goto error;
        }

        printk(KERN_INFO "UV RTC clockevents registered\n");

        return 0;

error:
        clocksource_unregister(&clocksource_uv);
        printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);

        return rc;
}
arch_initcall(uv_rtc_setup_clock);