linux/arch/x86/kernel/hpet.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/clockchips.h>
   3#include <linux/interrupt.h>
   4#include <linux/export.h>
   5#include <linux/delay.h>
   6#include <linux/hpet.h>
   7#include <linux/cpu.h>
   8#include <linux/irq.h>
   9
  10#include <asm/irq_remapping.h>
  11#include <asm/hpet.h>
  12#include <asm/time.h>
  13#include <asm/mwait.h>
  14
  15#undef  pr_fmt
  16#define pr_fmt(fmt) "hpet: " fmt
  17
  18enum hpet_mode {
  19        HPET_MODE_UNUSED,
  20        HPET_MODE_LEGACY,
  21        HPET_MODE_CLOCKEVT,
  22        HPET_MODE_DEVICE,
  23};
  24
  25struct hpet_channel {
  26        struct clock_event_device       evt;
  27        unsigned int                    num;
  28        unsigned int                    cpu;
  29        unsigned int                    irq;
  30        unsigned int                    in_use;
  31        enum hpet_mode                  mode;
  32        unsigned int                    boot_cfg;
  33        char                            name[10];
  34};
  35
  36struct hpet_base {
  37        unsigned int                    nr_channels;
  38        unsigned int                    nr_clockevents;
  39        unsigned int                    boot_cfg;
  40        struct hpet_channel             *channels;
  41};
  42
  43#define HPET_MASK                       CLOCKSOURCE_MASK(32)
  44
  45#define HPET_MIN_CYCLES                 128
  46#define HPET_MIN_PROG_DELTA             (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
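     /*
      * Note: only the lower 32 bits of the main counter are used by the
      * clocksource (HPET_MASK above, read_hpet() below). The minimum
      * programmable delta comes out to 128 + 64 = 192 HPET cycles, i.e.
      * the 1.5 * HPET_MIN_CYCLES referred to in hpet_clkevt_set_next_event().
      */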
  47
  48/*
  49 * HPET address is set in acpi/boot.c, when an ACPI entry exists
  50 */
  51unsigned long                           hpet_address;
  52u8                                      hpet_blockid; /* OS timer block num */
  53bool                                    hpet_msi_disable;
  54
  55#ifdef CONFIG_GENERIC_MSI_IRQ
  56static DEFINE_PER_CPU(struct hpet_channel *, cpu_hpet_channel);
  57static struct irq_domain                *hpet_domain;
  58#endif
  59
  60static void __iomem                     *hpet_virt_address;
  61
  62static struct hpet_base                 hpet_base;
  63
  64static bool                             hpet_legacy_int_enabled;
  65static unsigned long                    hpet_freq;
  66
  67bool                                    boot_hpet_disable;
  68bool                                    hpet_force_user;
  69static bool                             hpet_verbose;
  70
  71static inline
  72struct hpet_channel *clockevent_to_channel(struct clock_event_device *evt)
  73{
  74        return container_of(evt, struct hpet_channel, evt);
  75}
  76
  77inline unsigned int hpet_readl(unsigned int a)
  78{
  79        return readl(hpet_virt_address + a);
  80}
  81
  82static inline void hpet_writel(unsigned int d, unsigned int a)
  83{
  84        writel(d, hpet_virt_address + a);
  85}
  86
  87static inline void hpet_set_mapping(void)
  88{
  89        hpet_virt_address = ioremap(hpet_address, HPET_MMAP_SIZE);
  90}
  91
  92static inline void hpet_clear_mapping(void)
  93{
  94        iounmap(hpet_virt_address);
  95        hpet_virt_address = NULL;
  96}
  97
  98/*
  99 * HPET command line enable / disable
 100 */
 101static int __init hpet_setup(char *str)
 102{
 103        while (str) {
 104                char *next = strchr(str, ',');
 105
 106                if (next)
 107                        *next++ = 0;
 108                if (!strncmp("disable", str, 7))
 109                        boot_hpet_disable = true;
 110                if (!strncmp("force", str, 5))
 111                        hpet_force_user = true;
 112                if (!strncmp("verbose", str, 7))
 113                        hpet_verbose = true;
 114                str = next;
 115        }
 116        return 1;
 117}
 118__setup("hpet=", hpet_setup);
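     /*
      * Example of the option parsing above: booting with "hpet=force,verbose"
      * sets hpet_force_user and hpet_verbose, while "hpet=disable" (or the
      * "nohpet" option below) sets boot_hpet_disable.
      */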
 119
 120static int __init disable_hpet(char *str)
 121{
 122        boot_hpet_disable = true;
 123        return 1;
 124}
 125__setup("nohpet", disable_hpet);
 126
 127static inline int is_hpet_capable(void)
 128{
 129        return !boot_hpet_disable && hpet_address;
 130}
 131
 132/**
 133 * is_hpet_enabled - Check whether the legacy HPET timer interrupt is enabled
 134 */
 135int is_hpet_enabled(void)
 136{
 137        return is_hpet_capable() && hpet_legacy_int_enabled;
 138}
 139EXPORT_SYMBOL_GPL(is_hpet_enabled);
 140
 141static void _hpet_print_config(const char *function, int line)
 142{
 143        u32 i, id, period, cfg, status, channels, l, h;
 144
 145        pr_info("%s(%d):\n", function, line);
 146
 147        id = hpet_readl(HPET_ID);
 148        period = hpet_readl(HPET_PERIOD);
 149        pr_info("ID: 0x%x, PERIOD: 0x%x\n", id, period);
 150
 151        cfg = hpet_readl(HPET_CFG);
 152        status = hpet_readl(HPET_STATUS);
 153        pr_info("CFG: 0x%x, STATUS: 0x%x\n", cfg, status);
 154
 155        l = hpet_readl(HPET_COUNTER);
 156        h = hpet_readl(HPET_COUNTER+4);
 157        pr_info("COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
 158
 159        channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
 160
 161        for (i = 0; i < channels; i++) {
 162                l = hpet_readl(HPET_Tn_CFG(i));
 163                h = hpet_readl(HPET_Tn_CFG(i)+4);
 164                pr_info("T%d: CFG_l: 0x%x, CFG_h: 0x%x\n", i, l, h);
 165
 166                l = hpet_readl(HPET_Tn_CMP(i));
 167                h = hpet_readl(HPET_Tn_CMP(i)+4);
 168                pr_info("T%d: CMP_l: 0x%x, CMP_h: 0x%x\n", i, l, h);
 169
 170                l = hpet_readl(HPET_Tn_ROUTE(i));
 171                h = hpet_readl(HPET_Tn_ROUTE(i)+4);
 172                pr_info("T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n", i, l, h);
 173        }
 174}
 175
 176#define hpet_print_config()                                     \
 177do {                                                            \
 178        if (hpet_verbose)                                       \
 179                _hpet_print_config(__func__, __LINE__); \
 180} while (0)
 181
 182/*
 183 * When the HPET driver (/dev/hpet) is enabled, we need to reserve
 184 * timer 0 and timer 1 in case of RTC emulation.
 185 */
 186#ifdef CONFIG_HPET
 187
 188static void __init hpet_reserve_platform_timers(void)
 189{
 190        struct hpet_data hd;
 191        unsigned int i;
 192
 193        memset(&hd, 0, sizeof(hd));
 194        hd.hd_phys_address      = hpet_address;
 195        hd.hd_address           = hpet_virt_address;
 196        hd.hd_nirqs             = hpet_base.nr_channels;
 197
 198        /*
 199         * NOTE that hd_irq[] reflects IOAPIC input pins (LEGACY_8254
 200         * is wrong for i8259!) not the output IRQ.  Many BIOS writers
 201         * don't bother configuring *any* comparator interrupts.
 202         */
 203        hd.hd_irq[0] = HPET_LEGACY_8254;
 204        hd.hd_irq[1] = HPET_LEGACY_RTC;
 205
 206        for (i = 0; i < hpet_base.nr_channels; i++) {
 207                struct hpet_channel *hc = hpet_base.channels + i;
 208
 209                if (i >= 2)
 210                        hd.hd_irq[i] = hc->irq;
 211
 212                switch (hc->mode) {
 213                case HPET_MODE_UNUSED:
 214                case HPET_MODE_DEVICE:
 215                        hc->mode = HPET_MODE_DEVICE;
 216                        break;
 217                case HPET_MODE_CLOCKEVT:
 218                case HPET_MODE_LEGACY:
 219                        hpet_reserve_timer(&hd, hc->num);
 220                        break;
 221                }
 222        }
 223
 224        hpet_alloc(&hd);
 225}
 226
 227static void __init hpet_select_device_channel(void)
 228{
 229        int i;
 230
 231        for (i = 0; i < hpet_base.nr_channels; i++) {
 232                struct hpet_channel *hc = hpet_base.channels + i;
 233
 234                /* Associate the first unused channel to /dev/hpet */
 235                if (hc->mode == HPET_MODE_UNUSED) {
 236                        hc->mode = HPET_MODE_DEVICE;
 237                        return;
 238                }
 239        }
 240}
 241
 242#else
 243static inline void hpet_reserve_platform_timers(void) { }
 244static inline void hpet_select_device_channel(void) {}
 245#endif
 246
 247/* Common HPET functions */
 248static void hpet_stop_counter(void)
 249{
 250        u32 cfg = hpet_readl(HPET_CFG);
 251
 252        cfg &= ~HPET_CFG_ENABLE;
 253        hpet_writel(cfg, HPET_CFG);
 254}
 255
 256static void hpet_reset_counter(void)
 257{
 258        hpet_writel(0, HPET_COUNTER);
 259        hpet_writel(0, HPET_COUNTER + 4);
 260}
 261
 262static void hpet_start_counter(void)
 263{
 264        unsigned int cfg = hpet_readl(HPET_CFG);
 265
 266        cfg |= HPET_CFG_ENABLE;
 267        hpet_writel(cfg, HPET_CFG);
 268}
 269
 270static void hpet_restart_counter(void)
 271{
 272        hpet_stop_counter();
 273        hpet_reset_counter();
 274        hpet_start_counter();
 275}
 276
 277static void hpet_resume_device(void)
 278{
 279        force_hpet_resume();
 280}
 281
 282static void hpet_resume_counter(struct clocksource *cs)
 283{
 284        hpet_resume_device();
 285        hpet_restart_counter();
 286}
 287
 288static void hpet_enable_legacy_int(void)
 289{
 290        unsigned int cfg = hpet_readl(HPET_CFG);
 291
 292        cfg |= HPET_CFG_LEGACY;
 293        hpet_writel(cfg, HPET_CFG);
 294        hpet_legacy_int_enabled = true;
 295}
 296
 297static int hpet_clkevt_set_state_periodic(struct clock_event_device *evt)
 298{
 299        unsigned int channel = clockevent_to_channel(evt)->num;
 300        unsigned int cfg, cmp, now;
 301        uint64_t delta;
 302
 303        hpet_stop_counter();
 304        delta = ((uint64_t)(NSEC_PER_SEC / HZ)) * evt->mult;
 305        delta >>= evt->shift;
 306        now = hpet_readl(HPET_COUNTER);
 307        cmp = now + (unsigned int)delta;
 308        cfg = hpet_readl(HPET_Tn_CFG(channel));
 309        cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
 310               HPET_TN_32BIT;
 311        hpet_writel(cfg, HPET_Tn_CFG(channel));
 312        hpet_writel(cmp, HPET_Tn_CMP(channel));
 313        udelay(1);
 314        /*
 315         * HPET on AMD 81xx needs a second write (with HPET_TN_SETVAL
 316         * cleared) to T0_CMP to set the period. The HPET_TN_SETVAL
 317         * bit is automatically cleared after the first write.
 318         * (See AMD-8111 HyperTransport I/O Hub Data Sheet,
 319         * Publication # 24674)
 320         */
 321        hpet_writel((unsigned int)delta, HPET_Tn_CMP(channel));
 322        hpet_start_counter();
 323        hpet_print_config();
 324
 325        return 0;
 326}
 327
 328static int hpet_clkevt_set_state_oneshot(struct clock_event_device *evt)
 329{
 330        unsigned int channel = clockevent_to_channel(evt)->num;
 331        unsigned int cfg;
 332
 333        cfg = hpet_readl(HPET_Tn_CFG(channel));
 334        cfg &= ~HPET_TN_PERIODIC;
 335        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
 336        hpet_writel(cfg, HPET_Tn_CFG(channel));
 337
 338        return 0;
 339}
 340
 341static int hpet_clkevt_set_state_shutdown(struct clock_event_device *evt)
 342{
 343        unsigned int channel = clockevent_to_channel(evt)->num;
 344        unsigned int cfg;
 345
 346        cfg = hpet_readl(HPET_Tn_CFG(channel));
 347        cfg &= ~HPET_TN_ENABLE;
 348        hpet_writel(cfg, HPET_Tn_CFG(channel));
 349
 350        return 0;
 351}
 352
 353static int hpet_clkevt_legacy_resume(struct clock_event_device *evt)
 354{
 355        hpet_enable_legacy_int();
 356        hpet_print_config();
 357        return 0;
 358}
 359
 360static int
 361hpet_clkevt_set_next_event(unsigned long delta, struct clock_event_device *evt)
 362{
 363        unsigned int channel = clockevent_to_channel(evt)->num;
 364        u32 cnt;
 365        s32 res;
 366
 367        cnt = hpet_readl(HPET_COUNTER);
 368        cnt += (u32) delta;
 369        hpet_writel(cnt, HPET_Tn_CMP(channel));
 370
 371        /*
 372         * HPETs are a complete disaster. The compare register is
  373         * based on an equal comparison and neither provides a less
  374         * than or equal functionality (which would require taking
 375         * the wraparound into account) nor a simple count down event
 376         * mode. Further the write to the comparator register is
 377         * delayed internally up to two HPET clock cycles in certain
 378         * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
 379         * longer delays. We worked around that by reading back the
 380         * compare register, but that required another workaround for
 381         * ICH9,10 chips where the first readout after write can
 382         * return the old stale value. We already had a minimum
  383         * programming delta of 5us enforced, but an NMI or SMI hitting
 384         * between the counter readout and the comparator write can
 385         * move us behind that point easily. Now instead of reading
 386         * the compare register back several times, we make the ETIME
 387         * decision based on the following: Return ETIME if the
 388         * counter value after the write is less than HPET_MIN_CYCLES
 389         * away from the event or if the counter is already ahead of
 390         * the event. The minimum programming delta for the generic
 391         * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
 392         */
 393        res = (s32)(cnt - hpet_readl(HPET_COUNTER));
 394
 395        return res < HPET_MIN_CYCLES ? -ETIME : 0;
 396}
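     /*
      * Worked example for the -ETIME rule above: assume the counter read 1000
      * and delta was 100, so cnt = 1100 was written to the comparator. If the
      * counter has meanwhile advanced to 1010, res = 1100 - 1010 = 90, which
      * is below HPET_MIN_CYCLES (128), so -ETIME is returned and the
      * clockevents core retries the programming.
      */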
 397
 398static void hpet_init_clockevent(struct hpet_channel *hc, unsigned int rating)
 399{
 400        struct clock_event_device *evt = &hc->evt;
 401
 402        evt->rating             = rating;
 403        evt->irq                = hc->irq;
 404        evt->name               = hc->name;
 405        evt->cpumask            = cpumask_of(hc->cpu);
 406        evt->set_state_oneshot  = hpet_clkevt_set_state_oneshot;
 407        evt->set_next_event     = hpet_clkevt_set_next_event;
 408        evt->set_state_shutdown = hpet_clkevt_set_state_shutdown;
 409
 410        evt->features = CLOCK_EVT_FEAT_ONESHOT;
 411        if (hc->boot_cfg & HPET_TN_PERIODIC) {
 412                evt->features           |= CLOCK_EVT_FEAT_PERIODIC;
 413                evt->set_state_periodic = hpet_clkevt_set_state_periodic;
 414        }
 415}
 416
 417static void __init hpet_legacy_clockevent_register(struct hpet_channel *hc)
 418{
 419        /*
 420         * Start HPET with the boot CPU's cpumask and make it global after
 421         * the IO_APIC has been initialized.
 422         */
 423        hc->cpu = boot_cpu_data.cpu_index;
 424        strncpy(hc->name, "hpet", sizeof(hc->name));
 425        hpet_init_clockevent(hc, 50);
 426
 427        hc->evt.tick_resume     = hpet_clkevt_legacy_resume;
 428
 429        /*
 430         * Legacy horrors and sins from the past. HPET used periodic mode
 431         * unconditionally forever on the legacy channel 0. Removing the
 432         * below hack and using the conditional in hpet_init_clockevent()
 433         * makes at least Qemu and one hardware machine fail to boot.
 434         * There are two issues which cause the boot failure:
 435         *
 436         * #1 After the timer delivery test in IOAPIC and the IOAPIC setup
 437         *    the next interrupt is not delivered despite the HPET channel
 438         *    being programmed correctly. Reprogramming the HPET after
 439         *    switching to IOAPIC makes it work again. After fixing this,
 440         *    the next issue surfaces:
 441         *
 442         * #2 Due to the unconditional periodic mode availability the Local
 443         *    APIC timer calibration can hijack the global clockevents
 444         *    event handler without causing damage. Using oneshot at this
  445         *    stage makes it hang because the HPET does not get
 446         *    reprogrammed due to the handler hijacking. Duh, stupid me!
 447         *
 448         * Both issues require major surgery and especially the kick HPET
 449         * again after enabling IOAPIC results in really nasty hackery.
 450         * This 'assume periodic works' magic has survived since HPET
 451         * support got added, so it's questionable whether this should be
 452         * fixed. Both Qemu and the failing hardware machine support
 453         * periodic mode despite the fact that both don't advertise it in
 454         * the configuration register and both need that extra kick after
 455         * switching to IOAPIC. Seems to be a feature...
 456         */
 457        hc->evt.features                |= CLOCK_EVT_FEAT_PERIODIC;
 458        hc->evt.set_state_periodic      = hpet_clkevt_set_state_periodic;
 459
 460        /* Start HPET legacy interrupts */
 461        hpet_enable_legacy_int();
 462
 463        clockevents_config_and_register(&hc->evt, hpet_freq,
 464                                        HPET_MIN_PROG_DELTA, 0x7FFFFFFF);
 465        global_clock_event = &hc->evt;
 466        pr_debug("Clockevent registered\n");
 467}
 468
 469/*
 470 * HPET MSI Support
 471 */
 472#ifdef CONFIG_GENERIC_MSI_IRQ
 473static void hpet_msi_unmask(struct irq_data *data)
 474{
 475        struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
 476        unsigned int cfg;
 477
 478        cfg = hpet_readl(HPET_Tn_CFG(hc->num));
 479        cfg |= HPET_TN_ENABLE | HPET_TN_FSB;
 480        hpet_writel(cfg, HPET_Tn_CFG(hc->num));
 481}
 482
 483static void hpet_msi_mask(struct irq_data *data)
 484{
 485        struct hpet_channel *hc = irq_data_get_irq_handler_data(data);
 486        unsigned int cfg;
 487
 488        cfg = hpet_readl(HPET_Tn_CFG(hc->num));
 489        cfg &= ~(HPET_TN_ENABLE | HPET_TN_FSB);
 490        hpet_writel(cfg, HPET_Tn_CFG(hc->num));
 491}
 492
 493static void hpet_msi_write(struct hpet_channel *hc, struct msi_msg *msg)
 494{
 495        hpet_writel(msg->data, HPET_Tn_ROUTE(hc->num));
 496        hpet_writel(msg->address_lo, HPET_Tn_ROUTE(hc->num) + 4);
 497}
 498
 499static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
 500{
 501        hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
 502}
 503
 504static struct irq_chip hpet_msi_controller __ro_after_init = {
 505        .name = "HPET-MSI",
 506        .irq_unmask = hpet_msi_unmask,
 507        .irq_mask = hpet_msi_mask,
 508        .irq_ack = irq_chip_ack_parent,
 509        .irq_set_affinity = msi_domain_set_affinity,
 510        .irq_retrigger = irq_chip_retrigger_hierarchy,
 511        .irq_write_msi_msg = hpet_msi_write_msg,
 512        .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
 513};
 514
 515static int hpet_msi_init(struct irq_domain *domain,
 516                         struct msi_domain_info *info, unsigned int virq,
 517                         irq_hw_number_t hwirq, msi_alloc_info_t *arg)
 518{
 519        irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
 520        irq_domain_set_info(domain, virq, arg->hwirq, info->chip, NULL,
 521                            handle_edge_irq, arg->data, "edge");
 522
 523        return 0;
 524}
 525
 526static void hpet_msi_free(struct irq_domain *domain,
 527                          struct msi_domain_info *info, unsigned int virq)
 528{
 529        irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
 530}
 531
 532static struct msi_domain_ops hpet_msi_domain_ops = {
 533        .msi_init       = hpet_msi_init,
 534        .msi_free       = hpet_msi_free,
 535};
 536
 537static struct msi_domain_info hpet_msi_domain_info = {
 538        .ops            = &hpet_msi_domain_ops,
 539        .chip           = &hpet_msi_controller,
 540        .flags          = MSI_FLAG_USE_DEF_DOM_OPS,
 541};
 542
 543static struct irq_domain *hpet_create_irq_domain(int hpet_id)
 544{
 545        struct msi_domain_info *domain_info;
 546        struct irq_domain *parent, *d;
 547        struct fwnode_handle *fn;
 548        struct irq_fwspec fwspec;
 549
 550        if (x86_vector_domain == NULL)
 551                return NULL;
 552
 553        domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL);
 554        if (!domain_info)
 555                return NULL;
 556
 557        *domain_info = hpet_msi_domain_info;
 558        domain_info->data = (void *)(long)hpet_id;
 559
 560        fn = irq_domain_alloc_named_id_fwnode(hpet_msi_controller.name,
 561                                              hpet_id);
 562        if (!fn) {
 563                kfree(domain_info);
 564                return NULL;
 565        }
 566
 567        fwspec.fwnode = fn;
 568        fwspec.param_count = 1;
 569        fwspec.param[0] = hpet_id;
 570
 571        parent = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
 572        if (!parent) {
 573                irq_domain_free_fwnode(fn);
 574                kfree(domain_info);
 575                return NULL;
 576        }
 577        if (parent != x86_vector_domain)
 578                hpet_msi_controller.name = "IR-HPET-MSI";
 579
 580        d = msi_create_irq_domain(fn, domain_info, parent);
 581        if (!d) {
 582                irq_domain_free_fwnode(fn);
 583                kfree(domain_info);
 584        }
 585        return d;
 586}
 587
 588static inline int hpet_dev_id(struct irq_domain *domain)
 589{
 590        struct msi_domain_info *info = msi_get_domain_info(domain);
 591
 592        return (int)(long)info->data;
 593}
 594
 595static int hpet_assign_irq(struct irq_domain *domain, struct hpet_channel *hc,
 596                           int dev_num)
 597{
 598        struct irq_alloc_info info;
 599
 600        init_irq_alloc_info(&info, NULL);
 601        info.type = X86_IRQ_ALLOC_TYPE_HPET;
 602        info.data = hc;
 603        info.devid = hpet_dev_id(domain);
 604        info.hwirq = dev_num;
 605
 606        return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
 607}
 608
 609static int hpet_clkevt_msi_resume(struct clock_event_device *evt)
 610{
 611        struct hpet_channel *hc = clockevent_to_channel(evt);
 612        struct irq_data *data = irq_get_irq_data(hc->irq);
 613        struct msi_msg msg;
 614
 615        /* Restore the MSI msg and unmask the interrupt */
 616        irq_chip_compose_msi_msg(data, &msg);
 617        hpet_msi_write(hc, &msg);
 618        hpet_msi_unmask(data);
 619        return 0;
 620}
 621
 622static irqreturn_t hpet_msi_interrupt_handler(int irq, void *data)
 623{
 624        struct hpet_channel *hc = data;
 625        struct clock_event_device *evt = &hc->evt;
 626
 627        if (!evt->event_handler) {
 628                pr_info("Spurious interrupt HPET channel %d\n", hc->num);
 629                return IRQ_HANDLED;
 630        }
 631
 632        evt->event_handler(evt);
 633        return IRQ_HANDLED;
 634}
 635
 636static int hpet_setup_msi_irq(struct hpet_channel *hc)
 637{
 638        if (request_irq(hc->irq, hpet_msi_interrupt_handler,
 639                        IRQF_TIMER | IRQF_NOBALANCING,
 640                        hc->name, hc))
 641                return -1;
 642
 643        disable_irq(hc->irq);
 644        irq_set_affinity(hc->irq, cpumask_of(hc->cpu));
 645        enable_irq(hc->irq);
 646
 647        pr_debug("%s irq %u for MSI\n", hc->name, hc->irq);
 648
 649        return 0;
 650}
 651
 652/* Invoked from the hotplug callback on @cpu */
 653static void init_one_hpet_msi_clockevent(struct hpet_channel *hc, int cpu)
 654{
 655        struct clock_event_device *evt = &hc->evt;
 656
 657        hc->cpu = cpu;
 658        per_cpu(cpu_hpet_channel, cpu) = hc;
 659        hpet_setup_msi_irq(hc);
 660
 661        hpet_init_clockevent(hc, 110);
 662        evt->tick_resume = hpet_clkevt_msi_resume;
 663
 664        clockevents_config_and_register(evt, hpet_freq, HPET_MIN_PROG_DELTA,
 665                                        0x7FFFFFFF);
 666}
 667
 668static struct hpet_channel *hpet_get_unused_clockevent(void)
 669{
 670        int i;
 671
 672        for (i = 0; i < hpet_base.nr_channels; i++) {
 673                struct hpet_channel *hc = hpet_base.channels + i;
 674
 675                if (hc->mode != HPET_MODE_CLOCKEVT || hc->in_use)
 676                        continue;
 677                hc->in_use = 1;
 678                return hc;
 679        }
 680        return NULL;
 681}
 682
 683static int hpet_cpuhp_online(unsigned int cpu)
 684{
 685        struct hpet_channel *hc = hpet_get_unused_clockevent();
 686
 687        if (hc)
 688                init_one_hpet_msi_clockevent(hc, cpu);
 689        return 0;
 690}
 691
 692static int hpet_cpuhp_dead(unsigned int cpu)
 693{
 694        struct hpet_channel *hc = per_cpu(cpu_hpet_channel, cpu);
 695
 696        if (!hc)
 697                return 0;
 698        free_irq(hc->irq, hc);
 699        hc->in_use = 0;
 700        per_cpu(cpu_hpet_channel, cpu) = NULL;
 701        return 0;
 702}
 703
 704static void __init hpet_select_clockevents(void)
 705{
 706        unsigned int i;
 707
 708        hpet_base.nr_clockevents = 0;
 709
  710        /* No point if MSI is disabled or CPU has an Always Running APIC Timer */
 711        if (hpet_msi_disable || boot_cpu_has(X86_FEATURE_ARAT))
 712                return;
 713
 714        hpet_print_config();
 715
 716        hpet_domain = hpet_create_irq_domain(hpet_blockid);
 717        if (!hpet_domain)
 718                return;
 719
 720        for (i = 0; i < hpet_base.nr_channels; i++) {
 721                struct hpet_channel *hc = hpet_base.channels + i;
 722                int irq;
 723
 724                if (hc->mode != HPET_MODE_UNUSED)
 725                        continue;
 726
  727                /* Only consider HPET channels with MSI support */
 728                if (!(hc->boot_cfg & HPET_TN_FSB_CAP))
 729                        continue;
 730
 731                sprintf(hc->name, "hpet%d", i);
 732
 733                irq = hpet_assign_irq(hpet_domain, hc, hc->num);
 734                if (irq <= 0)
 735                        continue;
 736
 737                hc->irq = irq;
 738                hc->mode = HPET_MODE_CLOCKEVT;
 739
 740                if (++hpet_base.nr_clockevents == num_possible_cpus())
 741                        break;
 742        }
 743
 744        pr_info("%d channels of %d reserved for per-cpu timers\n",
  745                hpet_base.nr_clockevents, num_possible_cpus());
 746}
 747
 748#else
 749
 750static inline void hpet_select_clockevents(void) { }
 751
 752#define hpet_cpuhp_online       NULL
 753#define hpet_cpuhp_dead         NULL
 754
 755#endif
 756
 757/*
 758 * Clock source related code
 759 */
 760#if defined(CONFIG_SMP) && defined(CONFIG_64BIT)
 761/*
 762 * Reading the HPET counter is a very slow operation. If a large number of
 763 * CPUs are trying to access the HPET counter simultaneously, it can cause
 764 * massive delays and slow down system performance dramatically. This may
 765 * happen when HPET is the default clock source instead of TSC. For a
 766 * really large system with hundreds of CPUs, the slowdown may be so
  767 * severe that it can actually crash the system because of an NMI watchdog
 768 * soft lockup, for example.
 769 *
 770 * If multiple CPUs are trying to access the HPET counter at the same time,
 771 * we don't actually need to read the counter multiple times. Instead, the
 772 * other CPUs can use the counter value read by the first CPU in the group.
 773 *
 774 * This special feature is only enabled on x86-64 systems. It is unlikely
 775 * that 32-bit x86 systems will have enough CPUs to require this feature
  776 * with its associated locking overhead. We also need a 64-bit atomic read.
 777 *
 778 * The lock and the HPET value are stored together and can be read in a
 779 * single atomic 64-bit read. It is explicitly assumed that arch_spinlock_t
 780 * is 32 bits in size.
 781 */
 782union hpet_lock {
 783        struct {
 784                arch_spinlock_t lock;
 785                u32 value;
 786        };
 787        u64 lockval;
 788};
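     /*
      * Because arch_spinlock_t is 32 bits wide, the lock and the cached
      * counter value share a single 64-bit word. One READ_ONCE() of lockval
      * therefore observes both fields consistently, which is what the
      * contended path in read_hpet() relies on.
      */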
 789
 790static union hpet_lock hpet __cacheline_aligned = {
 791        { .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
 792};
 793
 794static u64 read_hpet(struct clocksource *cs)
 795{
 796        unsigned long flags;
 797        union hpet_lock old, new;
 798
 799        BUILD_BUG_ON(sizeof(union hpet_lock) != 8);
 800
 801        /*
 802         * Read HPET directly if in NMI.
 803         */
 804        if (in_nmi())
 805                return (u64)hpet_readl(HPET_COUNTER);
 806
 807        /*
 808         * Read the current state of the lock and HPET value atomically.
 809         */
 810        old.lockval = READ_ONCE(hpet.lockval);
 811
 812        if (arch_spin_is_locked(&old.lock))
 813                goto contended;
 814
 815        local_irq_save(flags);
 816        if (arch_spin_trylock(&hpet.lock)) {
 817                new.value = hpet_readl(HPET_COUNTER);
 818                /*
 819                 * Use WRITE_ONCE() to prevent store tearing.
 820                 */
 821                WRITE_ONCE(hpet.value, new.value);
 822                arch_spin_unlock(&hpet.lock);
 823                local_irq_restore(flags);
 824                return (u64)new.value;
 825        }
 826        local_irq_restore(flags);
 827
 828contended:
 829        /*
 830         * Contended case
 831         * --------------
  832         * Wait until the HPET value changes or the lock is free to indicate
 833         * its value is up-to-date.
 834         *
  835         * It is possible that old.value already contains the latest
 836         * HPET value while the lock holder was in the process of releasing
 837         * the lock. Checking for lock state change will enable us to return
 838         * the value immediately instead of waiting for the next HPET reader
 839         * to come along.
 840         */
 841        do {
 842                cpu_relax();
 843                new.lockval = READ_ONCE(hpet.lockval);
 844        } while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
 845
 846        return (u64)new.value;
 847}
 848#else
 849/*
 850 * For UP or 32-bit.
 851 */
 852static u64 read_hpet(struct clocksource *cs)
 853{
 854        return (u64)hpet_readl(HPET_COUNTER);
 855}
 856#endif
 857
 858static struct clocksource clocksource_hpet = {
 859        .name           = "hpet",
 860        .rating         = 250,
 861        .read           = read_hpet,
 862        .mask           = HPET_MASK,
 863        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 864        .resume         = hpet_resume_counter,
 865};
 866
 867/*
 868 * AMD SB700 based systems with spread spectrum enabled use a SMM based
 869 * HPET emulation to provide proper frequency setting.
 870 *
 871 * On such systems the SMM code is initialized with the first HPET register
 872 * access and takes some time to complete. During this time the config
 873 * register reads 0xffffffff. We check for max 1000 loops whether the
 874 * config register reads a non-0xffffffff value to make sure that the
 875 * HPET is up and running before we proceed any further.
 876 *
 877 * A counting loop is safe, as the HPET access takes thousands of CPU cycles.
 878 *
 879 * On non-SB700 based machines this check is only done once and has no
 880 * side effects.
 881 */
 882static bool __init hpet_cfg_working(void)
 883{
 884        int i;
 885
 886        for (i = 0; i < 1000; i++) {
 887                if (hpet_readl(HPET_CFG) != 0xFFFFFFFF)
 888                        return true;
 889        }
 890
 891        pr_warn("Config register invalid. Disabling HPET\n");
 892        return false;
 893}
 894
 895static bool __init hpet_counting(void)
 896{
 897        u64 start, now, t1;
 898
 899        hpet_restart_counter();
 900
 901        t1 = hpet_readl(HPET_COUNTER);
 902        start = rdtsc();
 903
 904        /*
 905         * We don't know the TSC frequency yet, but waiting for
 906         * 200000 TSC cycles is safe:
 907         * 4 GHz == 50us
 908         * 1 GHz == 200us
 909         */
 910        do {
 911                if (t1 != hpet_readl(HPET_COUNTER))
 912                        return true;
 913                now = rdtsc();
 914        } while ((now - start) < 200000UL);
 915
 916        pr_warn("Counter not counting. HPET disabled\n");
 917        return false;
 918}
 919
 920static bool __init mwait_pc10_supported(void)
 921{
 922        unsigned int eax, ebx, ecx, mwait_substates;
 923
 924        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
 925                return false;
 926
 927        if (!cpu_feature_enabled(X86_FEATURE_MWAIT))
 928                return false;
 929
 930        if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
 931                return false;
 932
 933        cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
 934
 935        return (ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) &&
 936               (ecx & CPUID5_ECX_INTERRUPT_BREAK) &&
 937               (mwait_substates & (0xF << 28));
 938}
 939
 940/*
 941 * Check whether the system supports PC10. If so force disable HPET as that
 942 * stops counting in PC10. This check is overbroad as it does not take any
 943 * of the following into account:
 944 *
 945 *      - ACPI tables
 946 *      - Enablement of intel_idle
 947 *      - Command line arguments which limit intel_idle C-state support
 948 *
 949 * That's perfectly fine. HPET is a piece of hardware designed by committee
  950 * and the only reason why it is still in use on modern systems is the
 951 * fact that it is impossible to reliably query TSC and CPU frequency via
 952 * CPUID or firmware.
 953 *
 954 * If HPET is functional it is useful for calibrating TSC, but this can be
 955 * done via PMTIMER as well which seems to be the last remaining timer on
  956 * X86/INTEL platforms that has not been completely wrecked by feature
 957 * creep.
 958 *
 959 * In theory HPET support should be removed altogether, but there are older
 960 * systems out there which depend on it because TSC and APIC timer are
 961 * dysfunctional in deeper C-states.
 962 *
 963 * It's only 20 years now that hardware people have been asked to provide
 964 * reliable and discoverable facilities which can be used for timekeeping
 965 * and per CPU timer interrupts.
 966 *
 967 * The probability that this problem is going to be solved in the
  968 * foreseeable future is close to zero, so the kernel has to be cluttered
 969 * with heuristics to keep up with the ever growing amount of hardware and
 970 * firmware trainwrecks. Hopefully some day hardware people will understand
 971 * that the approach of "This can be fixed in software" is not sustainable.
 972 * Hope dies last...
 973 */
 974static bool __init hpet_is_pc10_damaged(void)
 975{
 976        unsigned long long pcfg;
 977
 978        /* Check whether PC10 substates are supported */
 979        if (!mwait_pc10_supported())
 980                return false;
 981
 982        /* Check whether PC10 is enabled in PKG C-state limit */
 983        rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, pcfg);
 984        if ((pcfg & 0xF) < 8)
 985                return false;
 986
 987        if (hpet_force_user) {
 988                pr_warn("HPET force enabled via command line, but dysfunctional in PC10.\n");
 989                return false;
 990        }
 991
 992        pr_info("HPET dysfunctional in PC10. Force disabled.\n");
 993        boot_hpet_disable = true;
 994        return true;
 995}
 996
 997/**
 998 * hpet_enable - Try to setup the HPET timer. Returns 1 on success.
 999 */
1000int __init hpet_enable(void)
1001{
1002        u32 hpet_period, cfg, id, irq;
1003        unsigned int i, channels;
1004        struct hpet_channel *hc;
1005        u64 freq;
1006
1007        if (!is_hpet_capable())
1008                return 0;
1009
1010        if (hpet_is_pc10_damaged())
1011                return 0;
1012
1013        hpet_set_mapping();
1014        if (!hpet_virt_address)
1015                return 0;
1016
1017        /* Validate that the config register is working */
1018        if (!hpet_cfg_working())
1019                goto out_nohpet;
1020
1021        /*
1022         * Read the period and check for a sane value:
1023         */
1024        hpet_period = hpet_readl(HPET_PERIOD);
1025        if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
1026                goto out_nohpet;
1027
1028        /* The period is a femtoseconds value. Convert it to a frequency. */
1029        freq = FSEC_PER_SEC;
1030        do_div(freq, hpet_period);
1031        hpet_freq = freq;
1032
1033        /*
1034         * Read the HPET ID register to retrieve the IRQ routing
1035         * information and the number of channels
1036         */
1037        id = hpet_readl(HPET_ID);
1038        hpet_print_config();
1039
 1040        /* The ID field holds the zero-based number of the last channel, hence the + 1 */
1041        channels = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
1042
1043        /*
1044         * The legacy routing mode needs at least two channels, tick timer
1045         * and the rtc emulation channel.
1046         */
1047        if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC) && channels < 2)
1048                goto out_nohpet;
1049
1050        hc = kcalloc(channels, sizeof(*hc), GFP_KERNEL);
1051        if (!hc) {
1052                pr_warn("Disabling HPET.\n");
1053                goto out_nohpet;
1054        }
1055        hpet_base.channels = hc;
1056        hpet_base.nr_channels = channels;
1057
1058        /* Read, store and sanitize the global configuration */
1059        cfg = hpet_readl(HPET_CFG);
1060        hpet_base.boot_cfg = cfg;
1061        cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
1062        hpet_writel(cfg, HPET_CFG);
1063        if (cfg)
1064                pr_warn("Global config: Unknown bits %#x\n", cfg);
1065
1066        /* Read, store and sanitize the per channel configuration */
1067        for (i = 0; i < channels; i++, hc++) {
1068                hc->num = i;
1069
1070                cfg = hpet_readl(HPET_Tn_CFG(i));
1071                hc->boot_cfg = cfg;
1072                irq = (cfg & Tn_INT_ROUTE_CNF_MASK) >> Tn_INT_ROUTE_CNF_SHIFT;
1073                hc->irq = irq;
1074
1075                cfg &= ~(HPET_TN_ENABLE | HPET_TN_LEVEL | HPET_TN_FSB);
1076                hpet_writel(cfg, HPET_Tn_CFG(i));
1077
1078                cfg &= ~(HPET_TN_PERIODIC | HPET_TN_PERIODIC_CAP
1079                         | HPET_TN_64BIT_CAP | HPET_TN_32BIT | HPET_TN_ROUTE
1080                         | HPET_TN_FSB | HPET_TN_FSB_CAP);
1081                if (cfg)
1082                        pr_warn("Channel #%u config: Unknown bits %#x\n", i, cfg);
1083        }
1084        hpet_print_config();
1085
1086        /*
1087         * Validate that the counter is counting. This needs to be done
1088         * after sanitizing the config registers to properly deal with
1089         * force enabled HPETs.
1090         */
1091        if (!hpet_counting())
1092                goto out_nohpet;
1093
1094        clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
1095
1096        if (id & HPET_ID_LEGSUP) {
1097                hpet_legacy_clockevent_register(&hpet_base.channels[0]);
1098                hpet_base.channels[0].mode = HPET_MODE_LEGACY;
1099                if (IS_ENABLED(CONFIG_HPET_EMULATE_RTC))
1100                        hpet_base.channels[1].mode = HPET_MODE_LEGACY;
1101                return 1;
1102        }
1103        return 0;
1104
1105out_nohpet:
1106        kfree(hpet_base.channels);
1107        hpet_base.channels = NULL;
1108        hpet_base.nr_channels = 0;
1109        hpet_clear_mapping();
1110        hpet_address = 0;
1111        return 0;
1112}
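     /*
      * hpet_enable() is invoked from the early timer setup (see
      * hpet_time_init() in arch/x86/kernel/time.c); when it returns 0 the
      * PIT is set up as the boot clockevent instead.
      */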
1113
1114/*
1115 * The late initialization runs after the PCI quirks have been invoked
1116 * which might have detected a system on which the HPET can be enforced.
1117 *
1118 * Also, the MSI machinery is not working yet when the HPET is initialized
1119 * early.
1120 *
1121 * If the HPET is enabled, then:
1122 *
1123 *  1) Reserve one channel for /dev/hpet if CONFIG_HPET=y
1124 *  2) Reserve up to num_possible_cpus() channels as per CPU clockevents
1125 *  3) Setup /dev/hpet if CONFIG_HPET=y
1126 *  4) Register hotplug callbacks when clockevents are available
1127 */
1128static __init int hpet_late_init(void)
1129{
1130        int ret;
1131
1132        if (!hpet_address) {
1133                if (!force_hpet_address)
1134                        return -ENODEV;
1135
1136                hpet_address = force_hpet_address;
1137                hpet_enable();
1138        }
1139
1140        if (!hpet_virt_address)
1141                return -ENODEV;
1142
1143        hpet_select_device_channel();
1144        hpet_select_clockevents();
1145        hpet_reserve_platform_timers();
1146        hpet_print_config();
1147
1148        if (!hpet_base.nr_clockevents)
1149                return 0;
1150
1151        ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "x86/hpet:online",
1152                                hpet_cpuhp_online, NULL);
1153        if (ret)
1154                return ret;
1155        ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "x86/hpet:dead", NULL,
1156                                hpet_cpuhp_dead);
1157        if (ret)
1158                goto err_cpuhp;
1159        return 0;
1160
1161err_cpuhp:
1162        cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
1163        return ret;
1164}
1165fs_initcall(hpet_late_init);
1166
1167void hpet_disable(void)
1168{
1169        unsigned int i;
1170        u32 cfg;
1171
1172        if (!is_hpet_capable() || !hpet_virt_address)
1173                return;
1174
1175        /* Restore boot configuration with the enable bit cleared */
1176        cfg = hpet_base.boot_cfg;
1177        cfg &= ~HPET_CFG_ENABLE;
1178        hpet_writel(cfg, HPET_CFG);
1179
1180        /* Restore the channel boot configuration */
1181        for (i = 0; i < hpet_base.nr_channels; i++)
1182                hpet_writel(hpet_base.channels[i].boot_cfg, HPET_Tn_CFG(i));
1183
1184        /* If the HPET was enabled at boot time, reenable it */
1185        if (hpet_base.boot_cfg & HPET_CFG_ENABLE)
1186                hpet_writel(hpet_base.boot_cfg, HPET_CFG);
1187}
1188
1189#ifdef CONFIG_HPET_EMULATE_RTC
1190
1191/*
1192 * HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
1193 * is enabled, we support RTC interrupt functionality in software.
1194 *
1195 * RTC has 3 kinds of interrupts:
1196 *
1197 *  1) Update Interrupt - generate an interrupt, every second, when the
1198 *     RTC clock is updated
1199 *  2) Alarm Interrupt - generate an interrupt at a specific time of day
1200 *  3) Periodic Interrupt - generate periodic interrupt, with frequencies
1201 *     2Hz-8192Hz (2Hz-64Hz for non-root user) (all frequencies in powers of 2)
1202 *
1203 * (1) and (2) above are implemented using polling at a frequency of 64 Hz:
1204 * DEFAULT_RTC_INT_FREQ.
1205 *
1206 * The exact frequency is a tradeoff between accuracy and interrupt overhead.
1207 *
1208 * For (3), we use interrupts at 64 Hz, or the user specified periodic frequency,
1209 * if it's higher.
1210 */
1211#include <linux/mc146818rtc.h>
1212#include <linux/rtc.h>
1213
1214#define DEFAULT_RTC_INT_FREQ    64
1215#define DEFAULT_RTC_SHIFT       6
1216#define RTC_NUM_INTS            1
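     /*
      * DEFAULT_RTC_SHIFT is log2(DEFAULT_RTC_INT_FREQ): hpet_rtc_timer_init()
      * derives the default 64 Hz polling delta by shifting the one-second
      * clockevent value right by evt->shift + DEFAULT_RTC_SHIFT.
      */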
1217
1218static unsigned long hpet_rtc_flags;
1219static int hpet_prev_update_sec;
1220static struct rtc_time hpet_alarm_time;
1221static unsigned long hpet_pie_count;
1222static u32 hpet_t1_cmp;
1223static u32 hpet_default_delta;
1224static u32 hpet_pie_delta;
1225static unsigned long hpet_pie_limit;
1226
1227static rtc_irq_handler irq_handler;
1228
1229/*
1230 * Check that the HPET counter c1 is ahead of c2
1231 */
1232static inline int hpet_cnt_ahead(u32 c1, u32 c2)
1233{
1234        return (s32)(c2 - c1) < 0;
1235}
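     /*
      * Example: c1 = 0x00000010 and c2 = 0xfffffff0 gives
      * (s32)(c2 - c1) = (s32)0xffffffe0 < 0, so c1, which has just wrapped
      * around, is correctly treated as being ahead of c2.
      */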
1236
1237/*
 1238 * Registers an IRQ handler.
1239 */
1240int hpet_register_irq_handler(rtc_irq_handler handler)
1241{
1242        if (!is_hpet_enabled())
1243                return -ENODEV;
1244        if (irq_handler)
1245                return -EBUSY;
1246
1247        irq_handler = handler;
1248
1249        return 0;
1250}
1251EXPORT_SYMBOL_GPL(hpet_register_irq_handler);
1252
1253/*
1254 * Deregisters the IRQ handler registered with hpet_register_irq_handler()
1255 * and does cleanup.
1256 */
1257void hpet_unregister_irq_handler(rtc_irq_handler handler)
1258{
1259        if (!is_hpet_enabled())
1260                return;
1261
1262        irq_handler = NULL;
1263        hpet_rtc_flags = 0;
1264}
1265EXPORT_SYMBOL_GPL(hpet_unregister_irq_handler);
1266
1267/*
1268 * Channel 1 for RTC emulation. We use one shot mode, as periodic mode
1269 * is not supported by all HPET implementations for channel 1.
1270 *
1271 * hpet_rtc_timer_init() is called when the rtc is initialized.
1272 */
1273int hpet_rtc_timer_init(void)
1274{
1275        unsigned int cfg, cnt, delta;
1276        unsigned long flags;
1277
1278        if (!is_hpet_enabled())
1279                return 0;
1280
1281        if (!hpet_default_delta) {
1282                struct clock_event_device *evt = &hpet_base.channels[0].evt;
1283                uint64_t clc;
1284
1285                clc = (uint64_t) evt->mult * NSEC_PER_SEC;
1286                clc >>= evt->shift + DEFAULT_RTC_SHIFT;
1287                hpet_default_delta = clc;
1288        }
1289
1290        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1291                delta = hpet_default_delta;
1292        else
1293                delta = hpet_pie_delta;
1294
1295        local_irq_save(flags);
1296
1297        cnt = delta + hpet_readl(HPET_COUNTER);
1298        hpet_writel(cnt, HPET_T1_CMP);
1299        hpet_t1_cmp = cnt;
1300
1301        cfg = hpet_readl(HPET_T1_CFG);
1302        cfg &= ~HPET_TN_PERIODIC;
1303        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
1304        hpet_writel(cfg, HPET_T1_CFG);
1305
1306        local_irq_restore(flags);
1307
1308        return 1;
1309}
1310EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
1311
1312static void hpet_disable_rtc_channel(void)
1313{
1314        u32 cfg = hpet_readl(HPET_T1_CFG);
1315
1316        cfg &= ~HPET_TN_ENABLE;
1317        hpet_writel(cfg, HPET_T1_CFG);
1318}
1319
1320/*
 1321 * The functions below are called from the rtc driver.
1322 * Return 0 if HPET is not being used.
1323 * Otherwise do the necessary changes and return 1.
1324 */
1325int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
1326{
1327        if (!is_hpet_enabled())
1328                return 0;
1329
1330        hpet_rtc_flags &= ~bit_mask;
1331        if (unlikely(!hpet_rtc_flags))
1332                hpet_disable_rtc_channel();
1333
1334        return 1;
1335}
1336EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
1337
1338int hpet_set_rtc_irq_bit(unsigned long bit_mask)
1339{
1340        unsigned long oldbits = hpet_rtc_flags;
1341
1342        if (!is_hpet_enabled())
1343                return 0;
1344
1345        hpet_rtc_flags |= bit_mask;
1346
1347        if ((bit_mask & RTC_UIE) && !(oldbits & RTC_UIE))
1348                hpet_prev_update_sec = -1;
1349
1350        if (!oldbits)
1351                hpet_rtc_timer_init();
1352
1353        return 1;
1354}
1355EXPORT_SYMBOL_GPL(hpet_set_rtc_irq_bit);
1356
1357int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
1358{
1359        if (!is_hpet_enabled())
1360                return 0;
1361
1362        hpet_alarm_time.tm_hour = hrs;
1363        hpet_alarm_time.tm_min = min;
1364        hpet_alarm_time.tm_sec = sec;
1365
1366        return 1;
1367}
1368EXPORT_SYMBOL_GPL(hpet_set_alarm_time);
1369
1370int hpet_set_periodic_freq(unsigned long freq)
1371{
1372        uint64_t clc;
1373
1374        if (!is_hpet_enabled())
1375                return 0;
1376
1377        if (freq <= DEFAULT_RTC_INT_FREQ) {
1378                hpet_pie_limit = DEFAULT_RTC_INT_FREQ / freq;
1379        } else {
1380                struct clock_event_device *evt = &hpet_base.channels[0].evt;
1381
1382                clc = (uint64_t) evt->mult * NSEC_PER_SEC;
1383                do_div(clc, freq);
1384                clc >>= evt->shift;
1385                hpet_pie_delta = clc;
1386                hpet_pie_limit = 0;
1387        }
1388
1389        return 1;
1390}
1391EXPORT_SYMBOL_GPL(hpet_set_periodic_freq);
1392
1393int hpet_rtc_dropped_irq(void)
1394{
1395        return is_hpet_enabled();
1396}
1397EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
1398
1399static void hpet_rtc_timer_reinit(void)
1400{
1401        unsigned int delta;
1402        int lost_ints = -1;
1403
1404        if (unlikely(!hpet_rtc_flags))
1405                hpet_disable_rtc_channel();
1406
1407        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
1408                delta = hpet_default_delta;
1409        else
1410                delta = hpet_pie_delta;
1411
1412        /*
1413         * Increment the comparator value until we are ahead of the
1414         * current count.
1415         */
1416        do {
1417                hpet_t1_cmp += delta;
1418                hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
1419                lost_ints++;
1420        } while (!hpet_cnt_ahead(hpet_t1_cmp, hpet_readl(HPET_COUNTER)));
1421
1422        if (lost_ints) {
1423                if (hpet_rtc_flags & RTC_PIE)
1424                        hpet_pie_count += lost_ints;
1425                if (printk_ratelimit())
1426                        pr_warn("Lost %d RTC interrupts\n", lost_ints);
1427        }
1428}
1429
1430irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
1431{
1432        struct rtc_time curr_time;
1433        unsigned long rtc_int_flag = 0;
1434
1435        hpet_rtc_timer_reinit();
1436        memset(&curr_time, 0, sizeof(struct rtc_time));
1437
1438        if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
1439                mc146818_get_time(&curr_time);
1440
1441        if (hpet_rtc_flags & RTC_UIE &&
1442            curr_time.tm_sec != hpet_prev_update_sec) {
1443                if (hpet_prev_update_sec >= 0)
1444                        rtc_int_flag = RTC_UF;
1445                hpet_prev_update_sec = curr_time.tm_sec;
1446        }
1447
1448        if (hpet_rtc_flags & RTC_PIE && ++hpet_pie_count >= hpet_pie_limit) {
1449                rtc_int_flag |= RTC_PF;
1450                hpet_pie_count = 0;
1451        }
1452
1453        if (hpet_rtc_flags & RTC_AIE &&
1454            (curr_time.tm_sec == hpet_alarm_time.tm_sec) &&
1455            (curr_time.tm_min == hpet_alarm_time.tm_min) &&
1456            (curr_time.tm_hour == hpet_alarm_time.tm_hour))
1457                rtc_int_flag |= RTC_AF;
1458
1459        if (rtc_int_flag) {
1460                rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
1461                if (irq_handler)
1462                        irq_handler(rtc_int_flag, dev_id);
1463        }
1464        return IRQ_HANDLED;
1465}
1466EXPORT_SYMBOL_GPL(hpet_rtc_interrupt);
1467#endif
1468