/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
        struct clock_event_device *ce;
        int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
                        bool ismax)
{
        u64 clc = (u64) latch << evt->shift;
        u64 rnd;

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }
        rnd = (u64) evt->mult - 1;

        /*
         * Upper bound sanity check. If the backwards conversion
         * does not equal latch, we know that the above shift
         * overflowed.
         */
        if ((clc >> evt->shift) != (u64)latch)
                clc = ~0ULL;

        /*
         * Scaled math oddities:
         *
         * For mult <= (1 << shift) we can safely add mult - 1 to
         * prevent integer rounding loss. So the backwards conversion
         * from nsec to device ticks will be correct.
         *
         * For mult > (1 << shift), i.e. device frequency is > 1GHz we
         * need to be careful. Adding mult - 1 will result in a value
         * which when converted back to device ticks can be larger
         * than latch by up to (mult - 1) >> shift. For the min_delta
         * calculation we still want to apply this in order to stay
         * above the minimum device ticks limit. For the upper limit
         * we would end up with a latch value larger than the upper
         * limit of the device, so we omit the add to stay below the
         * device upper boundary.
         *
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1U << evt->shift)))
                clc += rnd;

        do_div(clc, evt->mult);

        /* Deltas less than 1usec are pointless noise */
        return clc > 1000 ? clc : 1000;
}
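
/*
 * Worked example (hypothetical values): a 1MHz device configured with
 * shift = 32 has mult ~= 4294967 (2^32 / 1000, i.e. ~0.001 ticks per
 * nsec). For latch = 1, clc = 1 << 32 = 4294967296; mult <= (1 << shift),
 * so mult - 1 is added before the division and the result is 1001 nsec.
 * Converting 1001 nsec back to ticks (1001 * 4294967 >> 32) yields
 * 1 tick again, so the round-up keeps the result above the device's
 * minimum tick limit.
 */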

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:      value to convert
 * @evt:        pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:        device to modify
 * @mode:       new mode
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_mode(struct clock_event_device *dev,
                                 enum clock_event_mode mode)
{
        if (dev->mode != mode) {
                dev->set_mode(mode, dev);
                dev->mode = mode;

                /*
                 * A nsec2cyc multiplier of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (mode == CLOCK_EVT_MODE_ONESHOT) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
                        }
                }
        }
}
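
/*
 * Usage sketch (illustrative only): a driver's tick setup path,
 * running with interrupts disabled, would switch its device with
 *
 *      clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
 *
 * The new mode is handed to dev->set_mode() only when it differs
 * from the current one.
 */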

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:        device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffy */
#define MIN_DELTA_LIMIT         (NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:       device whose minimum delta is raised
 *
 * Returns 0 on success, -ETIME when the minimum delta has reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk_deferred(KERN_WARNING
                                "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk_deferred(KERN_WARNING
                        "CE: %s increased min_delta_ns to %llu nsec\n",
                        dev->name ? dev->name : "?",
                        (unsigned long long) dev->min_delta_ns);
        return 0;
}
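
/*
 * Example escalation (hypothetical numbers): with HZ = 250 the limit
 * is NSEC_PER_SEC / HZ = 4000000 nsec. A device that keeps failing to
 * reprogram steps through 5000, 7500, 11250, ... nsec, growing by 50%
 * per call, until the value is clamped to 4000000; the call after
 * that gives up with -ETIME.
 */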

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;
        int i;

        for (i = 0;;) {
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;

                if (++i > 2) {
                        /*
                         * We tried 3 times to program the device with the
                         * given min_delta_ns. Try to increase the minimum
                         * delta; if that fails as well, get out of here.
                         */
                        if (clockevents_increase_min_delta(dev))
                                return -ETIME;
                        i = 0;
                }
        }
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, the error code from ->set_next_event() otherwise.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;

        delta = dev->min_delta_ns;
        dev->next_event = ktime_add_ns(ktime_get(), delta);

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        dev->retries++;
        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:        device to program
 * @expires:    absolute expiry time (monotonic clock)
 * @force:      program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        dev->next_event = expires;

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
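
/*
 * Worked example (hypothetical 1MHz device, mult = 4294967 and
 * shift = 32 as above): an event 500 usec in the future gives
 * delta = 500000 nsec, which is clamped between min_delta_ns and
 * max_delta_ns, and clc = 500000 * 4294967 >> 32 = 499 device ticks
 * handed to dev->set_next_event().
 */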

/*
 * Called after a notify add to make available those devices which
 * were released during the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                tick_check_new_device(dev);
        }
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;

                if (!try_module_get(dev->owner))
                        continue;

                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev) {
                tick_install_replacement(newdev);
                list_del_init(&ced->list);
        }
        return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
        /* Fast track. Device is unused */
        if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
                list_del_init(&ced->list);
                return 0;
        }

        return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
        struct ce_unbind *cu = arg;
        int res;

        raw_spin_lock(&clockevents_lock);
        res = __clockevents_try_unbind(cu->ce, smp_processor_id());
        if (res == -EAGAIN)
                res = clockevents_replace(cu->ce);
        cu->res = res;
        raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues an SMP function call to unbind a per-cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
        struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

        smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
        return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
        int ret;

        mutex_lock(&clockevents_mutex);
        ret = clockevents_unbind(ced, cpu);
        mutex_unlock(&clockevents_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev:        device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32-bit ticks, so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
        dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
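
/*
 * Example (hypothetical devices): a 32-bit timer at 1MHz has
 * max_delta_ticks = UINT_MAX, i.e. ~4295 seconds, and is not capped
 * since its ticks fit in 32 bits. A 64-bit comparator at 1GHz could
 * nominally sleep for centuries; its max_delta_ticks exceeds UINT_MAX,
 * so sec is capped at 600 and mult/shift are computed for a 10 minute
 * range instead.
 */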

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:        device to register
 * @freq:       The clock frequency
 * @min_delta:  The minimum clock ticks to program in oneshot mode
 * @max_delta:  The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
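
/*
 * Typical driver usage (sketch only; all names below are made up):
 *
 *      my_evt.name = "my-timer";
 *      my_evt.features = CLOCK_EVT_FEAT_ONESHOT;
 *      my_evt.set_mode = my_timer_set_mode;
 *      my_evt.set_next_event = my_timer_set_next_event;
 *      my_evt.cpumask = cpumask_of(smp_processor_id());
 *      clockevents_config_and_register(&my_evt, 1000000, 0xf, 0xffffffff);
 *
 * This registers a 1MHz oneshot timer which can be programmed between
 * 15 and 2^32 - 1 ticks ahead.
 */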

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
                return 0;

        return clockevents_program_event(dev, dev->next_event, false);
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:        device to modify
 * @freq:       new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot mode.
 * Must be called on the CPU for which the device delivers per-CPU
 * timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        local_irq_restore(flags);
        return ret;
}

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:        device to release (can be NULL)
 * @new:        device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        unsigned long flags;

        local_irq_save(flags);
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                module_put(old->owner);
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
                clockevents_shutdown(new);
        }
        local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend)
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume)
                        dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * Returns 0 on success, any other value on error
 */
int clockevents_notify(unsigned long reason, void *arg)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;
        int cpu, ret = 0;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        switch (reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                tick_broadcast_on_off(reason, arg);
                break;

        case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
        case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
                ret = tick_broadcast_oneshot_control(reason);
                break;

        case CLOCK_EVT_NOTIFY_CPU_DYING:
                tick_handover_do_timer(arg);
                break;

        case CLOCK_EVT_NOTIFY_SUSPEND:
                tick_suspend();
                tick_suspend_broadcast();
                break;

        case CLOCK_EVT_NOTIFY_RESUME:
                tick_resume();
                break;

        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                tick_shutdown_broadcast_oneshot(arg);
                tick_shutdown_broadcast(arg);
                tick_shutdown(arg);
                /*
                 * Unregister the clock event devices which were
                 * released by the users in the notify chain.
                 */
                list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                        list_del(&dev->list);
                /*
                 * Now check whether the CPU has left unused per-cpu devices
                 */
                cpu = *((int *)arg);
                list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                        if (cpumask_test_cpu(cpu, dev->cpumask) &&
                            cpumask_weight(dev->cpumask) == 1 &&
                            !tick_is_broadcast_device(dev)) {
                                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                                list_del(&dev->list);
                        }
                }
                break;
        default:
                break;
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_notify);
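
/*
 * Usage sketch (illustrative): an idle driver entering a deep C-state
 * that stops the local timer typically brackets the idle period with
 *
 *      int cpu = smp_processor_id();
 *
 *      clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *      ... enter the idle state ...
 *      clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 *
 * so the broadcast device takes over wakeups while the per-cpu timer
 * is stopped.
 */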

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
        .name           = "clockevents",
        .dev_name       = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct tick_device *td;
        ssize_t count = 0;

        raw_spin_lock_irq(&clockevents_lock);
        td = tick_get_tick_dev(dev);
        if (td && td->evtdev)
                count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
        raw_spin_unlock_irq(&clockevents_lock);
        return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        char name[CS_NAME_LEN];
        /* ret must be signed: sysfs_get_uname() returns a negative errno on failure */
        ssize_t ret = sysfs_get_uname(buf, name, count);
        struct clock_event_device *ce;

        if (ret < 0)
                return ret;

        ret = -ENODEV;
        mutex_lock(&clockevents_mutex);
        raw_spin_lock_irq(&clockevents_lock);
        list_for_each_entry(ce, &clockevent_devices, list) {
                if (!strcmp(ce->name, name)) {
                        ret = __clockevents_try_unbind(ce, dev->id);
                        break;
                }
        }
        raw_spin_unlock_irq(&clockevents_lock);
        /*
         * We hold clockevents_mutex, so ce can't go away
         */
        if (ret == -EAGAIN)
                ret = clockevents_unbind(ce, dev->id);
        mutex_unlock(&clockevents_mutex);
        return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
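
/*
 * Resulting sysfs interface (illustrative; actual device names vary):
 *
 *      $ cat /sys/devices/system/clockevents/clockevent0/current_device
 *      hpet
 *      # echo hpet > /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * The write unbinds the named device from CPU0 if a replacement is
 * available; otherwise it fails (e.g. with -EBUSY).
 */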

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
        .init_name      = "broadcast",
        .id             = 0,
        .bus            = &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return dev == &tick_bc_dev ? tick_get_broadcast_device() :
                &per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
        int err = device_register(&tick_bc_dev);

        if (!err)
                err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
        return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device *dev = &per_cpu(tick_percpu_dev, cpu);
                int err;

                dev->id = cpu;
                dev->bus = &clockevents_subsys;
                err = device_register(dev);
                if (!err)
                        err = device_create_file(dev, &dev_attr_current_device);
                if (!err)
                        err = device_create_file(dev, &dev_attr_unbind_device);
                if (err)
                        return err;
        }
        return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
        int err = subsys_system_register(&clockevents_subsys, NULL);

        if (!err)
                err = tick_init_sysfs();
        return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* CONFIG_SYSFS */

#endif /* CONFIG_GENERIC_CLOCKEVENTS */