linux/kernel/time/clockevents.c
/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licensed under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);
/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);
/* Protection for unbind operations */
static DEFINE_MUTEX(clockevents_mutex);

struct ce_unbind {
        struct clock_event_device *ce;
        int res;
};

static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
                        bool ismax)
{
        u64 clc = (u64) latch << evt->shift;
        u64 rnd;

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }
        rnd = (u64) evt->mult - 1;

        /*
         * Upper bound sanity check. If the backwards conversion does
         * not equal latch, we know that the above shift overflowed.
         */
        if ((clc >> evt->shift) != (u64)latch)
                clc = ~0ULL;

        /*
         * Scaled math oddities:
         *
         * For mult <= (1 << shift) we can safely add mult - 1 to
         * prevent integer rounding loss. So the backwards conversion
         * from nsec to device ticks will be correct.
         *
         * For mult > (1 << shift), i.e. device frequency is > 1GHz we
         * need to be careful. Adding mult - 1 will result in a value
         * which when converted back to device ticks can be larger
         * than latch by up to (mult - 1) >> shift. For the min_delta
         * calculation we still want to apply this in order to stay
         * above the minimum device ticks limit. For the upper limit
         * we would end up with a latch value larger than the upper
         * limit of the device, so we omit the add to stay below the
         * device upper boundary.
         *
         * Also omit the add if it would overflow the u64 boundary.
         */
        if ((~0ULL - clc > rnd) &&
            (!ismax || evt->mult <= (1U << evt->shift)))
                clc += rnd;

        do_div(clc, evt->mult);

        /* Deltas less than 1usec are pointless noise */
        return clc > 1000 ? clc : 1000;
}
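
/*
 * Worked example (illustrative numbers, not taken from any particular
 * driver): assume a 1MHz device configured with shift = 32, which gives
 * mult = (10^6 << 32) / 10^9 = 4294967, so ns->ticks is
 * ticks = (ns * mult) >> shift. cev_delta2ns() computes the inverse:
 *
 *      latch = 1000 ticks (1ms at 1MHz)
 *      clc   = 1000 << 32              = 4294967296000
 *      clc   = (clc + mult - 1) / mult = 1000001 ns
 *
 * Converting 1000001 ns back gives (1000001 * 4294967) >> 32 = 1000
 * ticks, while the truncated quotient 1000000 ns would convert back to
 * only 999 ticks; this is the rounding loss the mult - 1 add prevents.
 */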

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:      value to convert
 * @evt:        pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
        return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:        device to modify
 * @mode:       new mode
 *
 * Must be called with interrupts disabled!
 */
void clockevents_set_mode(struct clock_event_device *dev,
                                 enum clock_event_mode mode)
{
        if (dev->mode != mode) {
                dev->set_mode(mode, dev);
                dev->mode = mode;

                /*
                 * A nsec2cyc multiplier of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
                if (mode == CLOCK_EVT_MODE_ONESHOT) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
                        }
                }
        }
}
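
/*
 * Illustrative driver-side sketch (the "foo" identifiers are
 * hypothetical, not part of this file): the set_mode callback invoked
 * above typically switches the timer hardware between modes, e.g.:
 *
 *      static void foo_timer_set_mode(enum clock_event_mode mode,
 *                                     struct clock_event_device *evt)
 *      {
 *              switch (mode) {
 *              case CLOCK_EVT_MODE_PERIODIC:
 *                      foo_timer_start_periodic();
 *                      break;
 *              case CLOCK_EVT_MODE_ONESHOT:
 *                      foo_timer_stop();
 *                      break;
 *              case CLOCK_EVT_MODE_SHUTDOWN:
 *              case CLOCK_EVT_MODE_UNUSED:
 *                      foo_timer_disable_irq();
 *                      break;
 *              case CLOCK_EVT_MODE_RESUME:
 *                      break;
 *              }
 *      }
 *
 * In ONESHOT mode the timer stays idle until the core arms it via
 * dev->set_next_event().
 */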

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:        device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT         (NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:        device whose minimum delta is to be increased
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
                printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }

        if (dev->min_delta_ns < 5000)
                dev->min_delta_ns = 5000;
        else
                dev->min_delta_ns += dev->min_delta_ns >> 1;

        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;

        printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
               dev->name ? dev->name : "?",
               (unsigned long long) dev->min_delta_ns);
        return 0;
}
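
/*
 * Illustrative progression (assuming HZ = 100, i.e. a 10ms limit): a
 * device that keeps failing to program is first bumped to 5000ns, then
 * grows by 50% per call:
 *
 *      5000 -> 7500 -> 11250 -> ... -> 10000000
 *
 * After min_delta_ns reaches MIN_DELTA_LIMIT, the next failure gives
 * up with -ETIME.
 */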

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;
        int i;

        for (i = 0;;) {
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);

                if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                        return 0;

                dev->retries++;
                clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
                if (dev->set_next_event((unsigned long) clc, dev) == 0)
                        return 0;

                if (++i > 2) {
                        /*
                         * We tried 3 times to program the device with the
                         * given min_delta_ns. Try to increase the minimum
                         * delta, if that fails as well get out of here.
                         */
                        if (clockevents_increase_min_delta(dev))
                                return -ETIME;
                        i = 0;
                }
        }
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:        device to program
 *
 * Returns 0 on success, the set_next_event() error code otherwise.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
        unsigned long long clc;
        int64_t delta;

        delta = dev->min_delta_ns;
        dev->next_event = ktime_add_ns(ktime_get(), delta);

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        dev->retries++;
        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:        device to program
 * @expires:    absolute expiry time (monotonic clock)
 * @force:      program minimum delay if expires cannot be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              bool force)
{
        unsigned long long clc;
        int64_t delta;
        int rc;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        dev->next_event = expires;

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);

        delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
        if (delta <= 0)
                return force ? clockevents_program_min_delta(dev) : -ETIME;

        delta = min(delta, (int64_t) dev->max_delta_ns);
        delta = max(delta, (int64_t) dev->min_delta_ns);

        clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
        rc = dev->set_next_event((unsigned long) clc, dev);

        return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
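
/*
 * Worked example (same illustrative 1MHz device with mult = 4294967,
 * shift = 32 as above): an event 2ms out gives delta = 2000000ns and
 * clc = (2000000 * 4294967) >> 32 = 1999 ticks. The conversion
 * truncates, so the device may fire up to one tick early relative to
 * delta; the min/max clamps above use the precomputed, bound-checked
 * min_delta_ns/max_delta_ns from clockevents_config().
 */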

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                tick_check_new_device(dev);
        }
}

/*
 * Try to install a replacement clock event device
 */
static int clockevents_replace(struct clock_event_device *ced)
{
        struct clock_event_device *dev, *newdev = NULL;

        list_for_each_entry(dev, &clockevent_devices, list) {
                if (dev == ced || dev->mode != CLOCK_EVT_MODE_UNUSED)
                        continue;

                if (!tick_check_replacement(newdev, dev))
                        continue;

                if (!try_module_get(dev->owner))
                        continue;

                if (newdev)
                        module_put(newdev->owner);
                newdev = dev;
        }
        if (newdev) {
                tick_install_replacement(newdev);
                list_del_init(&ced->list);
        }
        return newdev ? 0 : -EBUSY;
}

/*
 * Called with clockevents_mutex and clockevents_lock held
 */
static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
{
        /* Fast track. Device is unused */
        if (ced->mode == CLOCK_EVT_MODE_UNUSED) {
                list_del_init(&ced->list);
                return 0;
        }

        return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY;
}

/*
 * SMP function call to unbind a device
 */
static void __clockevents_unbind(void *arg)
{
        struct ce_unbind *cu = arg;
        int res;

        raw_spin_lock(&clockevents_lock);
        res = __clockevents_try_unbind(cu->ce, smp_processor_id());
        if (res == -EAGAIN)
                res = clockevents_replace(cu->ce);
        cu->res = res;
        raw_spin_unlock(&clockevents_lock);
}

/*
 * Issues smp function call to unbind a per cpu device. Called with
 * clockevents_mutex held.
 */
static int clockevents_unbind(struct clock_event_device *ced, int cpu)
{
        struct ce_unbind cu = { .ce = ced, .res = -ENODEV };

        smp_call_function_single(cpu, __clockevents_unbind, &cu, 1);
        return cu.res;
}

/*
 * Unbind a clockevents device.
 */
int clockevents_unbind_device(struct clock_event_device *ced, int cpu)
{
        int ret;

        mutex_lock(&clockevents_mutex);
        ret = clockevents_unbind(ced, cpu);
        mutex_unlock(&clockevents_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_unbind_device);

/**
 * clockevents_register_device - register a clock event device
 * @dev:        device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
                dev->cpumask = cpumask_of(smp_processor_id());
        }

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        tick_check_new_device(dev);
        clockevents_notify_released();

        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
        u64 sec;

        if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
                return;

        /*
         * Calculate the maximum number of seconds we can sleep. Limit
         * to 10 minutes for hardware which can program more than
         * 32-bit ticks so we still get reasonable conversion values.
         */
        sec = dev->max_delta_ticks;
        do_div(sec, freq);
        if (!sec)
                sec = 1;
        else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
                sec = 600;

        clockevents_calc_mult_shift(dev, freq, sec);
        dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
        dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
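
/*
 * Illustrative numbers: a 32-bit counter at 10MHz yields
 * sec = 0xffffffff / 10^7 = 429s, which is kept as-is since
 * max_delta_ticks fits in 32 bits. A 64-bit counter at the same rate
 * would compute billions of seconds and is clamped to 600s, so
 * clockevents_calc_mult_shift() still produces a mult/shift pair with
 * useful precision.
 */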

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:        device to register
 * @freq:       The clock frequency
 * @min_delta:  The minimum clock ticks to program in oneshot mode
 * @max_delta:  The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
                                     u32 freq, unsigned long min_delta,
                                     unsigned long max_delta)
{
        dev->min_delta_ticks = min_delta;
        dev->max_delta_ticks = max_delta;
        clockevents_config(dev, freq);
        clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
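
/*
 * Typical driver usage (illustrative sketch; the "foo" identifiers are
 * hypothetical):
 *
 *      foo_clockevent.cpumask = cpumask_of(smp_processor_id());
 *      clockevents_config_and_register(&foo_clockevent, 1000000, 1,
 *                                      0xffff);
 *
 * i.e. a 1MHz timer whose compare register can be programmed with
 * between 1 and 65535 ticks.
 */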

int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        clockevents_config(dev, freq);

        if (dev->mode == CLOCK_EVT_MODE_ONESHOT)
                return clockevents_program_event(dev, dev->next_event, false);

        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                dev->set_mode(CLOCK_EVT_MODE_PERIODIC, dev);

        return 0;
}

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:        device to modify
 * @freq:       new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events. If called for the broadcast device the core takes
 * care of serialization.
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = tick_broadcast_update_freq(dev, freq);
        if (ret == -ENODEV)
                ret = __clockevents_update_freq(dev, freq);
        local_irq_restore(flags);
        return ret;
}
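
/*
 * Illustrative use (hypothetical driver): a timer whose input clock
 * scales with cpufreq would call this from its frequency-change
 * notifier, e.g.:
 *
 *      clockevents_update_freq(&foo_clockevent, new_rate_hz);
 *
 * Both mult/shift and the min/max delta bounds are recomputed before
 * the pending event is reprogrammed.
 */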

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:        device to release (can be NULL)
 * @new:        device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        unsigned long flags;

        local_irq_save(flags);
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                module_put(old->owner);
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
                clockevents_shutdown(new);
        }
        local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
        struct clock_event_device *dev;

        list_for_each_entry_reverse(dev, &clockevent_devices, list)
                if (dev->suspend)
                        dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
        struct clock_event_device *dev;

        list_for_each_entry(dev, &clockevent_devices, list)
                if (dev->resume)
                        dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 * Returns 0 on success, any other value on error
 */
int clockevents_notify(unsigned long reason, void *arg)
{
        struct clock_event_device *dev, *tmp;
        unsigned long flags;
        int cpu, ret = 0;

        raw_spin_lock_irqsave(&clockevents_lock, flags);

        switch (reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                tick_broadcast_on_off(reason, arg);
                break;

        case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
        case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
                ret = tick_broadcast_oneshot_control(reason);
                break;

        case CLOCK_EVT_NOTIFY_CPU_DYING:
                tick_handover_do_timer(arg);
                break;

        case CLOCK_EVT_NOTIFY_SUSPEND:
                tick_suspend();
                tick_suspend_broadcast();
                break;

        case CLOCK_EVT_NOTIFY_RESUME:
                tick_resume();
                break;

        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                tick_shutdown_broadcast_oneshot(arg);
                tick_shutdown_broadcast(arg);
                tick_shutdown(arg);
                /*
                 * Unregister the clock event devices which were
                 * released from the users in the notify chain.
                 */
                list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
                        list_del(&dev->list);
                /*
                 * Now check whether the CPU has left unused per cpu devices
                 */
                cpu = *((int *)arg);
                list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
                        if (cpumask_test_cpu(cpu, dev->cpumask) &&
                            cpumask_weight(dev->cpumask) == 1 &&
                            !tick_is_broadcast_device(dev)) {
                                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                                list_del(&dev->list);
                        }
                }
                break;
        default:
                break;
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(clockevents_notify);

#ifdef CONFIG_SYSFS
struct bus_type clockevents_subsys = {
        .name           = "clockevents",
        .dev_name       = "clockevent",
};

static DEFINE_PER_CPU(struct device, tick_percpu_dev);
static struct tick_device *tick_get_tick_dev(struct device *dev);

static ssize_t sysfs_show_current_tick_dev(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct tick_device *td;
        ssize_t count = 0;

        raw_spin_lock_irq(&clockevents_lock);
        td = tick_get_tick_dev(dev);
        if (td && td->evtdev)
                count = snprintf(buf, PAGE_SIZE, "%s\n", td->evtdev->name);
        raw_spin_unlock_irq(&clockevents_lock);
        return count;
}
static DEVICE_ATTR(current_device, 0444, sysfs_show_current_tick_dev, NULL);

/* We don't support the abomination of removable broadcast devices */
static ssize_t sysfs_unbind_tick_dev(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        char name[CS_NAME_LEN];
        ssize_t ret = sysfs_get_uname(buf, name, count);
        struct clock_event_device *ce;

        if (ret < 0)
                return ret;

        ret = -ENODEV;
        mutex_lock(&clockevents_mutex);
        raw_spin_lock_irq(&clockevents_lock);
        list_for_each_entry(ce, &clockevent_devices, list) {
                if (!strcmp(ce->name, name)) {
                        ret = __clockevents_try_unbind(ce, dev->id);
                        break;
                }
        }
        raw_spin_unlock_irq(&clockevents_lock);
        /*
         * We hold clockevents_mutex, so ce can't go away
         */
        if (ret == -EAGAIN)
                ret = clockevents_unbind(ce, dev->id);
        mutex_unlock(&clockevents_mutex);
        return ret ? ret : count;
}
static DEVICE_ATTR(unbind_device, 0200, NULL, sysfs_unbind_tick_dev);
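
/*
 * User-space example (illustrative; the device name is hypothetical):
 * the attributes above appear under /sys/devices/system/clockevents/,
 * one "clockevent<N>" node per cpu:
 *
 *      # cat /sys/devices/system/clockevents/clockevent0/current_device
 *      foo_timer
 *      # echo foo_timer > \
 *              /sys/devices/system/clockevents/clockevent0/unbind_device
 *
 * Unbinding the active tick device succeeds only if a usable
 * replacement device exists.
 */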

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static struct device tick_bc_dev = {
        .init_name      = "broadcast",
        .id             = 0,
        .bus            = &clockevents_subsys,
};

static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return dev == &tick_bc_dev ? tick_get_broadcast_device() :
                &per_cpu(tick_cpu_device, dev->id);
}

static __init int tick_broadcast_init_sysfs(void)
{
        int err = device_register(&tick_bc_dev);

        if (!err)
                err = device_create_file(&tick_bc_dev, &dev_attr_current_device);
        return err;
}
#else
static struct tick_device *tick_get_tick_dev(struct device *dev)
{
        return &per_cpu(tick_cpu_device, dev->id);
}
static inline int tick_broadcast_init_sysfs(void) { return 0; }
#endif

static int __init tick_init_sysfs(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct device *dev = &per_cpu(tick_percpu_dev, cpu);
                int err;

                dev->id = cpu;
                dev->bus = &clockevents_subsys;
                err = device_register(dev);
                if (!err)
                        err = device_create_file(dev, &dev_attr_current_device);
                if (!err)
                        err = device_create_file(dev, &dev_attr_unbind_device);
                if (err)
                        return err;
        }
        return tick_broadcast_init_sysfs();
}

static int __init clockevents_init_sysfs(void)
{
        int err = subsys_system_register(&clockevents_subsys, NULL);

        if (!err)
                err = tick_init_sysfs();
        return err;
}
device_initcall(clockevents_init_sysfs);
#endif /* CONFIG_SYSFS */

#endif /* CONFIG_GENERIC_CLOCKEVENTS */