/*
 * linux/kernel/time/clocksource.c
 *
 * This file contains the functions which manage clocksource drivers.
 *
 * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO WishList:
 *   o Allow clocksource drivers to be unregistered
 */

#include <linux/clocksource.h>
#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>

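/**
 * timecounter_init - initialize a time counter
 * @tc:           Pointer to time counter which is to be initialized
 * @cc:           A cycle counter, ready to be used
 * @start_tstamp: Arbitrary initial time stamp
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */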
void timecounter_init(struct timecounter *tc,
                      const struct cyclecounter *cc,
                      u64 start_tstamp)
{
        tc->cc = cc;
        tc->cycle_last = cc->read(cc);
        tc->nsec = start_tstamp;
}
EXPORT_SYMBOL(timecounter_init);
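
/*
 * Illustrative sketch (not part of this file): how a driver with a
 * free-running hardware counter might layer a timecounter on top of
 * the cyclecounter API above. The names snap_cc, snap_read() and
 * snap_mmio_base are hypothetical, as are the mult/shift values
 * (1 << 10 with shift 10 models a 1 GHz counter, ns == cycles).
 *
 *      static cycle_t snap_read(const struct cyclecounter *cc)
 *      {
 *              return (cycle_t)readl(snap_mmio_base);
 *      }
 *
 *      static struct cyclecounter snap_cc = {
 *              .read   = snap_read,
 *              .mask   = CLOCKSOURCE_MASK(32),
 *              .mult   = 1 << 10,
 *              .shift  = 10,
 *      };
 *      static struct timecounter snap_tc;
 *
 *      timecounter_init(&snap_tc, &snap_cc, ktime_to_ns(ktime_get_real()));
 *      ...
 *      u64 now_ns = timecounter_read(&snap_tc);
 */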

/**
 * timecounter_read_delta - get nanoseconds since last call of this function
 * @tc:         Pointer to time counter
 *
 * When the underlying cycle counter wraps around, this is handled
 * correctly as long as it does not wrap more than once between
 * calls.
 *
 * The first call to this function for a new time counter initializes
 * the time tracking and returns an undefined result.
 */
static u64 timecounter_read_delta(struct timecounter *tc)
{
        cycle_t cycle_now, cycle_delta;
        u64 ns_offset;

        /* read cycle counter: */
        cycle_now = tc->cc->read(tc->cc);

        /* calculate the delta since the last timecounter_read_delta(): */
        cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;

        /* convert to nanoseconds: */
        ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);

        /* update time stamp of timecounter_read_delta() call: */
        tc->cycle_last = cycle_now;

        return ns_offset;
}

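/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:         Pointer to time counter
 *
 * In other words, this keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */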
u64 timecounter_read(struct timecounter *tc)
{
        u64 nsec;

        /* increment time by nanoseconds since last call */
        nsec = timecounter_read_delta(tc);
        nsec += tc->nsec;
        tc->nsec = nsec;

        return nsec;
}
EXPORT_SYMBOL(timecounter_read);

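/**
 * timecounter_cyc2time - convert a cycle counter time stamp to nanoseconds
 * @tc:           Pointer to time counter
 * @cycle_tstamp: a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they fall into the
 * interval [-1/2 max cycle count, +1/2 max cycle count], with
 * "max cycle count" == tc->cc->mask + 1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */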
u64 timecounter_cyc2time(struct timecounter *tc,
                         cycle_t cycle_tstamp)
{
        u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
        u64 nsec;

        /*
         * Instead of always treating cycle_tstamp as more recent
         * than tc->cycle_last, detect when it is too far in the
         * future and treat it as an old time stamp instead.
         */
        if (cycle_delta > tc->cc->mask / 2) {
                cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
                nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
        } else {
                nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
        }

        return nsec;
}
EXPORT_SYMBOL(timecounter_cyc2time);
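
/*
 * Worked example of the half-mask heuristic above, with made-up
 * numbers: for a 16-bit counter (mask == 0xffff) with
 * tc->cycle_last == 0x0010, a time stamp of 0xfff0 yields the forward
 * delta (0xfff0 - 0x0010) & 0xffff == 0xffe0, which exceeds mask/2.
 * The stamp is therefore interpreted as (0x0010 - 0xfff0) & 0xffff ==
 * 0x20 cycles in the past instead of ~64k cycles in the future.
 */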

/*[Clocksource internal variables]---------
 * curr_clocksource:
 *      currently selected clocksource.
 * clocksource_list:
 *      linked list with the registered clocksources
 * clocksource_mutex:
 *      protects manipulations to curr_clocksource and the clocksource_list
 * override_name:
 *      Name of the user-specified clocksource.
 */
static struct clocksource *curr_clocksource;
static LIST_HEAD(clocksource_list);
static DEFINE_MUTEX(clocksource_mutex);
static char override_name[32];
static int finished_booting;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);

static LIST_HEAD(watchdog_list);
static struct clocksource *watchdog;
static struct timer_list watchdog_timer;
static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
static DEFINE_SPINLOCK(watchdog_lock);
static cycle_t watchdog_last;
static int watchdog_running;

static int clocksource_watchdog_kthread(void *data);
static void __clocksource_change_rating(struct clocksource *cs, int rating);

/*
 * Interval: 0.5sec Threshold: 0.0625s
 */
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
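
/*
 * The arithmetic behind the two defines: HZ >> 1 is HZ/2 jiffies and
 * thus half a second for any HZ setting, while NSEC_PER_SEC >> 4 is
 * 1000000000 / 16 = 62500000 ns = 0.0625 s.
 */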

static void clocksource_watchdog_work(struct work_struct *work)
{
        /*
         * If kthread_run fails, the next watchdog scan over the
         * watchdog_list will find the unstable clock again.
         */
        kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
}

static void __clocksource_unstable(struct clocksource *cs)
{
        cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
        cs->flags |= CLOCK_SOURCE_UNSTABLE;
        if (finished_booting)
                schedule_work(&watchdog_work);
}

static void clocksource_unstable(struct clocksource *cs, int64_t delta)
{
        printk(KERN_WARNING "Clocksource %s unstable (delta = %Ld ns)\n",
               cs->name, delta);
        __clocksource_unstable(cs);
}

/**
 * clocksource_mark_unstable - mark clocksource unstable via watchdog
 * @cs:         clocksource to be marked unstable
 *
 * This function is called instead of clocksource_change_rating from
 * cpu hotplug code to avoid a deadlock between the clocksource mutex
 * and the cpu hotplug mutex. It defers the update of the clocksource
 * to the watchdog thread.
 */
void clocksource_mark_unstable(struct clocksource *cs)
{
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
                if (list_empty(&cs->wd_list))
                        list_add(&cs->wd_list, &watchdog_list);
                __clocksource_unstable(cs);
        }
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_watchdog(unsigned long data)
{
        struct clocksource *cs;
        cycle_t csnow, wdnow;
        int64_t wd_nsec, cs_nsec;
        int next_cpu;

        spin_lock(&watchdog_lock);
        if (!watchdog_running)
                goto out;

        wdnow = watchdog->read(watchdog);
        wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
                                     watchdog->mult, watchdog->shift);
        watchdog_last = wdnow;

        list_for_each_entry(cs, &watchdog_list, wd_list) {

                /* Clocksource already marked unstable? */
                if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                        if (finished_booting)
                                schedule_work(&watchdog_work);
                        continue;
                }

                csnow = cs->read(cs);

                /* Clocksource initialized? */
                if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
                        cs->flags |= CLOCK_SOURCE_WATCHDOG;
                        cs->wd_last = csnow;
                        continue;
                }

                /* Check the deviation from the watchdog clocksource. */
                cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) &
                                             cs->mask, cs->mult, cs->shift);
                cs->wd_last = csnow;
                if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
                        clocksource_unstable(cs, cs_nsec - wd_nsec);
                        continue;
                }

                if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
                    (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) &&
                    (watchdog->flags & CLOCK_SOURCE_IS_CONTINUOUS)) {
                        cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
                        /*
                         * We just marked the clocksource as highres-capable,
                         * notify the rest of the system as well so that we
                         * transition into high-res mode:
                         */
                        tick_clock_notify();
                }
        }

        /*
         * Cycle through CPUs to check if the CPUs stay synchronized
         * to each other.
         */
        next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
        if (next_cpu >= nr_cpu_ids)
                next_cpu = cpumask_first(cpu_online_mask);
        watchdog_timer.expires += WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, next_cpu);
out:
        spin_unlock(&watchdog_lock);
}

static inline void clocksource_start_watchdog(void)
{
        if (watchdog_running || !watchdog || list_empty(&watchdog_list))
                return;
        init_timer(&watchdog_timer);
        watchdog_timer.function = clocksource_watchdog;
        watchdog_last = watchdog->read(watchdog);
        watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
        add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
        watchdog_running = 1;
}

static inline void clocksource_stop_watchdog(void)
{
        if (!watchdog_running || (watchdog && !list_empty(&watchdog_list)))
                return;
        del_timer(&watchdog_timer);
        watchdog_running = 0;
}

static inline void clocksource_reset_watchdog(void)
{
        struct clocksource *cs;

        list_for_each_entry(cs, &watchdog_list, wd_list)
                cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
}

static void clocksource_resume_watchdog(void)
{
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        clocksource_reset_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                /* cs is a clocksource to be watched. */
                list_add(&cs->wd_list, &watchdog_list);
                cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
        } else {
                /* cs is a watchdog. */
                if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                        cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
                /* Pick the best watchdog. */
                if (!watchdog || cs->rating > watchdog->rating) {
                        watchdog = cs;
                        /* Reset watchdog cycles */
                        clocksource_reset_watchdog();
                }
        }
        /* Check if the watchdog timer needs to be started. */
        clocksource_start_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static void clocksource_dequeue_watchdog(struct clocksource *cs)
{
        struct clocksource *tmp;
        unsigned long flags;

        spin_lock_irqsave(&watchdog_lock, flags);
        if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                /* cs is a watched clocksource. */
                list_del_init(&cs->wd_list);
        } else if (cs == watchdog) {
                /* Reset watchdog cycles */
                clocksource_reset_watchdog();
                /* Current watchdog is removed. Find an alternative. */
                watchdog = NULL;
                list_for_each_entry(tmp, &clocksource_list, list) {
                        if (tmp == cs || tmp->flags & CLOCK_SOURCE_MUST_VERIFY)
                                continue;
                        if (!watchdog || tmp->rating > watchdog->rating)
                                watchdog = tmp;
                }
        }
        cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
        /* Check if the watchdog timer needs to be stopped. */
        clocksource_stop_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
}

static int clocksource_watchdog_kthread(void *data)
{
        struct clocksource *cs, *tmp;
        unsigned long flags;
        LIST_HEAD(unstable);

        mutex_lock(&clocksource_mutex);
        spin_lock_irqsave(&watchdog_lock, flags);
        list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list)
                if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                        list_del_init(&cs->wd_list);
                        list_add(&cs->wd_list, &unstable);
                }
        /* Check if the watchdog timer needs to be stopped. */
        clocksource_stop_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);

        /* Needs to be done outside of watchdog lock */
        list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
                list_del_init(&cs->wd_list);
                __clocksource_change_rating(cs, 0);
        }
        mutex_unlock(&clocksource_mutex);
        return 0;
}

#else /* CONFIG_CLOCKSOURCE_WATCHDOG */

static void clocksource_enqueue_watchdog(struct clocksource *cs)
{
        if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
}

static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
static inline int clocksource_watchdog_kthread(void *data) { return 0; }

#endif /* CONFIG_CLOCKSOURCE_WATCHDOG */

/**
 * clocksource_resume - resume the clocksource(s)
 */
void clocksource_resume(void)
{
        struct clocksource *cs;

        list_for_each_entry(cs, &clocksource_list, list)
                if (cs->resume)
                        cs->resume();

        clocksource_resume_watchdog();
}

/**
 * clocksource_touch_watchdog - Update watchdog
 *
 * Update the watchdog after exception contexts such as kgdb so as not
 * to incorrectly trip the watchdog.
 */
void clocksource_touch_watchdog(void)
{
        clocksource_resume_watchdog();
}

#ifdef CONFIG_GENERIC_TIME

/**
 * clocksource_select - Select the best clocksource available
 *
 * Private function. Must hold clocksource_mutex when called.
 *
 * Select the clocksource with the best rating, or the clocksource
 * which is selected by a userspace override.
 */
static void clocksource_select(void)
{
        struct clocksource *best, *cs;

        if (!finished_booting || list_empty(&clocksource_list))
                return;
        /* First clocksource on the list has the best rating. */
        best = list_first_entry(&clocksource_list, struct clocksource, list);
        /* Check for the override clocksource. */
        list_for_each_entry(cs, &clocksource_list, list) {
                if (strcmp(cs->name, override_name) != 0)
                        continue;
                /*
                 * Check to make sure we don't switch to a non-highres
                 * capable clocksource if the tick code is in oneshot
                 * mode (highres or nohz)
                 */
                if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) &&
                    tick_oneshot_mode_active()) {
                        /* Override clocksource cannot be used. */
                        printk(KERN_WARNING "Override clocksource %s is not "
                               "HRT compatible. Cannot switch while in "
                               "HRT/NOHZ mode\n", cs->name);
                        override_name[0] = 0;
                } else
                        /* Override clocksource can be used. */
                        best = cs;
                break;
        }
        if (curr_clocksource != best) {
                printk(KERN_INFO "Switching to clocksource %s\n", best->name);
                curr_clocksource = best;
                timekeeping_notify(curr_clocksource);
        }
}

#else /* CONFIG_GENERIC_TIME */

static inline void clocksource_select(void) { }

#endif

/*
 * clocksource_done_booting - Called near the end of core bootup
 *
 * Hack to avoid lots of clocksource churn at boot time.
 * We use fs_initcall because we want this to start before
 * device_initcall but after subsys_initcall.
 */
static int __init clocksource_done_booting(void)
{
        finished_booting = 1;

        /*
         * Run the watchdog first to eliminate unstable clock sources
         */
        clocksource_watchdog_kthread(NULL);

        mutex_lock(&clocksource_mutex);
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
}
fs_initcall(clocksource_done_booting);

/*
 * Enqueue the clocksource sorted by rating
 */
static void clocksource_enqueue(struct clocksource *cs)
{
        struct list_head *entry = &clocksource_list;
        struct clocksource *tmp;

        list_for_each_entry(tmp, &clocksource_list, list)
                /* Keep track of the place where to insert */
                if (tmp->rating >= cs->rating)
                        entry = &tmp->list;
        list_add(&cs->list, entry);
}
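
/*
 * Example of the resulting order, using ratings that the in-tree x86
 * drivers happen to use: registering tsc (300), hpet (250) and
 * jiffies (1) in any order produces the list tsc -> hpet -> jiffies,
 * so list_first_entry() in clocksource_select() always sees the
 * highest-rated entry. The ">=" comparison keeps the insertion
 * stable: a new clocksource goes behind existing ones of equal
 * rating.
 */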

/**
 * clocksource_register - Used to install new clocksources
 * @cs:         clocksource to be registered
 *
 * Returns zero; registration cannot currently fail.
 */
int clocksource_register(struct clocksource *cs)
{
        mutex_lock(&clocksource_mutex);
        clocksource_enqueue(cs);
        clocksource_select();
        clocksource_enqueue_watchdog(cs);
        mutex_unlock(&clocksource_mutex);
        return 0;
}
EXPORT_SYMBOL(clocksource_register);
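
/*
 * Sketch of a typical caller (illustrative only; the "foo" device,
 * its counter register and the rating are hypothetical):
 *
 *      static cycle_t foo_read(struct clocksource *cs)
 *      {
 *              return (cycle_t)readl(foo_counter_reg);
 *      }
 *
 *      static struct clocksource foo_clocksource = {
 *              .name   = "foo",
 *              .rating = 200,
 *              .read   = foo_read,
 *              .mask   = CLOCKSOURCE_MASK(32),
 *              .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 *      };
 *
 * The driver computes mult/shift for its counter frequency (e.g. with
 * clocksource_hz2mult()) and then calls
 * clocksource_register(&foo_clocksource).
 */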

static void __clocksource_change_rating(struct clocksource *cs, int rating)
{
        list_del(&cs->list);
        cs->rating = rating;
        clocksource_enqueue(cs);
        clocksource_select();
}

/**
 * clocksource_change_rating - Change the rating of a registered clocksource
 * @cs:         clocksource to be changed
 * @rating:     new rating
 */
void clocksource_change_rating(struct clocksource *cs, int rating)
{
        mutex_lock(&clocksource_mutex);
        __clocksource_change_rating(cs, rating);
        mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_change_rating);

/**
 * clocksource_unregister - remove a registered clocksource
 * @cs:         clocksource to be unregistered
 */
void clocksource_unregister(struct clocksource *cs)
{
        mutex_lock(&clocksource_mutex);
        clocksource_dequeue_watchdog(cs);
        list_del(&cs->list);
        clocksource_select();
        mutex_unlock(&clocksource_mutex);
}
EXPORT_SYMBOL(clocksource_unregister);

#ifdef CONFIG_SYSFS
/**
 * sysfs_show_current_clocksources - sysfs interface for current clocksource
 * @dev:        unused
 * @buf:        char buffer to be filled with the current clocksource name
 *
 * Provides sysfs interface for showing the current clocksource.
 */
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev,
                                struct sysdev_attribute *attr, char *buf)
{
        ssize_t count = 0;

        mutex_lock(&clocksource_mutex);
        count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
        mutex_unlock(&clocksource_mutex);

        return count;
}

/**
 * sysfs_override_clocksource - interface for manually overriding clocksource
 * @dev:        unused
 * @buf:        name of override clocksource
 * @count:      length of buffer
 *
 * Takes input from sysfs interface for manually overriding the default
 * clocksource selection.
 */
static ssize_t sysfs_override_clocksource(struct sys_device *dev,
                                          struct sysdev_attribute *attr,
                                          const char *buf, size_t count)
{
        size_t ret = count;

        /* strings from sysfs write are not 0 terminated! */
        if (count >= sizeof(override_name))
                return -EINVAL;

        /* strip off the trailing \n: */
        if (buf[count-1] == '\n')
                count--;

        mutex_lock(&clocksource_mutex);

        if (count > 0)
                memcpy(override_name, buf, count);
        override_name[count] = 0;
        clocksource_select();

        mutex_unlock(&clocksource_mutex);

        return ret;
}
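
/*
 * From user space the override is exercised through the sysdev files
 * set up below, e.g. (clocksource names are illustrative):
 *
 *      # cat /sys/devices/system/clocksource/clocksource0/available_clocksource
 *      tsc hpet acpi_pm
 *      # echo hpet > /sys/devices/system/clocksource/clocksource0/current_clocksource
 */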

/**
 * sysfs_show_available_clocksources - sysfs interface for listing clocksources
 * @dev:        unused
 * @buf:        char buffer to be filled with clocksource list
 *
 * Provides sysfs interface for listing registered clocksources
 */
static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev,
                                  struct sysdev_attribute *attr,
                                  char *buf)
{
        struct clocksource *src;
        ssize_t count = 0;

        mutex_lock(&clocksource_mutex);
        list_for_each_entry(src, &clocksource_list, list) {
                /*
                 * Don't show non-HRES clocksource if the tick code is
                 * in one shot mode (highres=on or nohz=on)
                 */
                if (!tick_oneshot_mode_active() ||
                    (src->flags & CLOCK_SOURCE_VALID_FOR_HRES))
                        count += snprintf(buf + count,
                                  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
                                  "%s ", src->name);
        }
        mutex_unlock(&clocksource_mutex);

        count += snprintf(buf + count,
                          max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

        return count;
}

/*
 * Sysfs setup bits:
 */
static SYSDEV_ATTR(current_clocksource, 0644, sysfs_show_current_clocksources,
                   sysfs_override_clocksource);

static SYSDEV_ATTR(available_clocksource, 0444,
                   sysfs_show_available_clocksources, NULL);

static struct sysdev_class clocksource_sysclass = {
        .name = "clocksource",
};

static struct sys_device device_clocksource = {
        .id     = 0,
        .cls    = &clocksource_sysclass,
};

static int __init init_clocksource_sysfs(void)
{
        int error = sysdev_class_register(&clocksource_sysclass);

        if (!error)
                error = sysdev_register(&device_clocksource);
        if (!error)
                error = sysdev_create_file(
                                &device_clocksource,
                                &attr_current_clocksource);
        if (!error)
                error = sysdev_create_file(
                                &device_clocksource,
                                &attr_available_clocksource);
        return error;
}

device_initcall(init_clocksource_sysfs);
#endif /* CONFIG_SYSFS */

/**
 * boot_override_clocksource - boot clock override
 * @str:        override name
 *
 * Takes a clocksource= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clocksource(char* str)
{
        mutex_lock(&clocksource_mutex);
        if (str)
                strlcpy(override_name, str, sizeof(override_name));
        mutex_unlock(&clocksource_mutex);
        return 1;
}

__setup("clocksource=", boot_override_clocksource);
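
/*
 * Example: booting with "clocksource=acpi_pm" on the kernel command
 * line stores "acpi_pm" in override_name; clocksource_select() then
 * prefers that clocksource once it has been registered.
 */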

/**
 * boot_override_clock - Compatibility layer for deprecated boot option
 * @str:        override name
 *
 * DEPRECATED! Takes a clock= boot argument and uses it
 * as the clocksource override name.
 */
static int __init boot_override_clock(char* str)
{
        if (!strcmp(str, "pmtmr")) {
                printk("Warning: clock=pmtmr is deprecated. "
                        "Use clocksource=acpi_pm.\n");
                return boot_override_clocksource("acpi_pm");
        }
        printk("Warning! clock= boot option is deprecated. "
                "Use clocksource=xyz\n");
        return boot_override_clocksource(str);
}

__setup("clock=", boot_override_clock);