linux/arch/s390/kernel/time.c
   1/*
   2 *  arch/s390/kernel/time.c
   3 *    Time of day based timer functions.
   4 *
   5 *  S390 version
   6 *    Copyright IBM Corp. 1999, 2008
   7 *    Author(s): Hartmut Penner (hp@de.ibm.com),
   8 *               Martin Schwidefsky (schwidefsky@de.ibm.com),
   9 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  10 *
  11 *  Derived from "arch/i386/kernel/time.c"
  12 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
  13 */
  14
  15#define KMSG_COMPONENT "time"
  16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  17
  18#include <linux/kernel_stat.h>
  19#include <linux/errno.h>
  20#include <linux/module.h>
  21#include <linux/sched.h>
  22#include <linux/kernel.h>
  23#include <linux/param.h>
  24#include <linux/string.h>
  25#include <linux/mm.h>
  26#include <linux/interrupt.h>
  27#include <linux/cpu.h>
  28#include <linux/stop_machine.h>
  29#include <linux/time.h>
  30#include <linux/sysdev.h>
  31#include <linux/delay.h>
  32#include <linux/init.h>
  33#include <linux/smp.h>
  34#include <linux/types.h>
  35#include <linux/profile.h>
  36#include <linux/timex.h>
  37#include <linux/notifier.h>
  38#include <linux/clocksource.h>
  39#include <linux/clockchips.h>
  40#include <linux/gfp.h>
  41#include <linux/kprobes.h>
  42#include <asm/uaccess.h>
  43#include <asm/delay.h>
  44#include <asm/s390_ext.h>
  45#include <asm/div64.h>
  46#include <asm/vdso.h>
  47#include <asm/irq.h>
  48#include <asm/irq_regs.h>
  49#include <asm/timer.h>
  50#include <asm/etr.h>
  51#include <asm/cio.h>
  52
  53/* change this if you have some constant time drift */
  54#define USECS_PER_JIFFY     ((unsigned long) 1000000/HZ)
  55#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
  56
  57u64 sched_clock_base_cc = -1;   /* Force to data section. */
  58EXPORT_SYMBOL_GPL(sched_clock_base_cc);
  59
  60static DEFINE_PER_CPU(struct clock_event_device, comparators);
  61
  62/*
  63 * Scheduler clock - returns current time in nanosec units.
  64 */
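/*
 * Note on the conversion below: the TOD clock advances in units of
 * 2**-12 microseconds, i.e. 4096 clock units per microsecond. Scaling
 * to nanoseconds therefore means multiplying by 1000/4096 = 125/512,
 * which is what (tod * 125) >> 9 computes.
 */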
  65unsigned long long notrace __kprobes sched_clock(void)
  66{
  67        return (get_clock_monotonic() * 125) >> 9;
  68}
  69
  70/*
  71 * Monotonic_clock - returns # of nanoseconds passed since time_init()
  72 */
  73unsigned long long monotonic_clock(void)
  74{
  75        return sched_clock();
  76}
  77EXPORT_SYMBOL(monotonic_clock);
  78
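/*
 * Convert a TOD clock value to a timespec: shifting right by 12 turns
 * clock units into microseconds, do_div() splits these into seconds,
 * and the sub-second remainder (kept in clock units) is converted to
 * nanoseconds with the same * 1000 >> 12 scaling as above.
 */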
  79void tod_to_timeval(__u64 todval, struct timespec *xt)
  80{
  81        unsigned long long sec;
  82
  83        sec = todval >> 12;
  84        do_div(sec, 1000000);
  85        xt->tv_sec = sec;
  86        todval -= (sec * 1000000) << 12;
  87        xt->tv_nsec = ((todval * 1000) >> 12);
  88}
  89EXPORT_SYMBOL(tod_to_timeval);
  90
  91void clock_comparator_work(void)
  92{
  93        struct clock_event_device *cd;
  94
  95        S390_lowcore.clock_comparator = -1ULL;
  96        set_clock_comparator(S390_lowcore.clock_comparator);
  97        cd = &__get_cpu_var(comparators);
  98        cd->event_handler(cd);
  99}
 100
 101/*
 102 * Fixup the clock comparator.
 103 */
 104static void fixup_clock_comparator(unsigned long long delta)
 105{
 106        /* If nobody is waiting there's nothing to fix. */
 107        if (S390_lowcore.clock_comparator == -1ULL)
 108                return;
 109        S390_lowcore.clock_comparator += delta;
 110        set_clock_comparator(S390_lowcore.clock_comparator);
 111}
 112
 113static int s390_next_event(unsigned long delta,
 114                           struct clock_event_device *evt)
 115{
 116        S390_lowcore.clock_comparator = get_clock() + delta;
 117        set_clock_comparator(S390_lowcore.clock_comparator);
 118        return 0;
 119}
 120
 121static void s390_set_mode(enum clock_event_mode mode,
 122                          struct clock_event_device *evt)
 123{
 124}
 125
 126/*
 127 * Set up lowcore and control register of the current cpu to
 128 * enable TOD clock and clock comparator interrupts.
 129 */
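/*
 * The clock event device below uses mult = 16777 and shift = 12, so the
 * clockevents core converts a nanosecond delta to TOD clock units as
 * (delta_ns * 16777) >> 12, an approximation of the exact factor of
 * 4.096 clock units per nanosecond.
 */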
 130void init_cpu_timer(void)
 131{
 132        struct clock_event_device *cd;
 133        int cpu;
 134
 135        S390_lowcore.clock_comparator = -1ULL;
 136        set_clock_comparator(S390_lowcore.clock_comparator);
 137
 138        cpu = smp_processor_id();
 139        cd = &per_cpu(comparators, cpu);
 140        cd->name                = "comparator";
 141        cd->features            = CLOCK_EVT_FEAT_ONESHOT;
 142        cd->mult                = 16777;
 143        cd->shift               = 12;
 144        cd->min_delta_ns        = 1;
 145        cd->max_delta_ns        = LONG_MAX;
 146        cd->rating              = 400;
 147        cd->cpumask             = cpumask_of(cpu);
 148        cd->set_next_event      = s390_next_event;
 149        cd->set_mode            = s390_set_mode;
 150
 151        clockevents_register_device(cd);
 152
 153        /* Enable clock comparator timer interrupt. */
 154        __ctl_set_bit(0,11);
 155
 156        /* Always allow the timing alert external interrupt. */
 157        __ctl_set_bit(0, 4);
 158}
 159
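/*
 * Handler for the clock comparator external interrupt (0x1004). If no
 * event is currently programmed (the lowcore copy still holds the
 * "disabled" value -1ULL), re-arm the comparator with that value so the
 * interrupt condition is cleared; event expiry itself is handled by
 * clock_comparator_work().
 */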
 160static void clock_comparator_interrupt(unsigned int ext_int_code,
 161                                       unsigned int param32,
 162                                       unsigned long param64)
 163{
 164        kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++;
 165        if (S390_lowcore.clock_comparator == -1ULL)
 166                set_clock_comparator(S390_lowcore.clock_comparator);
 167}
 168
 169static void etr_timing_alert(struct etr_irq_parm *);
 170static void stp_timing_alert(struct stp_irq_parm *);
 171
 172static void timing_alert_interrupt(unsigned int ext_int_code,
 173                                   unsigned int param32, unsigned long param64)
 174{
 175        kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++;
 176        if (param32 & 0x00c40000)
 177                etr_timing_alert((struct etr_irq_parm *) &param32);
 178        if (param32 & 0x00038000)
 179                stp_timing_alert((struct stp_irq_parm *) &param32);
 180}
 181
 182static void etr_reset(void);
 183static void stp_reset(void);
 184
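/*
 * The TOD clock counts from the standard epoch 1900-01-01 00:00 UTC;
 * TOD_UNIX_EPOCH is the TOD value at the Unix epoch (1970-01-01), so
 * subtracting it converts a TOD value to Unix time.
 */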
 185void read_persistent_clock(struct timespec *ts)
 186{
 187        tod_to_timeval(get_clock() - TOD_UNIX_EPOCH, ts);
 188}
 189
 190void read_boot_clock(struct timespec *ts)
 191{
 192        tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
 193}
 194
 195static cycle_t read_tod_clock(struct clocksource *cs)
 196{
 197        return get_clock();
 198}
 199
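/*
 * The TOD clocksource uses mult = 1000 and shift = 12, so the generic
 * timekeeping code computes ns = (cycles * 1000) >> 12, the same
 * 125/512 ratio used by sched_clock() above.
 */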
 200static struct clocksource clocksource_tod = {
 201        .name           = "tod",
 202        .rating         = 400,
 203        .read           = read_tod_clock,
 204        .mask           = -1ULL,
 205        .mult           = 1000,
 206        .shift          = 12,
 207        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 208};
 209
 210struct clocksource * __init clocksource_default_clock(void)
 211{
 212        return &clocksource_tod;
 213}
 214
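/*
 * update_vsyscall() publishes the new timekeeping values to the vdso
 * data page with a seqlock-like protocol: tb_update_count is odd while
 * an update is in progress, so the vdso gettimeofday code rereads the
 * data until it sees an even and unchanged count.
 */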
 215void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
 216                        struct clocksource *clock, u32 mult)
 217{
 218        if (clock != &clocksource_tod)
 219                return;
 220
 221        /* Make userspace gettimeofday spin until we're done. */
 222        ++vdso_data->tb_update_count;
 223        smp_wmb();
 224        vdso_data->xtime_tod_stamp = clock->cycle_last;
 225        vdso_data->xtime_clock_sec = wall_time->tv_sec;
 226        vdso_data->xtime_clock_nsec = wall_time->tv_nsec;
 227        vdso_data->wtom_clock_sec = wtm->tv_sec;
 228        vdso_data->wtom_clock_nsec = wtm->tv_nsec;
 229        vdso_data->ntp_mult = mult;
 230        smp_wmb();
 231        ++vdso_data->tb_update_count;
 232}
 233
 234extern struct timezone sys_tz;
 235
 236void update_vsyscall_tz(void)
 237{
 238        /* Make userspace gettimeofday spin until we're done. */
 239        ++vdso_data->tb_update_count;
 240        smp_wmb();
 241        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
 242        vdso_data->tz_dsttime = sys_tz.tz_dsttime;
 243        smp_wmb();
 244        ++vdso_data->tb_update_count;
 245}
 246
 247/*
 248 * Initialize the TOD clock and the CPU timer of
 249 * the boot cpu.
 250 */
 251void __init time_init(void)
 252{
 253        /* Reset time synchronization interfaces. */
 254        etr_reset();
 255        stp_reset();
 256
 257        /* request the clock comparator external interrupt */
 258        if (register_external_interrupt(0x1004, clock_comparator_interrupt))
 259                panic("Couldn't request external interrupt 0x1004");
 260
 261        /* request the timing alert external interrupt */
 262        if (register_external_interrupt(0x1406, timing_alert_interrupt))
 263                panic("Couldn't request external interrupt 0x1406");
 264
 265        if (clocksource_register(&clocksource_tod) != 0)
 266                panic("Could not register TOD clock source");
 267
 268        /* Enable TOD clock interrupts on the boot cpu. */
 269        init_cpu_timer();
 270
 271        /* Enable cpu timer interrupts on the boot cpu. */
 272        vtime_init();
 273}
 274
 275/*
 276 * The time is "clock". old is what we think the time is.
 277 * Adjust the value by a multiple of jiffies and add the delta to ntp.
 278 * "delay" is an approximation of how long the synchronization took. If
 279 * the time correction is positive, then "delay" is subtracted from
 280 * the time difference and only the remaining part is passed to ntp.
 281 */
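/*
 * Example with HZ=100: CLK_TICKS_PER_JIFFY is (1000000/100) << 12 =
 * 40960000 TOD clock units per jiffy. The whole-jiffy part of the
 * difference is applied to sched_clock_base_cc, and the corresponding
 * offset in microseconds is handed to ntp via do_adjtimex().
 */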
 282static unsigned long long adjust_time(unsigned long long old,
 283                                      unsigned long long clock,
 284                                      unsigned long long delay)
 285{
 286        unsigned long long delta, ticks;
 287        struct timex adjust;
 288
 289        if (clock > old) {
 290                /* It is later than we thought. */
 291                delta = ticks = clock - old;
 292                delta = ticks = (delta < delay) ? 0 : delta - delay;
 293                delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
 294                adjust.offset = ticks * (1000000 / HZ);
 295        } else {
 296                /* It is earlier than we thought. */
 297                delta = ticks = old - clock;
 298                delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
 299                delta = -delta;
 300                adjust.offset = -ticks * (1000000 / HZ);
 301        }
 302        sched_clock_base_cc += delta;
 303        if (adjust.offset != 0) {
 304                pr_notice("The ETR interface has adjusted the clock "
 305                          "by %li microseconds\n", adjust.offset);
 306                adjust.modes = ADJ_OFFSET_SINGLESHOT;
 307                do_adjtimex(&adjust);
 308        }
 309        return delta;
 310}
 311
 312static DEFINE_PER_CPU(atomic_t, clock_sync_word);
 313static DEFINE_MUTEX(clock_sync_mutex);
 314static unsigned long clock_sync_flags;
 315
 316#define CLOCK_SYNC_HAS_ETR      0
 317#define CLOCK_SYNC_HAS_STP      1
 318#define CLOCK_SYNC_ETR          2
 319#define CLOCK_SYNC_STP          3
 320
 321/*
 322 * The synchronous get_clock function. It will write the current clock
 323 * value to the clock pointer and return 0 if the clock is in sync with
 324 * the external time source. It returns -ENOSYS if no ETR or STP
 325 * facility is available, -EACCES if one is available but not used for
 326 * synchronization, and -EAGAIN if the clock is not yet in sync.
 327 */
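/*
 * Layout of the per-cpu clock_sync_word: bit 2^31 is the "in sync"
 * flag (set by enable_sync_clock, cleared by disable_sync_clock), the
 * remaining bits form a sequence counter. Reading the word before and
 * after get_clock() detects a concurrent loss of synchronization.
 */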
 328int get_sync_clock(unsigned long long *clock)
 329{
 330        atomic_t *sw_ptr;
 331        unsigned int sw0, sw1;
 332
 333        sw_ptr = &get_cpu_var(clock_sync_word);
 334        sw0 = atomic_read(sw_ptr);
 335        *clock = get_clock();
 336        sw1 = atomic_read(sw_ptr);
 337        put_cpu_var(clock_sync_word);
 338        if (sw0 == sw1 && (sw0 & 0x80000000U))
 339                /* Success: time is in sync. */
 340                return 0;
 341        if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) &&
 342            !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
 343                return -ENOSYS;
 344        if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) &&
 345            !test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
 346                return -EACCES;
 347        return -EAGAIN;
 348}
 349EXPORT_SYMBOL(get_sync_clock);
 350
 351/*
 352 * Make get_sync_clock return -EAGAIN.
 353 */
 354static void disable_sync_clock(void *dummy)
 355{
 356        atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
 357        /*
 358         * Clear the in-sync bit 2^31. All get_sync_clock calls will
 359         * fail until the sync bit is turned back on. In addition,
 360         * increase the "sequence" counter so that get_sync_clock cannot
 361         * race with an etr event and the subsequent recovery.
 362         */
 363        atomic_clear_mask(0x80000000, sw_ptr);
 364        atomic_inc(sw_ptr);
 365}
 366
 367/*
 368 * Make get_sync_clock return 0 again.
 369 * Needs to be called from a context disabled for preemption.
 370 */
 371static void enable_sync_clock(void)
 372{
 373        atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
 374        atomic_set_mask(0x80000000, sw_ptr);
 375}
 376
 377/*
 378 * Function to check if the clock is in sync.
 379 */
 380static inline int check_sync_clock(void)
 381{
 382        atomic_t *sw_ptr;
 383        int rc;
 384
 385        sw_ptr = &get_cpu_var(clock_sync_word);
 386        rc = (atomic_read(sw_ptr) & 0x80000000U) != 0;
 387        put_cpu_var(clock_sync_word);
 388        return rc;
 389}
 390
 391/* Single threaded workqueue used for etr and stp sync events */
 392static struct workqueue_struct *time_sync_wq;
 393
 394static void __init time_init_wq(void)
 395{
 396        if (time_sync_wq)
 397                return;
 398        time_sync_wq = create_singlethread_workqueue("timesync");
 399}
 400
 401/*
 402 * External Time Reference (ETR) code.
 403 */
 404static int etr_port0_online;
 405static int etr_port1_online;
 406static int etr_steai_available;
 407
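/*
 * Kernel command line parameter "etr=": "off" takes both ports offline,
 * "port0" / "port1" bring up a single port, "on" brings up both.
 */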
 408static int __init early_parse_etr(char *p)
 409{
 410        if (strncmp(p, "off", 3) == 0)
 411                etr_port0_online = etr_port1_online = 0;
 412        else if (strncmp(p, "port0", 5) == 0)
 413                etr_port0_online = 1;
 414        else if (strncmp(p, "port1", 5) == 0)
 415                etr_port1_online = 1;
 416        else if (strncmp(p, "on", 2) == 0)
 417                etr_port0_online = etr_port1_online = 1;
 418        return 0;
 419}
 420early_param("etr", early_parse_etr);
 421
 422enum etr_event {
 423        ETR_EVENT_PORT0_CHANGE,
 424        ETR_EVENT_PORT1_CHANGE,
 425        ETR_EVENT_PORT_ALERT,
 426        ETR_EVENT_SYNC_CHECK,
 427        ETR_EVENT_SWITCH_LOCAL,
 428        ETR_EVENT_UPDATE,
 429};
 430
 431/*
 432 * Valid bit combinations of the eacr register are (x = don't care):
 433 * e0 e1 dp p0 p1 ea es sl
 434 *  0  0  x  0  0  0  0  0  initial, disabled state
 435 *  0  0  x  0  1  1  0  0  port 1 online
 436 *  0  0  x  1  0  1  0  0  port 0 online
 437 *  0  0  x  1  1  1  0  0  both ports online
 438 *  0  1  x  0  1  1  0  0  port 1 online and usable, ETR or PPS mode
 439 *  0  1  x  0  1  1  0  1  port 1 online, usable and ETR mode
 440 *  0  1  x  0  1  1  1  0  port 1 online, usable, PPS mode, in-sync
 441 *  0  1  x  0  1  1  1  1  port 1 online, usable, ETR mode, in-sync
 442 *  0  1  x  1  1  1  0  0  both ports online, port 1 usable
 443 *  0  1  x  1  1  1  1  0  both ports online, port 1 usable, PPS mode, in-sync
 444 *  0  1  x  1  1  1  1  1  both ports online, port 1 usable, ETR mode, in-sync
 445 *  1  0  x  1  0  1  0  0  port 0 online and usable, ETR or PPS mode
 446 *  1  0  x  1  0  1  0  1  port 0 online, usable and ETR mode
 447 *  1  0  x  1  0  1  1  0  port 0 online, usable, PPS mode, in-sync
 448 *  1  0  x  1  0  1  1  1  port 0 online, usable, ETR mode, in-sync
 449 *  1  0  x  1  1  1  0  0  both ports online, port 0 usable
 450 *  1  0  x  1  1  1  1  0  both ports online, port 0 usable, PPS mode, in-sync
 451 *  1  0  x  1  1  1  1  1  both ports online, port 0 usable, ETR mode, in-sync
 452 *  1  1  x  1  1  1  1  0  both ports online & usable, ETR, in-sync
 453 *  1  1  x  1  1  1  1  1  both ports online & usable, ETR, in-sync
 454 */
 455static struct etr_eacr etr_eacr;
 456static u64 etr_tolec;                   /* time of last eacr update */
 457static struct etr_aib etr_port0;
 458static int etr_port0_uptodate;
 459static struct etr_aib etr_port1;
 460static int etr_port1_uptodate;
 461static unsigned long etr_events;
 462static struct timer_list etr_timer;
 463
 464static void etr_timeout(unsigned long dummy);
 465static void etr_work_fn(struct work_struct *work);
 466static DEFINE_MUTEX(etr_work_mutex);
 467static DECLARE_WORK(etr_work, etr_work_fn);
 468
 469/*
 470 * Reset ETR attachment.
 471 */
 472static void etr_reset(void)
 473{
 474        etr_eacr =  (struct etr_eacr) {
 475                .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
 476                .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
 477                .es = 0, .sl = 0 };
 478        if (etr_setr(&etr_eacr) == 0) {
 479                etr_tolec = get_clock();
 480                set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
 481                if (etr_port0_online && etr_port1_online)
 482                        set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
 483        } else if (etr_port0_online || etr_port1_online) {
 484                pr_warning("The real or virtual hardware system does "
 485                           "not provide an ETR interface\n");
 486                etr_port0_online = etr_port1_online = 0;
 487        }
 488}
 489
 490static int __init etr_init(void)
 491{
 492        struct etr_aib aib;
 493
 494        if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
 495                return 0;
 496        time_init_wq();
 497        /* Check if this machine has the steai instruction. */
 498        if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
 499                etr_steai_available = 1;
 500        setup_timer(&etr_timer, etr_timeout, 0UL);
 501        if (etr_port0_online) {
 502                set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
 503                queue_work(time_sync_wq, &etr_work);
 504        }
 505        if (etr_port1_online) {
 506                set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
 507                queue_work(time_sync_wq, &etr_work);
 508        }
 509        return 0;
 510}
 511
 512arch_initcall(etr_init);
 513
 514/*
 515 * Two sorts of ETR machine checks. The architecture reads:
 516 * "When a machine-check interruption occurs and if a switch-to-local or
 517 *  ETR-sync-check interrupt request is pending but disabled, this pending
 518 *  disabled interruption request is indicated and is cleared".
 519 * Which means that we can get etr_switch_to_local events from the machine
 520 * check handler although the interruption condition is disabled. Lovely..
 521 */
 522
 523/*
 524 * Switch to local machine check. This is called when the last usable
 525 * ETR port goes inactive. After switch to local the clock is not in sync.
 526 */
 527void etr_switch_to_local(void)
 528{
 529        if (!etr_eacr.sl)
 530                return;
 531        disable_sync_clock(NULL);
 532        if (!test_and_set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events)) {
 533                etr_eacr.es = etr_eacr.sl = 0;
 534                etr_setr(&etr_eacr);
 535                queue_work(time_sync_wq, &etr_work);
 536        }
 537}
 538
 539/*
 540 * ETR sync check machine check. This is called when the ETR OTE and the
 541 * local clock OTE are farther apart than the ETR sync check tolerance.
 542 * After an ETR sync check the clock is not in sync. The machine check
 543 * is broadcast to all cpus at the same time.
 544 */
 545void etr_sync_check(void)
 546{
 547        if (!etr_eacr.es)
 548                return;
 549        disable_sync_clock(NULL);
 550        if (!test_and_set_bit(ETR_EVENT_SYNC_CHECK, &etr_events)) {
 551                etr_eacr.es = 0;
 552                etr_setr(&etr_eacr);
 553                queue_work(time_sync_wq, &etr_work);
 554        }
 555}
 556
 557/*
 558 * ETR timing alert. There are two causes:
 559 * 1) port state change, check the usability of the port
 560 * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
 561 *    sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
 562 *    or ETR-data word 4 (edf4) has changed.
 563 */
 564static void etr_timing_alert(struct etr_irq_parm *intparm)
 565{
 566        if (intparm->pc0)
 567                /* ETR port 0 state change. */
 568                set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
 569        if (intparm->pc1)
 570                /* ETR port 1 state change. */
 571                set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
 572        if (intparm->eai)
 573                /*
 574                 * ETR port alert on either port 0, 1 or both.
 575                 * Both ports are not up-to-date now.
 576                 */
 577                set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
 578        queue_work(time_sync_wq, &etr_work);
 579}
 580
 581static void etr_timeout(unsigned long dummy)
 582{
 583        set_bit(ETR_EVENT_UPDATE, &etr_events);
 584        queue_work(time_sync_wq, &etr_work);
 585}
 586
 587/*
 588 * Check if the etr mode is pps.
 589 */
 590static inline int etr_mode_is_pps(struct etr_eacr eacr)
 591{
 592        return eacr.es && !eacr.sl;
 593}
 594
 595/*
 596 * Check if the etr mode is etr.
 597 */
 598static inline int etr_mode_is_etr(struct etr_eacr eacr)
 599{
 600        return eacr.es && eacr.sl;
 601}
 602
 603/*
 604 * Check if the port can be used for TOD synchronization.
 605 * For PPS mode the port has to receive OTEs. For ETR mode
 606 * the port has to receive OTEs, the ETR stepping bit has to
 607 * be zero and the validity bits for data frame 1, 2, and 3
 608 * have to be 1.
 609 */
 610static int etr_port_valid(struct etr_aib *aib, int port)
 611{
 612        unsigned int psc;
 613
 614        /* Check that this port is receiving OTEs. */
 615        if (aib->tsp == 0)
 616                return 0;
 617
 618        psc = port ? aib->esw.psc1 : aib->esw.psc0;
 619        if (psc == etr_lpsc_pps_mode)
 620                return 1;
 621        if (psc == etr_lpsc_operational_step)
 622                return !aib->esw.y && aib->slsw.v1 &&
 623                        aib->slsw.v2 && aib->slsw.v3;
 624        return 0;
 625}
 626
 627/*
 628 * Check if two ports are on the same network.
 629 */
 630static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
 631{
 632        // FIXME: any other fields we have to compare?
 633        return aib1->edf1.net_id == aib2->edf1.net_id;
 634}
 635
 636/*
 637 * Wrapper for etr_steai that converts physical port states
 638 * to logical port states to be consistent with the output
 639 * of stetr (see etr_psc vs. etr_lpsc).
 640 */
 641static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
 642{
 643        BUG_ON(etr_steai(aib, func) != 0);
 644        /* Convert port state to logical port state. */
 645        if (aib->esw.psc0 == 1)
 646                aib->esw.psc0 = 2;
 647        else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
 648                aib->esw.psc0 = 1;
 649        if (aib->esw.psc1 == 1)
 650                aib->esw.psc1 = 2;
 651        else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
 652                aib->esw.psc1 = 1;
 653}
 654
 655/*
 656 * Check if the aib a2 is still connected to the same attachment as
 657 * aib a1, the etv values differ by one and a2 is valid.
 658 */
 659static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
 660{
 661        int state_a1, state_a2;
 662
 663        /* Paranoia check: e0/e1 should better be the same. */
 664        if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
 665            a1->esw.eacr.e1 != a2->esw.eacr.e1)
 666                return 0;
 667
 668        /* Still connected to the same etr ? */
 669        state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
 670        state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
 671        if (state_a1 == etr_lpsc_operational_step) {
 672                if (state_a2 != etr_lpsc_operational_step ||
 673                    a1->edf1.net_id != a2->edf1.net_id ||
 674                    a1->edf1.etr_id != a2->edf1.etr_id ||
 675                    a1->edf1.etr_pn != a2->edf1.etr_pn)
 676                        return 0;
 677        } else if (state_a2 != etr_lpsc_pps_mode)
 678                return 0;
 679
 680        /* The ETV value of a2 needs to be ETV of a1 + 1. */
 681        if (a1->edf2.etv + 1 != a2->edf2.etv)
 682                return 0;
 683
 684        if (!etr_port_valid(a2, p))
 685                return 0;
 686
 687        return 1;
 688}
 689
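/*
 * Shared state for a TOD synchronization run: "cpus" counts the cpus
 * that still have to enter the sync function, "in_sync" stays 0 while
 * the master cpu works and becomes 1 on success or -EAGAIN on failure,
 * "fixup_cc" is the delta each cpu adds to its clock comparator;
 * "etr_port" and "etr_aib" carry the ETR specifics for etr_sync_clock().
 */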
 690struct clock_sync_data {
 691        atomic_t cpus;
 692        int in_sync;
 693        unsigned long long fixup_cc;
 694        int etr_port;
 695        struct etr_aib *etr_aib;
 696};
 697
 698static void clock_sync_cpu(struct clock_sync_data *sync)
 699{
 700        atomic_dec(&sync->cpus);
 701        enable_sync_clock();
 702        /*
 703         * This looks like a busy wait loop but it isn't. clock_sync_cpu
 704         * is called on all other cpus while the TOD clock is stopped.
 705         * __udelay will stop the cpu on an enabled wait psw until the
 706         * TOD is running again.
 707         */
 708        while (sync->in_sync == 0) {
 709                __udelay(1);
 710                /*
 711                 * A different cpu changes *in_sync. Therefore use
 712                 * barrier() to force memory access.
 713                 */
 714                barrier();
 715        }
 716        if (sync->in_sync != 1)
 717                /* Didn't work. Clear per-cpu in sync bit again. */
 718                disable_sync_clock(NULL);
 719        /*
 720         * This round of TOD syncing is done. Set the clock comparator
 721         * to the next tick and let the processor continue.
 722         */
 723        fixup_clock_comparator(sync->fixup_cc);
 724}
 725
 726/*
 727 * Sync the TOD clock using the port referred to by aibp. This port
 728 * has to be enabled and the other port has to be disabled. The
 729 * last eacr update has to be more than 1.6 seconds in the past.
 730 */
 731static int etr_sync_clock(void *data)
 732{
 733        static int first;
 734        unsigned long long clock, old_clock, delay, delta;
 735        struct clock_sync_data *etr_sync;
 736        struct etr_aib *sync_port, *aib;
 737        int port;
 738        int rc;
 739
 740        etr_sync = data;
 741
 742        if (xchg(&first, 1) == 1) {
 743                /* Slave */
 744                clock_sync_cpu(etr_sync);
 745                return 0;
 746        }
 747
 748        /* Wait until all other cpus entered the sync function. */
 749        while (atomic_read(&etr_sync->cpus) != 0)
 750                cpu_relax();
 751
 752        port = etr_sync->etr_port;
 753        aib = etr_sync->etr_aib;
 754        sync_port = (port == 0) ? &etr_port0 : &etr_port1;
 755        enable_sync_clock();
 756
 757        /* Set clock to next OTE. */
 758        __ctl_set_bit(14, 21);
 759        __ctl_set_bit(0, 29);
 760        clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
 761        old_clock = get_clock();
 762        if (set_clock(clock) == 0) {
 763                __udelay(1);    /* Wait for the clock to start. */
 764                __ctl_clear_bit(0, 29);
 765                __ctl_clear_bit(14, 21);
 766                etr_stetr(aib);
 767                /* Adjust Linux timing variables. */
 768                delay = (unsigned long long)
 769                        (aib->edf2.etv - sync_port->edf2.etv) << 32;
 770                delta = adjust_time(old_clock, clock, delay);
 771                etr_sync->fixup_cc = delta;
 772                fixup_clock_comparator(delta);
 773                /* Verify that the clock is properly set. */
 774                if (!etr_aib_follows(sync_port, aib, port)) {
 775                        /* Didn't work. */
 776                        disable_sync_clock(NULL);
 777                        etr_sync->in_sync = -EAGAIN;
 778                        rc = -EAGAIN;
 779                } else {
 780                        etr_sync->in_sync = 1;
 781                        rc = 0;
 782                }
 783        } else {
 784                /* Could not set the clock ?!? */
 785                __ctl_clear_bit(0, 29);
 786                __ctl_clear_bit(14, 21);
 787                disable_sync_clock(NULL);
 788                etr_sync->in_sync = -EAGAIN;
 789                rc = -EAGAIN;
 790        }
 791        xchg(&first, 0);
 792        return rc;
 793}
 794
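/*
 * Run etr_sync_clock() on all cpus via stop_machine(): the first cpu to
 * enter performs the actual TOD synchronization, the others wait in
 * clock_sync_cpu() until in_sync is set.
 */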
 795static int etr_sync_clock_stop(struct etr_aib *aib, int port)
 796{
 797        struct clock_sync_data etr_sync;
 798        struct etr_aib *sync_port;
 799        int follows;
 800        int rc;
 801
 802        /* Check if the current aib is adjacent to the sync port aib. */
 803        sync_port = (port == 0) ? &etr_port0 : &etr_port1;
 804        follows = etr_aib_follows(sync_port, aib, port);
 805        memcpy(sync_port, aib, sizeof(*aib));
 806        if (!follows)
 807                return -EAGAIN;
 808        memset(&etr_sync, 0, sizeof(etr_sync));
 809        etr_sync.etr_aib = aib;
 810        etr_sync.etr_port = port;
 811        get_online_cpus();
 812        atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
 813        rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
 814        put_online_cpus();
 815        return rc;
 816}
 817
 818/*
 819 * Handle the immediate effects of the different events.
 820 * The port change event is used for online/offline changes.
 821 */
 822static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
 823{
 824        if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
 825                eacr.es = 0;
 826        if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
 827                eacr.es = eacr.sl = 0;
 828        if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
 829                etr_port0_uptodate = etr_port1_uptodate = 0;
 830
 831        if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
 832                if (eacr.e0)
 833                        /*
 834                         * Port change of an enabled port. We have to
 835                         * assume that this may have caused a stepping
 836                         * port switch.
 837                         */
 838                        etr_tolec = get_clock();
 839                eacr.p0 = etr_port0_online;
 840                if (!eacr.p0)
 841                        eacr.e0 = 0;
 842                etr_port0_uptodate = 0;
 843        }
 844        if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
 845                if (eacr.e1)
 846                        /*
 847                         * Port change of an enabled port. We have to
 848                         * assume that this may have caused a stepping
 849                         * port switch.
 850                         */
 851                        etr_tolec = get_clock();
 852                eacr.p1 = etr_port1_online;
 853                if (!eacr.p1)
 854                        eacr.e1 = 0;
 855                etr_port1_uptodate = 0;
 856        }
 857        clear_bit(ETR_EVENT_UPDATE, &etr_events);
 858        return eacr;
 859}
 860
 861/*
 862 * Set up a timer that expires 1.6 seconds after etr_tolec if
 863 * one of the ports needs an update.
 864 */
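/*
 * The difference now - etr_tolec is in TOD clock units; shifting it
 * right by 12 yields microseconds, which is compared against the
 * 1.6 second (1600000 microsecond) update window.
 */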
 865static void etr_set_tolec_timeout(unsigned long long now)
 866{
 867        unsigned long micros;
 868
 869        if ((!etr_eacr.p0 || etr_port0_uptodate) &&
 870            (!etr_eacr.p1 || etr_port1_uptodate))
 871                return;
 872        micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
 873        micros = (micros > 1600000) ? 0 : 1600000 - micros;
 874        mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
 875}
 876
 877/*
 878 * Set up a timer that expires after 1/2 second.
 879 */
 880static void etr_set_sync_timeout(void)
 881{
 882        mod_timer(&etr_timer, jiffies + HZ/2);
 883}
 884
 885/*
 886 * Update the aib information for one or both ports.
 887 */
 888static struct etr_eacr etr_handle_update(struct etr_aib *aib,
 889                                         struct etr_eacr eacr)
 890{
 891        /* With both ports disabled the aib information is useless. */
 892        if (!eacr.e0 && !eacr.e1)
 893                return eacr;
 894
 895        /* Update port0 or port1 with aib stored in etr_work_fn. */
 896        if (aib->esw.q == 0) {
 897                /* Information for port 0 stored. */
 898                if (eacr.p0 && !etr_port0_uptodate) {
 899                        etr_port0 = *aib;
 900                        if (etr_port0_online)
 901                                etr_port0_uptodate = 1;
 902                }
 903        } else {
 904                /* Information for port 1 stored. */
 905                if (eacr.p1 && !etr_port1_uptodate) {
 906                        etr_port1 = *aib;
 907                        if (etr_port1_online)
 908                                etr_port1_uptodate = 1;
 909                }
 910        }
 911
 912        /*
 913         * Do not try to get the alternate port aib if the clock
 914         * is not in sync yet.
 915         */
 916        if (!eacr.es || !check_sync_clock())
 917                return eacr;
 918
 919        /*
 920         * If steai is available we can get the information about
 921         * the other port immediately. If only stetr is available the
 922         * data-port bit toggle has to be used.
 923         */
 924        if (etr_steai_available) {
 925                if (eacr.p0 && !etr_port0_uptodate) {
 926                        etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
 927                        etr_port0_uptodate = 1;
 928                }
 929                if (eacr.p1 && !etr_port1_uptodate) {
 930                        etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
 931                        etr_port1_uptodate = 1;
 932                }
 933        } else {
 934                /*
 935                 * One port was updated above; if the other
 936                 * port is not uptodate, toggle the dp bit.
 937                 */
 938                if ((eacr.p0 && !etr_port0_uptodate) ||
 939                    (eacr.p1 && !etr_port1_uptodate))
 940                        eacr.dp ^= 1;
 941                else
 942                        eacr.dp = 0;
 943        }
 944        return eacr;
 945}
 946
 947/*
 948 * Write the new etr control register if it differs from the current one.
 949 * Update etr_tolec if the write may have caused a data port change.
 950 */
 951static void etr_update_eacr(struct etr_eacr eacr)
 952{
 953        int dp_changed;
 954
 955        if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
 956                /* No change, return. */
 957                return;
 958        /*
 959         * The disable of an active port or the change of the data port
 960         * bit can/will cause a change in the data port.
 961         */
 962        dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
 963                (etr_eacr.dp ^ eacr.dp) != 0;
 964        etr_eacr = eacr;
 965        etr_setr(&etr_eacr);
 966        if (dp_changed)
 967                etr_tolec = get_clock();
 968}
 969
 970/*
 971 * ETR work. In this function you'll find the main logic. In
 972 * particular this is the only function that calls etr_update_eacr();
 973 * it "controls" the etr control register.
 974 */
 975static void etr_work_fn(struct work_struct *work)
 976{
 977        unsigned long long now;
 978        struct etr_eacr eacr;
 979        struct etr_aib aib;
 980        int sync_port;
 981
 982        /* prevent multiple execution. */
 983        mutex_lock(&etr_work_mutex);
 984
 985        /* Create working copy of etr_eacr. */
 986        eacr = etr_eacr;
 987
 988        /* Check for the different events and their immediate effects. */
 989        eacr = etr_handle_events(eacr);
 990
 991        /* Check if ETR is supposed to be active. */
 992        eacr.ea = eacr.p0 || eacr.p1;
 993        if (!eacr.ea) {
 994                /* Both ports offline. Reset everything. */
 995                eacr.dp = eacr.es = eacr.sl = 0;
 996                on_each_cpu(disable_sync_clock, NULL, 1);
 997                del_timer_sync(&etr_timer);
 998                etr_update_eacr(eacr);
 999                goto out_unlock;
1000        }
1001
1002        /* Store aib to get the current ETR status word. */
1003        BUG_ON(etr_stetr(&aib) != 0);
1004        etr_port0.esw = etr_port1.esw = aib.esw;        /* Copy status word. */
1005        now = get_clock();
1006
1007        /*
1008         * Update the port information if the last stepping port change
1009         * or data port change is older than 1.6 seconds.
1010         */
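        /* 1600000 << 12 is 1.6 seconds in TOD clock units (4096 per microsecond). */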
1011        if (now >= etr_tolec + (1600000 << 12))
1012                eacr = etr_handle_update(&aib, eacr);
1013
1014        /*
1015         * Select ports to enable. The preferred synchronization mode is PPS.
1016         * Whether a port can be enabled depends on a number of things:
1017         * 1) The port needs to be online and uptodate. A port is not
1018         *    disabled just because it is not uptodate, but it is only
1019         *    enabled if it is uptodate.
1020         * 2) The port needs to have the same mode (pps / etr).
1021         * 3) The port needs to be usable -> etr_port_valid() == 1
1022         * 4) To enable the second port the clock needs to be in sync.
1023         * 5) If both ports are usable and are ETR ports, the network id
1024         *    has to be the same.
1025         * The eacr.sl bit is used to indicate etr mode vs. pps mode.
1026         */
1027        if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
1028                eacr.sl = 0;
1029                eacr.e0 = 1;
1030                if (!etr_mode_is_pps(etr_eacr))
1031                        eacr.es = 0;
1032                if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
1033                        eacr.e1 = 0;
1034                // FIXME: uptodate checks ?
1035                else if (etr_port0_uptodate && etr_port1_uptodate)
1036                        eacr.e1 = 1;
1037                sync_port = (etr_port0_uptodate &&
1038                             etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1039        } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
1040                eacr.sl = 0;
1041                eacr.e0 = 0;
1042                eacr.e1 = 1;
1043                if (!etr_mode_is_pps(etr_eacr))
1044                        eacr.es = 0;
1045                sync_port = (etr_port1_uptodate &&
1046                             etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1047        } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
1048                eacr.sl = 1;
1049                eacr.e0 = 1;
1050                if (!etr_mode_is_etr(etr_eacr))
1051                        eacr.es = 0;
1052                if (!eacr.es || !eacr.p1 ||
1053                    aib.esw.psc1 != etr_lpsc_operational_alt)
1054                        eacr.e1 = 0;
1055                else if (etr_port0_uptodate && etr_port1_uptodate &&
1056                         etr_compare_network(&etr_port0, &etr_port1))
1057                        eacr.e1 = 1;
1058                sync_port = (etr_port0_uptodate &&
1059                             etr_port_valid(&etr_port0, 0)) ? 0 : -1;
1060        } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
1061                eacr.sl = 1;
1062                eacr.e0 = 0;
1063                eacr.e1 = 1;
1064                if (!etr_mode_is_etr(etr_eacr))
1065                        eacr.es = 0;
1066                sync_port = (etr_port1_uptodate &&
1067                             etr_port_valid(&etr_port1, 1)) ? 1 : -1;
1068        } else {
1069                /* Both ports not usable. */
1070                eacr.es = eacr.sl = 0;
1071                sync_port = -1;
1072        }
1073
1074        /*
1075         * If the clock is in sync just update the eacr and return.
1076         * If there is no valid sync port wait for a port update.
1077         */
1078        if ((eacr.es && check_sync_clock()) || sync_port < 0) {
1079                etr_update_eacr(eacr);
1080                etr_set_tolec_timeout(now);
1081                goto out_unlock;
1082        }
1083
1084        /*
1085         * Prepare control register for clock syncing
1086         * (reset data port bit, set sync check control).
1087         */
1088        eacr.dp = 0;
1089        eacr.es = 1;
1090
1091        /*
1092         * Update eacr and try to synchronize the clock. If the update
1093         * of eacr caused a stepping port switch (or if we have to
1094         * assume that a stepping port switch has occurred) or the
1095         * clock syncing failed, reset the sync check control bit
1096         * and set up a timer to try again after 0.5 seconds
1097         */
1098        etr_update_eacr(eacr);
1099        if (now < etr_tolec + (1600000 << 12) ||
1100            etr_sync_clock_stop(&aib, sync_port) != 0) {
1101                /* Sync failed. Try again in 1/2 second. */
1102                eacr.es = 0;
1103                etr_update_eacr(eacr);
1104                etr_set_sync_timeout();
1105        } else
1106                etr_set_tolec_timeout(now);
1107out_unlock:
1108        mutex_unlock(&etr_work_mutex);
1109}
1110
1111/*
1112 * Sysfs interface functions
1113 */
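/*
 * The ETR state is exported through the sysdev class "etr". With the
 * usual sysdev layout the class attributes stepping_port and
 * stepping_mode appear under /sys/devices/system/etr/, and each port
 * device etr0/etr1 carries the per-port attributes defined below.
 */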
1114static struct sysdev_class etr_sysclass = {
1115        .name   = "etr",
1116};
1117
1118static struct sys_device etr_port0_dev = {
1119        .id     = 0,
1120        .cls    = &etr_sysclass,
1121};
1122
1123static struct sys_device etr_port1_dev = {
1124        .id     = 1,
1125        .cls    = &etr_sysclass,
1126};
1127
1128/*
1129 * ETR class attributes
1130 */
1131static ssize_t etr_stepping_port_show(struct sysdev_class *class,
1132                                        struct sysdev_class_attribute *attr,
1133                                        char *buf)
1134{
1135        return sprintf(buf, "%i\n", etr_port0.esw.p);
1136}
1137
1138static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
1139
1140static ssize_t etr_stepping_mode_show(struct sysdev_class *class,
1141                                        struct sysdev_class_attribute *attr,
1142                                        char *buf)
1143{
1144        char *mode_str;
1145
1146        if (etr_mode_is_pps(etr_eacr))
1147                mode_str = "pps";
1148        else if (etr_mode_is_etr(etr_eacr))
1149                mode_str = "etr";
1150        else
1151                mode_str = "local";
1152        return sprintf(buf, "%s\n", mode_str);
1153}
1154
1155static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
1156
1157/*
1158 * ETR port attributes
1159 */
1160static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
1161{
1162        if (dev == &etr_port0_dev)
1163                return etr_port0_online ? &etr_port0 : NULL;
1164        else
1165                return etr_port1_online ? &etr_port1 : NULL;
1166}
1167
1168static ssize_t etr_online_show(struct sys_device *dev,
1169                                struct sysdev_attribute *attr,
1170                                char *buf)
1171{
1172        unsigned int online;
1173
1174        online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
1175        return sprintf(buf, "%i\n", online);
1176}
1177
1178static ssize_t etr_online_store(struct sys_device *dev,
1179                                struct sysdev_attribute *attr,
1180                                const char *buf, size_t count)
1181{
1182        unsigned int value;
1183
1184        value = simple_strtoul(buf, NULL, 0);
1185        if (value != 0 && value != 1)
1186                return -EINVAL;
1187        if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
1188                return -EOPNOTSUPP;
1189        mutex_lock(&clock_sync_mutex);
1190        if (dev == &etr_port0_dev) {
1191                if (etr_port0_online == value)
1192                        goto out;       /* Nothing to do. */
1193                etr_port0_online = value;
1194                if (etr_port0_online && etr_port1_online)
1195                        set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1196                else
1197                        clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1198                set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
1199                queue_work(time_sync_wq, &etr_work);
1200        } else {
1201                if (etr_port1_online == value)
1202                        goto out;       /* Nothing to do. */
1203                etr_port1_online = value;
1204                if (etr_port0_online && etr_port1_online)
1205                        set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1206                else
1207                        clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
1208                set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
1209                queue_work(time_sync_wq, &etr_work);
1210        }
1211out:
1212        mutex_unlock(&clock_sync_mutex);
1213        return count;
1214}
1215
1216static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
1217
1218static ssize_t etr_stepping_control_show(struct sys_device *dev,
1219                                        struct sysdev_attribute *attr,
1220                                        char *buf)
1221{
1222        return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1223                       etr_eacr.e0 : etr_eacr.e1);
1224}
1225
1226static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
1227
1228static ssize_t etr_mode_code_show(struct sys_device *dev,
1229                                struct sysdev_attribute *attr, char *buf)
1230{
1231        if (!etr_port0_online && !etr_port1_online)
1232                /* Status word is not uptodate if both ports are offline. */
1233                return -ENODATA;
1234        return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
1235                       etr_port0.esw.psc0 : etr_port0.esw.psc1);
1236}
1237
1238static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
1239
1240static ssize_t etr_untuned_show(struct sys_device *dev,
1241                                struct sysdev_attribute *attr, char *buf)
1242{
1243        struct etr_aib *aib = etr_aib_from_dev(dev);
1244
1245        if (!aib || !aib->slsw.v1)
1246                return -ENODATA;
1247        return sprintf(buf, "%i\n", aib->edf1.u);
1248}
1249
1250static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
1251
1252static ssize_t etr_network_id_show(struct sys_device *dev,
1253                                struct sysdev_attribute *attr, char *buf)
1254{
1255        struct etr_aib *aib = etr_aib_from_dev(dev);
1256
1257        if (!aib || !aib->slsw.v1)
1258                return -ENODATA;
1259        return sprintf(buf, "%i\n", aib->edf1.net_id);
1260}
1261
1262static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
1263
1264static ssize_t etr_id_show(struct sys_device *dev,
1265                        struct sysdev_attribute *attr, char *buf)
1266{
1267        struct etr_aib *aib = etr_aib_from_dev(dev);
1268
1269        if (!aib || !aib->slsw.v1)
1270                return -ENODATA;
1271        return sprintf(buf, "%i\n", aib->edf1.etr_id);
1272}
1273
1274static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
1275
1276static ssize_t etr_port_number_show(struct sys_device *dev,
1277                        struct sysdev_attribute *attr, char *buf)
1278{
1279        struct etr_aib *aib = etr_aib_from_dev(dev);
1280
1281        if (!aib || !aib->slsw.v1)
1282                return -ENODATA;
1283        return sprintf(buf, "%i\n", aib->edf1.etr_pn);
1284}
1285
1286static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
1287
1288static ssize_t etr_coupled_show(struct sys_device *dev,
1289                        struct sysdev_attribute *attr, char *buf)
1290{
1291        struct etr_aib *aib = etr_aib_from_dev(dev);
1292
1293        if (!aib || !aib->slsw.v3)
1294                return -ENODATA;
1295        return sprintf(buf, "%i\n", aib->edf3.c);
1296}
1297
1298static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
1299
1300static ssize_t etr_local_time_show(struct sys_device *dev,
1301                        struct sysdev_attribute *attr, char *buf)
1302{
1303        struct etr_aib *aib = etr_aib_from_dev(dev);
1304
1305        if (!aib || !aib->slsw.v3)
1306                return -ENODATA;
1307        return sprintf(buf, "%i\n", aib->edf3.blto);
1308}
1309
1310static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
1311
1312static ssize_t etr_utc_offset_show(struct sys_device *dev,
1313                        struct sysdev_attribute *attr, char *buf)
1314{
1315        struct etr_aib *aib = etr_aib_from_dev(dev);
1316
1317        if (!aib || !aib->slsw.v3)
1318                return -ENODATA;
1319        return sprintf(buf, "%i\n", aib->edf3.buo);
1320}
1321
1322static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
1323
1324static struct sysdev_attribute *etr_port_attributes[] = {
1325        &attr_online,
1326        &attr_stepping_control,
1327        &attr_state_code,
1328        &attr_untuned,
1329        &attr_network,
1330        &attr_id,
1331        &attr_port,
1332        &attr_coupled,
1333        &attr_local_time,
1334        &attr_utc_offset,
1335        NULL
1336};
1337
1338static int __init etr_register_port(struct sys_device *dev)
1339{
1340        struct sysdev_attribute **attr;
1341        int rc;
1342
1343        rc = sysdev_register(dev);
1344        if (rc)
1345                goto out;
1346        for (attr = etr_port_attributes; *attr; attr++) {
1347                rc = sysdev_create_file(dev, *attr);
1348                if (rc)
1349                        goto out_unreg;
1350        }
1351        return 0;
1352out_unreg:
1353        for (; attr >= etr_port_attributes; attr--)
1354                sysdev_remove_file(dev, *attr);
1355        sysdev_unregister(dev);
1356out:
1357        return rc;
1358}
1359
1360static void __init etr_unregister_port(struct sys_device *dev)
1361{
1362        struct sysdev_attribute **attr;
1363
1364        for (attr = etr_port_attributes; *attr; attr++)
1365                sysdev_remove_file(dev, *attr);
1366        sysdev_unregister(dev);
1367}
1368
1369static int __init etr_init_sysfs(void)
1370{
1371        int rc;
1372
1373        rc = sysdev_class_register(&etr_sysclass);
1374        if (rc)
1375                goto out;
1376        rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
1377        if (rc)
1378                goto out_unreg_class;
1379        rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
1380        if (rc)
1381                goto out_remove_stepping_port;
1382        rc = etr_register_port(&etr_port0_dev);
1383        if (rc)
1384                goto out_remove_stepping_mode;
1385        rc = etr_register_port(&etr_port1_dev);
1386        if (rc)
1387                goto out_remove_port0;
1388        return 0;
1389
1390out_remove_port0:
1391        etr_unregister_port(&etr_port0_dev);
1392out_remove_stepping_mode:
1393        sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
1394out_remove_stepping_port:
1395        sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
1396out_unreg_class:
1397        sysdev_class_unregister(&etr_sysclass);
1398out:
1399        return rc;
1400}
1401
1402device_initcall(etr_init_sysfs);
1403
1404/*
1405 * Server Time Protocol (STP) code.
1406 */
1407static int stp_online;
1408static struct stp_sstpi stp_info;
1409static void *stp_page;
1410
1411static void stp_work_fn(struct work_struct *work);
1412static DEFINE_MUTEX(stp_work_mutex);
1413static DECLARE_WORK(stp_work, stp_work_fn);
1414static struct timer_list stp_timer;
1415
1416static int __init early_parse_stp(char *p)
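/* Kernel command line parameter "stp=": "off" or "on". */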
1417{
1418        if (strncmp(p, "off", 3) == 0)
1419                stp_online = 0;
1420        else if (strncmp(p, "on", 2) == 0)
1421                stp_online = 1;
1422        return 0;
1423}
1424early_param("stp", early_parse_stp);
1425
1426/*
1427 * Reset STP attachment.
1428 */
1429static void __init stp_reset(void)
1430{
1431        int rc;
1432
1433        stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
1434        rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1435        if (rc == 0)
1436                set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
1437        else if (stp_online) {
1438                pr_warning("The real or virtual hardware system does "
1439                           "not provide an STP interface\n");
1440                free_page((unsigned long) stp_page);
1441                stp_page = NULL;
1442                stp_online = 0;
1443        }
1444}
1445
1446static void stp_timeout(unsigned long dummy)
1447{
1448        queue_work(time_sync_wq, &stp_work);
1449}
1450
1451static int __init stp_init(void)
1452{
1453        if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1454                return 0;
1455        setup_timer(&stp_timer, stp_timeout, 0UL);
1456        time_init_wq();
1457        if (!stp_online)
1458                return 0;
1459        queue_work(time_sync_wq, &stp_work);
1460        return 0;
1461}
1462
1463arch_initcall(stp_init);
1464
1465/*
1466 * STP timing alert. There are three causes:
1467 * 1) timing status change
1468 * 2) link availability change
1469 * 3) time control parameter change
1470 * In all three cases we are only interested in the clock source state.
1471 * If an STP clock source is now available, use it.
1472 */
1473static void stp_timing_alert(struct stp_irq_parm *intparm)
1474{
1475        if (intparm->tsc || intparm->lac || intparm->tcpc)
1476                queue_work(time_sync_wq, &stp_work);
1477}
1478
1479/*
1480 * STP sync check machine check. This is called when the timing state
1481 * changes from the synchronized state to the unsynchronized state.
1482 * After an STP sync check the clock is not in sync. The machine check
1483 * is broadcast to all cpus at the same time.
1484 */
1485void stp_sync_check(void)
1486{
1487        disable_sync_clock(NULL);
1488        queue_work(time_sync_wq, &stp_work);
1489}
1490
1491/*
1492 * STP island condition machine check. This is called when an attached
1493 * server attempts to communicate over an STP link and the servers
1494 * have matching CTN ids and have a valid stratum-1 configuration
1495 * but the configurations do not match.
1496 */
1497void stp_island_check(void)
1498{
1499        disable_sync_clock(NULL);
1500        queue_work(time_sync_wq, &stp_work);
1501}
1502
1503
1504static int stp_sync_clock(void *data)
1505{
1506        static int first;
1507        unsigned long long old_clock, delta;
1508        struct clock_sync_data *stp_sync;
1509        int rc;
1510
1511        stp_sync = data;
1512
1513        if (xchg(&first, 1) == 1) {
1514                /* Slave */
1515                clock_sync_cpu(stp_sync);
1516                return 0;
1517        }
1518
1519        /* Wait until all other cpus entered the sync function. */
1520        while (atomic_read(&stp_sync->cpus) != 0)
1521                cpu_relax();
1522
1523        enable_sync_clock();
1524
1525        rc = 0;
1526        if (stp_info.todoff[0] || stp_info.todoff[1] ||
1527            stp_info.todoff[2] || stp_info.todoff[3] ||
1528            stp_info.tmd != 2) {
1529                old_clock = get_clock();
1530                rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0);
1531                if (rc == 0) {
1532                        delta = adjust_time(old_clock, get_clock(), 0);
1533                        fixup_clock_comparator(delta);
1534                        rc = chsc_sstpi(stp_page, &stp_info,
1535                                        sizeof(struct stp_sstpi));
1536                        if (rc == 0 && stp_info.tmd != 2)
1537                                rc = -EAGAIN;
1538                }
1539        }
1540        if (rc) {
1541                disable_sync_clock(NULL);
1542                stp_sync->in_sync = -EAGAIN;
1543        } else
1544                stp_sync->in_sync = 1;
1545        xchg(&first, 0);
1546        return 0;
1547}
1548
1549/*
1550 * STP work. Check the STP state and take over the clock
1551 * synchronization if the STP clock source is usable.
1552 */
1553static void stp_work_fn(struct work_struct *work)
1554{
1555        struct clock_sync_data stp_sync;
1556        int rc;
1557
1558        /* Prevent concurrent execution. */
1559        mutex_lock(&stp_work_mutex);
1560
1561        if (!stp_online) {
1562                chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
1563                del_timer_sync(&stp_timer);
1564                goto out_unlock;
1565        }
1566
1567        rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
1568        if (rc)
1569                goto out_unlock;
1570
1571        rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
1572        if (rc || stp_info.c == 0)
1573                goto out_unlock;
1574
1575        /* Skip synchronization if the clock is already in sync. */
1576        if (check_sync_clock())
1577                goto out_unlock;
1578
1579        memset(&stp_sync, 0, sizeof(stp_sync));
1580        get_online_cpus();
1581        atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
1582        stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
1583        put_online_cpus();
1584
1585        if (!check_sync_clock())
1586                /*
1587                 * There is a usable clock but the synchronization failed.
1588                 * Retry after a second.
1589                 */
1590                mod_timer(&stp_timer, jiffies + HZ);
1591
1592out_unlock:
1593        mutex_unlock(&stp_work_mutex);
1594}
1595
1596/*
1597 * STP class sysfs interface functions
1598 */
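    /*
     * The "stp" sysdev class exposes the attributes defined below as
     * sysfs files. A rough sketch of typical use from userspace,
     * assuming the standard sysdev layout under /sys/devices/system/:
     *
     *   cat /sys/devices/system/stp/timing_state
     *   echo 1 > /sys/devices/system/stp/online
     *
     * All attributes are read-only (mode 0400) except "online" (0600).
     */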
1599static struct sysdev_class stp_sysclass = {
1600        .name   = "stp",
1601};
1602
1603static ssize_t stp_ctn_id_show(struct sysdev_class *class,
1604                                struct sysdev_class_attribute *attr,
1605                                char *buf)
1606{
1607        if (!stp_online)
1608                return -ENODATA;
1609        return sprintf(buf, "%016llx\n",
1610                       *(unsigned long long *) stp_info.ctnid);
1611}
1612
1613static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
1614
1615static ssize_t stp_ctn_type_show(struct sysdev_class *class,
1616                                struct sysdev_class_attribute *attr,
1617                                char *buf)
1618{
1619        if (!stp_online)
1620                return -ENODATA;
1621        return sprintf(buf, "%i\n", stp_info.ctn);
1622}
1623
1624static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
1625
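    /*
     * The dst_offset, leap_seconds, time_offset and time_zone_offset
     * attributes report optional fields; each show function first checks
     * the corresponding validity bit in stp_info.vbits and returns
     * -ENODATA if the field is not valid.
     */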
1626static ssize_t stp_dst_offset_show(struct sysdev_class *class,
1627                                   struct sysdev_class_attribute *attr,
1628                                   char *buf)
1629{
1630        if (!stp_online || !(stp_info.vbits & 0x2000))
1631                return -ENODATA;
1632        return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
1633}
1634
1635static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
1636
1637static ssize_t stp_leap_seconds_show(struct sysdev_class *class,
1638                                        struct sysdev_class_attribute *attr,
1639                                        char *buf)
1640{
1641        if (!stp_online || !(stp_info.vbits & 0x8000))
1642                return -ENODATA;
1643        return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
1644}
1645
1646static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
1647
1648static ssize_t stp_stratum_show(struct sysdev_class *class,
1649                                struct sysdev_class_attribute *attr,
1650                                char *buf)
1651{
1652        if (!stp_online)
1653                return -ENODATA;
1654        return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
1655}
1656
1657static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL);
1658
1659static ssize_t stp_time_offset_show(struct sysdev_class *class,
1660                                struct sysdev_class_attribute *attr,
1661                                char *buf)
1662{
1663        if (!stp_online || !(stp_info.vbits & 0x0800))
1664                return -ENODATA;
1665        return sprintf(buf, "%i\n", (int) stp_info.tto);
1666}
1667
1668static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
1669
1670static ssize_t stp_time_zone_offset_show(struct sysdev_class *class,
1671                                struct sysdev_class_attribute *attr,
1672                                char *buf)
1673{
1674        if (!stp_online || !(stp_info.vbits & 0x4000))
1675                return -ENODATA;
1676        return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
1677}
1678
1679static SYSDEV_CLASS_ATTR(time_zone_offset, 0400,
1680                         stp_time_zone_offset_show, NULL);
1681
1682static ssize_t stp_timing_mode_show(struct sysdev_class *class,
1683                                struct sysdev_class_attribute *attr,
1684                                char *buf)
1685{
1686        if (!stp_online)
1687                return -ENODATA;
1688        return sprintf(buf, "%i\n", stp_info.tmd);
1689}
1690
1691static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
1692
1693static ssize_t stp_timing_state_show(struct sysdev_class *class,
1694                                struct sysdev_class_attribute *attr,
1695                                char *buf)
1696{
1697        if (!stp_online)
1698                return -ENODATA;
1699        return sprintf(buf, "%i\n", stp_info.tst);
1700}
1701
1702static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
1703
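    /*
     * The "online" attribute switches STP synchronization on or off.
     * Writing 0 or 1 updates stp_online and the clock sync flags and
     * queues the STP work to apply the change.
     */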
1704static ssize_t stp_online_show(struct sysdev_class *class,
1705                                struct sysdev_class_attribute *attr,
1706                                char *buf)
1707{
1708        return sprintf(buf, "%i\n", stp_online);
1709}
1710
1711static ssize_t stp_online_store(struct sysdev_class *class,
1712                                struct sysdev_class_attribute *attr,
1713                                const char *buf, size_t count)
1714{
1715        unsigned int value;
1716
1717        value = simple_strtoul(buf, NULL, 0);
1718        if (value != 0 && value != 1)
1719                return -EINVAL;
1720        if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
1721                return -EOPNOTSUPP;
1722        mutex_lock(&clock_sync_mutex);
1723        stp_online = value;
1724        if (stp_online)
1725                set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1726        else
1727                clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
1728        queue_work(time_sync_wq, &stp_work);
1729        mutex_unlock(&clock_sync_mutex);
1730        return count;
1731}
1732
1733/*
1734 * Can't use SYSDEV_CLASS_ATTR because the attribute should be named
1735 * stp/online, but attr_online already exists in this file.
1736 */
1737static struct sysdev_class_attribute attr_stp_online = {
1738        .attr = { .name = "online", .mode = 0600 },
1739        .show   = stp_online_show,
1740        .store  = stp_online_store,
1741};
1742
1743static struct sysdev_class_attribute *stp_attributes[] = {
1744        &attr_ctn_id,
1745        &attr_ctn_type,
1746        &attr_dst_offset,
1747        &attr_leap_seconds,
1748        &attr_stp_online,
1749        &attr_stratum,
1750        &attr_time_offset,
1751        &attr_time_zone_offset,
1752        &attr_timing_mode,
1753        &attr_timing_state,
1754        NULL
1755};
1756
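    /*
     * Register the stp sysdev class and create one sysfs file per
     * attribute; on failure remove the files created so far and
     * unregister the class again.
     */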
1757static int __init stp_init_sysfs(void)
1758{
1759        struct sysdev_class_attribute **attr;
1760        int rc;
1761
1762        rc = sysdev_class_register(&stp_sysclass);
1763        if (rc)
1764                goto out;
1765        for (attr = stp_attributes; *attr; attr++) {
1766                rc = sysdev_class_create_file(&stp_sysclass, *attr);
1767                if (rc)
1768                        goto out_unreg;
1769        }
1770        return 0;
1771out_unreg:
1772        for (; attr >= stp_attributes; attr--)
1773                sysdev_class_remove_file(&stp_sysclass, *attr);
1774        sysdev_class_unregister(&stp_sysclass);
1775out:
1776        return rc;
1777}
1778
1779device_initcall(stp_init_sysfs);
1780