qemu/hw/ppc/ppc.c
   1/*
   2 * QEMU generic PowerPC hardware System Emulator
   3 *
   4 * Copyright (c) 2003-2007 Jocelyn Mayer
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#include "qemu/osdep.h"
  26#include "hw/irq.h"
  27#include "hw/ppc/ppc.h"
  28#include "hw/ppc/ppc_e500.h"
  29#include "qemu/timer.h"
  30#include "sysemu/cpus.h"
  31#include "qemu/log.h"
  32#include "qemu/main-loop.h"
  33#include "qemu/error-report.h"
  34#include "sysemu/kvm.h"
  35#include "sysemu/runstate.h"
  36#include "kvm_ppc.h"
  37#include "migration/vmstate.h"
  38#include "trace.h"
  39
  40static void cpu_ppc_tb_stop (CPUPPCState *env);
  41static void cpu_ppc_tb_start (CPUPPCState *env);
  42
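    /*
     * Raise (level != 0) or lower (level == 0) one of the PPC_INTERRUPT_*
     * bits in env->pending_interrupts.  The BQL is taken here if the caller
     * does not already hold it, and KVM is notified whenever the pending
     * mask actually changes.
     */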
  43void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
  44{
  45    CPUPPCState *env = &cpu->env;
  46    unsigned int old_pending;
  47    bool locked = false;
  48
  49    /* We may already have the BQL if coming from the reset path */
  50    if (!qemu_mutex_iothread_locked()) {
  51        locked = true;
  52        qemu_mutex_lock_iothread();
  53    }
  54
  55    old_pending = env->pending_interrupts;
  56
  57    if (level) {
  58        env->pending_interrupts |= irq;
  59    } else {
  60        env->pending_interrupts &= ~irq;
  61    }
  62
  63    if (old_pending != env->pending_interrupts) {
  64        ppc_maybe_interrupt(env);
  65        kvmppc_set_interrupt(cpu, irq, level);
  66    }
  67
  68    trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts,
  69                           CPU(cpu)->interrupt_request);
  70
  71    if (locked) {
  72        qemu_mutex_unlock_iothread();
  73    }
  74}
  75
  76/* PowerPC 6xx / 7xx internal IRQ controller */
  77static void ppc6xx_set_irq(void *opaque, int pin, int level)
  78{
  79    PowerPCCPU *cpu = opaque;
  80    CPUPPCState *env = &cpu->env;
  81    int cur_level;
  82
  83    trace_ppc_irq_set(env, pin, level);
  84
  85    cur_level = (env->irq_input_state >> pin) & 1;
  86    /* Don't generate spurious events */
  87    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
  88        CPUState *cs = CPU(cpu);
  89
  90        switch (pin) {
  91        case PPC6xx_INPUT_TBEN:
  92            /* Level sensitive - active high */
  93            trace_ppc_irq_set_state("time base", level);
  94            if (level) {
  95                cpu_ppc_tb_start(env);
  96            } else {
  97                cpu_ppc_tb_stop(env);
  98            }
  99            break;
 100        case PPC6xx_INPUT_INT:
 101            /* Level sensitive - active high */
 102            trace_ppc_irq_set_state("external IRQ", level);
 103            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 104            break;
 105        case PPC6xx_INPUT_SMI:
 106            /* Level sensitive - active high */
 107            trace_ppc_irq_set_state("SMI IRQ", level);
 108            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
 109            break;
 110        case PPC6xx_INPUT_MCP:
 111            /* Negative edge sensitive */
  112            /* XXX: TODO: actual reaction may depend on HID0 status
  113             *            603/604/740/750: check HID0[EMCP]
  114             */
 115            if (cur_level == 1 && level == 0) {
 116                trace_ppc_irq_set_state("machine check", 1);
 117                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
 118            }
 119            break;
 120        case PPC6xx_INPUT_CKSTP_IN:
 121            /* Level sensitive - active low */
 122            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
 123            /* XXX: Note that the only way to restart the CPU is to reset it */
 124            if (level) {
 125                trace_ppc_irq_cpu("stop");
 126                cs->halted = 1;
 127            }
 128            break;
 129        case PPC6xx_INPUT_HRESET:
 130            /* Level sensitive - active low */
 131            if (level) {
 132                trace_ppc_irq_reset("CPU");
 133                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
 134            }
 135            break;
 136        case PPC6xx_INPUT_SRESET:
 137            trace_ppc_irq_set_state("RESET IRQ", level);
 138            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
 139            break;
 140        default:
 141            g_assert_not_reached();
 142        }
 143        if (level)
 144            env->irq_input_state |= 1 << pin;
 145        else
 146            env->irq_input_state &= ~(1 << pin);
 147    }
 148}
 149
 150void ppc6xx_irq_init(PowerPCCPU *cpu)
 151{
 152    qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
 153}
 154
 155#if defined(TARGET_PPC64)
 156/* PowerPC 970 internal IRQ controller */
 157static void ppc970_set_irq(void *opaque, int pin, int level)
 158{
 159    PowerPCCPU *cpu = opaque;
 160    CPUPPCState *env = &cpu->env;
 161    int cur_level;
 162
 163    trace_ppc_irq_set(env, pin, level);
 164
 165    cur_level = (env->irq_input_state >> pin) & 1;
 166    /* Don't generate spurious events */
 167    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 168        CPUState *cs = CPU(cpu);
 169
 170        switch (pin) {
 171        case PPC970_INPUT_INT:
 172            /* Level sensitive - active high */
 173            trace_ppc_irq_set_state("external IRQ", level);
 174            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 175            break;
 176        case PPC970_INPUT_THINT:
 177            /* Level sensitive - active high */
 178            trace_ppc_irq_set_state("SMI IRQ", level);
 179            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
 180            break;
 181        case PPC970_INPUT_MCP:
 182            /* Negative edge sensitive */
  183            /* XXX: TODO: actual reaction may depend on HID0 status
  184             *            603/604/740/750: check HID0[EMCP]
  185             */
 186            if (cur_level == 1 && level == 0) {
 187                trace_ppc_irq_set_state("machine check", 1);
 188                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
 189            }
 190            break;
 191        case PPC970_INPUT_CKSTP:
 192            /* Level sensitive - active low */
 193            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
 194            if (level) {
 195                trace_ppc_irq_cpu("stop");
 196                cs->halted = 1;
 197            } else {
 198                trace_ppc_irq_cpu("restart");
 199                cs->halted = 0;
 200                qemu_cpu_kick(cs);
 201            }
 202            break;
 203        case PPC970_INPUT_HRESET:
 204            /* Level sensitive - active low */
 205            if (level) {
 206                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
 207            }
 208            break;
 209        case PPC970_INPUT_SRESET:
 210            trace_ppc_irq_set_state("RESET IRQ", level);
 211            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
 212            break;
 213        case PPC970_INPUT_TBEN:
 214            trace_ppc_irq_set_state("TBEN IRQ", level);
 215            /* XXX: TODO */
 216            break;
 217        default:
 218            g_assert_not_reached();
 219        }
 220        if (level)
 221            env->irq_input_state |= 1 << pin;
 222        else
 223            env->irq_input_state &= ~(1 << pin);
 224    }
 225}
 226
 227void ppc970_irq_init(PowerPCCPU *cpu)
 228{
 229    qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
 230}
 231
 232/* POWER7 internal IRQ controller */
 233static void power7_set_irq(void *opaque, int pin, int level)
 234{
 235    PowerPCCPU *cpu = opaque;
 236
 237    trace_ppc_irq_set(&cpu->env, pin, level);
 238
 239    switch (pin) {
 240    case POWER7_INPUT_INT:
 241        /* Level sensitive - active high */
 242        trace_ppc_irq_set_state("external IRQ", level);
 243        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 244        break;
 245    default:
 246        g_assert_not_reached();
 247    }
 248}
 249
 250void ppcPOWER7_irq_init(PowerPCCPU *cpu)
 251{
 252    qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
 253}
 254
 255/* POWER9 internal IRQ controller */
 256static void power9_set_irq(void *opaque, int pin, int level)
 257{
 258    PowerPCCPU *cpu = opaque;
 259
 260    trace_ppc_irq_set(&cpu->env, pin, level);
 261
 262    switch (pin) {
 263    case POWER9_INPUT_INT:
 264        /* Level sensitive - active high */
 265        trace_ppc_irq_set_state("external IRQ", level);
 266        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 267        break;
 268    case POWER9_INPUT_HINT:
 269        /* Level sensitive - active high */
 270        trace_ppc_irq_set_state("HV external IRQ", level);
 271        ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
 272        break;
 273    default:
 274        g_assert_not_reached();
 275        return;
 276    }
 277}
 278
 279void ppcPOWER9_irq_init(PowerPCCPU *cpu)
 280{
 281    qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
 282}
 283#endif /* defined(TARGET_PPC64) */
 284
 285void ppc40x_core_reset(PowerPCCPU *cpu)
 286{
 287    CPUPPCState *env = &cpu->env;
 288    target_ulong dbsr;
 289
 290    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
 291    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
 292    dbsr = env->spr[SPR_40x_DBSR];
 293    dbsr &= ~0x00000300;
 294    dbsr |= 0x00000100;
 295    env->spr[SPR_40x_DBSR] = dbsr;
 296}
 297
 298void ppc40x_chip_reset(PowerPCCPU *cpu)
 299{
 300    CPUPPCState *env = &cpu->env;
 301    target_ulong dbsr;
 302
 303    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
 304    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
 305    /* XXX: TODO reset all internal peripherals */
 306    dbsr = env->spr[SPR_40x_DBSR];
 307    dbsr &= ~0x00000300;
 308    dbsr |= 0x00000200;
 309    env->spr[SPR_40x_DBSR] = dbsr;
 310}
 311
 312void ppc40x_system_reset(PowerPCCPU *cpu)
 313{
 314    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
 315    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
 316}
 317
 318void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
 319{
 320    PowerPCCPU *cpu = env_archcpu(env);
 321
 322    qemu_mutex_lock_iothread();
 323
 324    switch ((val >> 28) & 0x3) {
 325    case 0x0:
 326        /* No action */
 327        break;
 328    case 0x1:
 329        /* Core reset */
 330        ppc40x_core_reset(cpu);
 331        break;
 332    case 0x2:
 333        /* Chip reset */
 334        ppc40x_chip_reset(cpu);
 335        break;
 336    case 0x3:
 337        /* System reset */
 338        ppc40x_system_reset(cpu);
 339        break;
 340    }
 341
 342    qemu_mutex_unlock_iothread();
 343}
 344
 345/* PowerPC 40x internal IRQ controller */
 346static void ppc40x_set_irq(void *opaque, int pin, int level)
 347{
 348    PowerPCCPU *cpu = opaque;
 349    CPUPPCState *env = &cpu->env;
 350    int cur_level;
 351
 352    trace_ppc_irq_set(env, pin, level);
 353
 354    cur_level = (env->irq_input_state >> pin) & 1;
 355    /* Don't generate spurious events */
 356    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 357        CPUState *cs = CPU(cpu);
 358
 359        switch (pin) {
 360        case PPC40x_INPUT_RESET_SYS:
 361            if (level) {
 362                trace_ppc_irq_reset("system");
 363                ppc40x_system_reset(cpu);
 364            }
 365            break;
 366        case PPC40x_INPUT_RESET_CHIP:
 367            if (level) {
 368                trace_ppc_irq_reset("chip");
 369                ppc40x_chip_reset(cpu);
 370            }
 371            break;
 372        case PPC40x_INPUT_RESET_CORE:
 373            /* XXX: TODO: update DBSR[MRR] */
 374            if (level) {
 375                trace_ppc_irq_reset("core");
 376                ppc40x_core_reset(cpu);
 377            }
 378            break;
 379        case PPC40x_INPUT_CINT:
 380            /* Level sensitive - active high */
 381            trace_ppc_irq_set_state("critical IRQ", level);
 382            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
 383            break;
 384        case PPC40x_INPUT_INT:
 385            /* Level sensitive - active high */
 386            trace_ppc_irq_set_state("external IRQ", level);
 387            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 388            break;
 389        case PPC40x_INPUT_HALT:
 390            /* Level sensitive - active low */
 391            if (level) {
 392                trace_ppc_irq_cpu("stop");
 393                cs->halted = 1;
 394            } else {
 395                trace_ppc_irq_cpu("restart");
 396                cs->halted = 0;
 397                qemu_cpu_kick(cs);
 398            }
 399            break;
 400        case PPC40x_INPUT_DEBUG:
 401            /* Level sensitive - active high */
 402            trace_ppc_irq_set_state("debug pin", level);
 403            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
 404            break;
 405        default:
 406            g_assert_not_reached();
 407        }
 408        if (level)
 409            env->irq_input_state |= 1 << pin;
 410        else
 411            env->irq_input_state &= ~(1 << pin);
 412    }
 413}
 414
 415void ppc40x_irq_init(PowerPCCPU *cpu)
 416{
 417    qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
 418}
 419
 420/* PowerPC E500 internal IRQ controller */
 421static void ppce500_set_irq(void *opaque, int pin, int level)
 422{
 423    PowerPCCPU *cpu = opaque;
 424    CPUPPCState *env = &cpu->env;
 425    int cur_level;
 426
 427    trace_ppc_irq_set(env, pin, level);
 428
 429    cur_level = (env->irq_input_state >> pin) & 1;
 430    /* Don't generate spurious events */
 431    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 432        switch (pin) {
 433        case PPCE500_INPUT_MCK:
 434            if (level) {
 435                trace_ppc_irq_reset("system");
 436                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
 437            }
 438            break;
 439        case PPCE500_INPUT_RESET_CORE:
 440            if (level) {
 441                trace_ppc_irq_reset("core");
 442                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
 443            }
 444            break;
 445        case PPCE500_INPUT_CINT:
 446            /* Level sensitive - active high */
 447            trace_ppc_irq_set_state("critical IRQ", level);
 448            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
 449            break;
 450        case PPCE500_INPUT_INT:
 451            /* Level sensitive - active high */
 452            trace_ppc_irq_set_state("core IRQ", level);
 453            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 454            break;
 455        case PPCE500_INPUT_DEBUG:
 456            /* Level sensitive - active high */
 457            trace_ppc_irq_set_state("debug pin", level);
 458            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
 459            break;
 460        default:
 461            g_assert_not_reached();
 462        }
 463        if (level)
 464            env->irq_input_state |= 1 << pin;
 465        else
 466            env->irq_input_state &= ~(1 << pin);
 467    }
 468}
 469
 470void ppce500_irq_init(PowerPCCPU *cpu)
 471{
 472    qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
 473}
 474
  475/* Enable or disable the E500 EPR capability */
 476void ppce500_set_mpic_proxy(bool enabled)
 477{
 478    CPUState *cs;
 479
 480    CPU_FOREACH(cs) {
 481        PowerPCCPU *cpu = POWERPC_CPU(cs);
 482
 483        cpu->env.mpic_proxy = enabled;
 484        if (kvm_enabled()) {
 485            kvmppc_set_mpic_proxy(cpu, enabled);
 486        }
 487    }
 488}
 489
 490/*****************************************************************************/
 491/* PowerPC time base and decrementer emulation */
 492
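    /*
     * Convert a QEMU_CLOCK_VIRTUAL timestamp (in ns) into timebase ticks:
     * ticks = vmclk * tb_freq / 10^9 + tb_offset.  For example (illustrative
     * numbers only), with tb_freq = 512 MHz, 1 ms of virtual time corresponds
     * to 512,000 timebase ticks before the offset is applied.
     */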
 493uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
 494{
 495    /* TB time in tb periods */
 496    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
 497}
 498
 499uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
 500{
 501    ppc_tb_t *tb_env = env->tb_env;
 502    uint64_t tb;
 503
 504    if (kvm_enabled()) {
 505        return env->spr[SPR_TBL];
 506    }
 507
 508    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 509    trace_ppc_tb_load(tb);
 510
 511    return tb;
 512}
 513
 514static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
 515{
 516    ppc_tb_t *tb_env = env->tb_env;
 517    uint64_t tb;
 518
 519    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 520    trace_ppc_tb_load(tb);
 521
 522    return tb >> 32;
 523}
 524
 525uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
 526{
 527    if (kvm_enabled()) {
 528        return env->spr[SPR_TBU];
 529    }
 530
 531    return _cpu_ppc_load_tbu(env);
 532}
 533
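    /*
     * "Store" a timebase-like register by recomputing its offset so that
     * subsequent reads via cpu_ppc_get_tb() with this offset return 'value':
     * *tb_offsetp = value - vmclk * tb_freq / 10^9.
     */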
 534static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
 535                                    int64_t *tb_offsetp, uint64_t value)
 536{
 537    *tb_offsetp = value -
 538        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
 539
 540    trace_ppc_tb_store(value, *tb_offsetp);
 541}
 542
 543void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
 544{
 545    ppc_tb_t *tb_env = env->tb_env;
 546    uint64_t tb;
 547
 548    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 549    tb &= 0xFFFFFFFF00000000ULL;
 550    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 551                     &tb_env->tb_offset, tb | (uint64_t)value);
 552}
 553
 554static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
 555{
 556    ppc_tb_t *tb_env = env->tb_env;
 557    uint64_t tb;
 558
 559    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 560    tb &= 0x00000000FFFFFFFFULL;
 561    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 562                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
 563}
 564
 565void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
 566{
 567    _cpu_ppc_store_tbu(env, value);
 568}
 569
 570uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
 571{
 572    ppc_tb_t *tb_env = env->tb_env;
 573    uint64_t tb;
 574
 575    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 576    trace_ppc_tb_load(tb);
 577
 578    return tb;
 579}
 580
 581uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
 582{
 583    ppc_tb_t *tb_env = env->tb_env;
 584    uint64_t tb;
 585
 586    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 587    trace_ppc_tb_load(tb);
 588
 589    return tb >> 32;
 590}
 591
 592void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
 593{
 594    ppc_tb_t *tb_env = env->tb_env;
 595    uint64_t tb;
 596
 597    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 598    tb &= 0xFFFFFFFF00000000ULL;
 599    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 600                     &tb_env->atb_offset, tb | (uint64_t)value);
 601}
 602
 603void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
 604{
 605    ppc_tb_t *tb_env = env->tb_env;
 606    uint64_t tb;
 607
 608    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 609    tb &= 0x00000000FFFFFFFFULL;
 610    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 611                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
 612}
 613
 614uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
 615{
 616    ppc_tb_t *tb_env = env->tb_env;
 617
 618    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 619                          tb_env->vtb_offset);
 620}
 621
 622void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
 623{
 624    ppc_tb_t *tb_env = env->tb_env;
 625
 626    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 627                     &tb_env->vtb_offset, value);
 628}
 629
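    /*
     * TBU40 writes replace the upper 40 bits of the timebase while
     * preserving the low 24 bits of the current value.
     */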
 630void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
 631{
 632    ppc_tb_t *tb_env = env->tb_env;
 633    uint64_t tb;
 634
 635    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 636                        tb_env->tb_offset);
 637    tb &= 0xFFFFFFUL;
 638    tb |= (value & ~0xFFFFFFUL);
 639    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 640                     &tb_env->tb_offset, tb);
 641}
 642
 643static void cpu_ppc_tb_stop (CPUPPCState *env)
 644{
 645    ppc_tb_t *tb_env = env->tb_env;
 646    uint64_t tb, atb, vmclk;
 647
 648    /* If the time base is already frozen, do nothing */
 649    if (tb_env->tb_freq != 0) {
 650        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 651        /* Get the time base */
 652        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
 653        /* Get the alternate time base */
 654        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
  655        /* Store the time base value (i.e. compute the current offset) */
 656        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
 657        /* Store the alternate time base value (compute the current offset) */
 658        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
 659        /* Set the time base frequency to zero */
 660        tb_env->tb_freq = 0;
 661        /* Now, the time bases are frozen to tb_offset / atb_offset value */
 662    }
 663}
 664
 665static void cpu_ppc_tb_start (CPUPPCState *env)
 666{
 667    ppc_tb_t *tb_env = env->tb_env;
 668    uint64_t tb, atb, vmclk;
 669
 670    /* If the time base is not frozen, do nothing */
 671    if (tb_env->tb_freq == 0) {
 672        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 673        /* Get the time base from tb_offset */
 674        tb = tb_env->tb_offset;
 675        /* Get the alternate time base from atb_offset */
 676        atb = tb_env->atb_offset;
 677        /* Restore the tb frequency from the decrementer frequency */
 678        tb_env->tb_freq = tb_env->decr_freq;
 679        /* Store the time base value */
 680        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
 681        /* Store the alternate time base value */
 682        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
 683    }
 684}
 685
 686bool ppc_decr_clear_on_delivery(CPUPPCState *env)
 687{
 688    ppc_tb_t *tb_env = env->tb_env;
 689    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
 690    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
 691}
 692
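    /*
     * Compute the current decrementer value from the time remaining until
     * 'next' (the expiry deadline in ns).  After expiry the value goes
     * negative, except on BookE (PPC_TIMER_BOOKE) where it reads as zero.
     */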
 693static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
 694{
 695    ppc_tb_t *tb_env = env->tb_env;
 696    int64_t decr, diff;
 697
 698    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 699    if (diff >= 0) {
 700        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
 701    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
 702        decr = 0;
 703    }  else {
 704        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
 705    }
 706    trace_ppc_decr_load(decr);
 707
 708    return decr;
 709}
 710
 711target_ulong cpu_ppc_load_decr(CPUPPCState *env)
 712{
 713    ppc_tb_t *tb_env = env->tb_env;
 714    uint64_t decr;
 715
 716    if (kvm_enabled()) {
 717        return env->spr[SPR_DECR];
 718    }
 719
 720    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);
 721
 722    /*
  723     * If the large decrementer is enabled then the decrementer is sign
  724     * extended to 64 bits, otherwise it is a 32-bit value.
  725     */
 726    if (env->spr[SPR_LPCR] & LPCR_LD) {
 727        return decr;
 728    }
 729    return (uint32_t) decr;
 730}
 731
 732target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
 733{
 734    PowerPCCPU *cpu = env_archcpu(env);
 735    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 736    ppc_tb_t *tb_env = env->tb_env;
 737    uint64_t hdecr;
 738
 739    hdecr =  _cpu_ppc_load_decr(env, tb_env->hdecr_next);
 740
 741    /*
 742     * If we have a large decrementer (POWER9 or later) then hdecr is sign
 743     * extended to 64 bits, otherwise it is 32 bits.
 744     */
 745    if (pcc->lrg_decr_bits > 32) {
 746        return hdecr;
 747    }
 748    return (uint32_t) hdecr;
 749}
 750
 751uint64_t cpu_ppc_load_purr (CPUPPCState *env)
 752{
 753    ppc_tb_t *tb_env = env->tb_env;
 754
 755    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 756                          tb_env->purr_offset);
 757}
 758
  759/* When the decrementer expires, all we need to do
  760 * is generate or queue a CPU exception.
  761 */
 762static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
 763{
 764    /* Raise it */
 765    trace_ppc_decr_excp("raise");
 766    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
 767}
 768
 769static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
 770{
 771    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
 772}
 773
 774static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
 775{
 776    CPUPPCState *env = &cpu->env;
 777
 778    /* Raise it */
 779    trace_ppc_decr_excp("raise HV");
 780
  781    /* The architecture specifies that we don't deliver HDEC
  782     * interrupts in a PM state. Not only do they not cause a
  783     * wakeup, they also get effectively discarded.
  784     */
 785    if (!env->resume_as_sreset) {
 786        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
 787    }
 788}
 789
 790static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
 791{
 792    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
 793}
 794
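    /*
     * Common helper for DECR and HDECR stores.  Depending on the underflow
     * semantics in tb_env->flags it either raises/lowers the exception
     * immediately or (re)arms 'timer' to fire when the new value underflows.
     */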
 795static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
 796                                 QEMUTimer *timer,
 797                                 void (*raise_excp)(void *),
 798                                 void (*lower_excp)(PowerPCCPU *),
 799                                 target_ulong decr, target_ulong value,
 800                                 int nr_bits)
 801{
 802    CPUPPCState *env = &cpu->env;
 803    ppc_tb_t *tb_env = env->tb_env;
 804    uint64_t now, next;
 805    int64_t signed_value;
 806    int64_t signed_decr;
 807
 808    /* Truncate value to decr_width and sign extend for simplicity */
 809    signed_value = sextract64(value, 0, nr_bits);
 810    signed_decr = sextract64(decr, 0, nr_bits);
 811
 812    trace_ppc_decr_store(nr_bits, decr, value);
 813
 814    if (kvm_enabled()) {
  815        /* KVM handles decrementer exceptions; we don't need our own timer */
 816        return;
 817    }
 818
 819    /*
 820     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
 821     * interrupt.
 822     *
  823     * If we get a really small DEC value, we can assume that by the time
  824     * we handle it, an interrupt should already have been injected.
  825     *
  826     * On MSB level-based DEC implementations the MSB always means the
  827     * interrupt is pending, so raise it on those.
  828     *
  829     * On MSB edge-based DEC implementations the MSB going from 0 -> 1
  830     * triggers an edge interrupt, so raise it here too.
 831     */
 832    if ((value < 3) ||
 833        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
 834        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
 835          && signed_decr >= 0)) {
 836        (*raise_excp)(cpu);
 837        return;
 838    }
 839
  840    /* On MSB level-based systems a 0 for the MSB stops interrupt delivery */
 841    if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
 842        (*lower_excp)(cpu);
 843    }
 844
 845    /* Calculate the next timer event */
 846    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 847    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
 848    *nextp = next;
 849
 850    /* Adjust timer */
 851    timer_mod(timer, next);
 852}
 853
 854static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
 855                                       target_ulong value, int nr_bits)
 856{
 857    ppc_tb_t *tb_env = cpu->env.tb_env;
 858
 859    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
 860                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
 861                         value, nr_bits);
 862}
 863
 864void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
 865{
 866    PowerPCCPU *cpu = env_archcpu(env);
 867    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 868    int nr_bits = 32;
 869
 870    if (env->spr[SPR_LPCR] & LPCR_LD) {
 871        nr_bits = pcc->lrg_decr_bits;
 872    }
 873
 874    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
 875}
 876
 877static void cpu_ppc_decr_cb(void *opaque)
 878{
 879    PowerPCCPU *cpu = opaque;
 880
 881    cpu_ppc_decr_excp(cpu);
 882}
 883
 884static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
 885                                        target_ulong value, int nr_bits)
 886{
 887    ppc_tb_t *tb_env = cpu->env.tb_env;
 888
 889    if (tb_env->hdecr_timer != NULL) {
 890        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
 891                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
 892                             hdecr, value, nr_bits);
 893    }
 894}
 895
 896void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
 897{
 898    PowerPCCPU *cpu = env_archcpu(env);
 899    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 900
 901    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
 902                         pcc->lrg_decr_bits);
 903}
 904
 905static void cpu_ppc_hdecr_cb(void *opaque)
 906{
 907    PowerPCCPU *cpu = opaque;
 908
 909    cpu_ppc_hdecr_excp(cpu);
 910}
 911
 912void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
 913{
 914    ppc_tb_t *tb_env = env->tb_env;
 915
 916    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 917                     &tb_env->purr_offset, value);
 918}
 919
 920static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
 921{
 922    CPUPPCState *env = opaque;
 923    PowerPCCPU *cpu = env_archcpu(env);
 924    ppc_tb_t *tb_env = env->tb_env;
 925
 926    tb_env->tb_freq = freq;
 927    tb_env->decr_freq = freq;
  928    /* There is a bug in Linux 2.4 kernels:
  929     * if a decrementer exception is pending when the kernel enables MSR[EE]
  930     * at startup, it is not ready to handle it...
  931     */
 932    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
 933    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
 934    cpu_ppc_store_purr(env, 0x0000000000000000ULL);
 935}
 936
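    /*
     * Pause/migration support: timebase_save() captures the guest timebase as
     * host ticks + tb_offset, and timebase_load() turns that back into a new
     * tb_offset relative to the current host ticks, applied to every CPU (and
     * pushed to KVM), so the guest timebase resumes from the saved value.
     */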
 937static void timebase_save(PPCTimebase *tb)
 938{
 939    uint64_t ticks = cpu_get_host_ticks();
 940    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
 941
 942    if (!first_ppc_cpu->env.tb_env) {
 943        error_report("No timebase object");
 944        return;
 945    }
 946
  947    /* Not used anymore; kept for compatibility */
 948    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
 949    /*
 950     * tb_offset is only expected to be changed by QEMU so
 951     * there is no need to update it from KVM here
 952     */
 953    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
 954
 955    tb->runstate_paused =
 956        runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
 957}
 958
 959static void timebase_load(PPCTimebase *tb)
 960{
 961    CPUState *cpu;
 962    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
 963    int64_t tb_off_adj, tb_off;
 964    unsigned long freq;
 965
 966    if (!first_ppc_cpu->env.tb_env) {
 967        error_report("No timebase object");
 968        return;
 969    }
 970
 971    freq = first_ppc_cpu->env.tb_env->tb_freq;
 972
 973    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
 974
 975    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
 976    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
 977                        (tb_off_adj - tb_off) / freq);
 978
 979    /* Set new offset to all CPUs */
 980    CPU_FOREACH(cpu) {
 981        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
 982        pcpu->env.tb_env->tb_offset = tb_off_adj;
 983        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
 984    }
 985}
 986
 987void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
 988                                   RunState state)
 989{
 990    PPCTimebase *tb = opaque;
 991
 992    if (running) {
 993        timebase_load(tb);
 994    } else {
 995        timebase_save(tb);
 996    }
 997}
 998
 999/*
1000 * When migrating a running guest, read the clock just
1001 * before migration, so that the guest clock counts
1002 * during the events between:
1003 *
1004 *  * vm_stop()
1005 *
1006 *  * pre_save()
1007 *
1008 *  This reduces clock difference on migration from 5s
1009 *  to 0.1s (when max_downtime == 5s), because sending the
1010 *  final pages of memory (which happens between vm_stop()
1011 *  and pre_save()) takes max_downtime.
1012 */
1013static int timebase_pre_save(void *opaque)
1014{
1015    PPCTimebase *tb = opaque;
1016
1017    /* guest_timebase won't be overridden in case of paused guest or savevm */
1018    if (!tb->runstate_paused) {
1019        timebase_save(tb);
1020    }
1021
1022    return 0;
1023}
1024
1025const VMStateDescription vmstate_ppc_timebase = {
1026    .name = "timebase",
1027    .version_id = 1,
1028    .minimum_version_id = 1,
1029    .pre_save = timebase_pre_save,
1030    .fields      = (VMStateField []) {
1031        VMSTATE_UINT64(guest_timebase, PPCTimebase),
1032        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1033        VMSTATE_END_OF_LIST()
1034    },
1035};
1036
1037/* Set up (once) timebase frequency (in Hz) */
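    /*
     * Allocates env->tb_env, sets the decrementer underflow behaviour (all
     * 64-bit Book3S CPUs additionally get level-based DEC logic), creates the
     * DECR timer and, when the CPU has an HV mode and no virtual hypervisor,
     * the HDECR timer.  Returns the callback used to change the TB frequency.
     */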
1038clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
1039{
1040    PowerPCCPU *cpu = env_archcpu(env);
1041    ppc_tb_t *tb_env;
1042
1043    tb_env = g_new0(ppc_tb_t, 1);
1044    env->tb_env = tb_env;
1045    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1046    if (is_book3s_arch2x(env)) {
1047        /* All Book3S 64bit CPUs implement level based DEC logic */
1048        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1049    }
1050    /* Create new timer */
1051    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
1052    if (env->has_hv_mode && !cpu->vhyp) {
1053        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
1054                                                cpu);
1055    } else {
1056        tb_env->hdecr_timer = NULL;
1057    }
1058    cpu_ppc_set_tb_clk(env, freq);
1059
1060    return &cpu_ppc_set_tb_clk;
1061}
1062
1063void cpu_ppc_tb_free(CPUPPCState *env)
1064{
1065    timer_free(env->tb_env->decr_timer);
1066    timer_free(env->tb_env->hdecr_timer);
1067    g_free(env->tb_env);
1068}
1069
1070/* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
1071void cpu_ppc_hdecr_init(CPUPPCState *env)
1072{
1073    PowerPCCPU *cpu = env_archcpu(env);
1074
1075    assert(env->tb_env->hdecr_timer == NULL);
1076
1077    env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1078                                            &cpu_ppc_hdecr_cb, cpu);
1079}
1080
1081void cpu_ppc_hdecr_exit(CPUPPCState *env)
1082{
1083    PowerPCCPU *cpu = env_archcpu(env);
1084
1085    timer_free(env->tb_env->hdecr_timer);
1086    env->tb_env->hdecr_timer = NULL;
1087
1088    cpu_ppc_hdecr_lower(cpu);
1089}
1090
1091/*****************************************************************************/
1092/* PowerPC 40x timers */
1093
1094/* PIT, FIT & WDT */
1095typedef struct ppc40x_timer_t ppc40x_timer_t;
1096struct ppc40x_timer_t {
1097    uint64_t pit_reload;  /* PIT auto-reload value        */
1098    uint64_t fit_next;    /* Tick for next FIT interrupt  */
1099    QEMUTimer *fit_timer;
1100    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1101    QEMUTimer *wdt_timer;
1102
 1103    /* The 405 has the PIT, the 440 has a DECR.  */
1104    unsigned int decr_excp;
1105};
1106
1107/* Fixed interval timer */
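    /*
     * The FIT period is selected by the two TCR bits at shift 24: 2^9, 2^13,
     * 2^17 or 2^21 timebase ticks.  Each expiry sets the FIT status bit
     * (shift 26) in TSR and raises PPC_INTERRUPT_FIT when the enable bit at
     * shift 23 is set.
     */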
1108static void cpu_4xx_fit_cb (void *opaque)
1109{
1110    PowerPCCPU *cpu = opaque;
1111    CPUPPCState *env = &cpu->env;
1112    ppc_tb_t *tb_env;
1113    ppc40x_timer_t *ppc40x_timer;
1114    uint64_t now, next;
1115
1116    tb_env = env->tb_env;
1117    ppc40x_timer = tb_env->opaque;
1118    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1119    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1120    case 0:
1121        next = 1 << 9;
1122        break;
1123    case 1:
1124        next = 1 << 13;
1125        break;
1126    case 2:
1127        next = 1 << 17;
1128        break;
1129    case 3:
1130        next = 1 << 21;
1131        break;
1132    default:
1133        /* Cannot occur, but makes gcc happy */
1134        return;
1135    }
1136    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1137    if (next == now)
1138        next++;
1139    timer_mod(ppc40x_timer->fit_timer, next);
1140    env->spr[SPR_40x_TSR] |= 1 << 26;
1141    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1142        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1143    }
1144    trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1145                         env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1146}
1147
1148/* Programmable interval timer */
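    /*
     * The PIT reuses tb_env->decr_timer.  It only runs when the reload value
     * is larger than 1 and the enable bit at TCR shift 26 is set; on an
     * expiry it is only re-armed when the auto-reload bit at shift 22 is set.
     */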
1149static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1150{
1151    ppc40x_timer_t *ppc40x_timer;
1152    uint64_t now, next;
1153
1154    ppc40x_timer = tb_env->opaque;
1155    if (ppc40x_timer->pit_reload <= 1 ||
1156        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1157        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1158        /* Stop PIT */
1159        trace_ppc4xx_pit_stop();
1160        timer_del(tb_env->decr_timer);
1161    } else {
1162        trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
1163        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1164        next = now + muldiv64(ppc40x_timer->pit_reload,
1165                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1166        if (is_excp)
1167            next += tb_env->decr_next - now;
1168        if (next == now)
1169            next++;
1170        timer_mod(tb_env->decr_timer, next);
1171        tb_env->decr_next = next;
1172    }
1173}
1174
1175static void cpu_4xx_pit_cb (void *opaque)
1176{
1177    PowerPCCPU *cpu = opaque;
1178    CPUPPCState *env = &cpu->env;
1179    ppc_tb_t *tb_env;
1180    ppc40x_timer_t *ppc40x_timer;
1181
1182    tb_env = env->tb_env;
1183    ppc40x_timer = tb_env->opaque;
1184    env->spr[SPR_40x_TSR] |= 1 << 27;
1185    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1186        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1187    }
1188    start_stop_pit(env, tb_env, 1);
1189    trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1190           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1191           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1192           ppc40x_timer->pit_reload);
1193}
1194
1195/* Watchdog timer */
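    /*
     * The watchdog period is selected by the two TCR bits at shift 30
     * (2^17, 2^21, 2^25 or 2^29 ticks).  Successive expiries walk the two TSR
     * bits at shift 30: the first sets one of them, the second sets the other
     * and may raise PPC_INTERRUPT_WDT, and the third performs the action
     * (none, core, chip or system reset) selected by the TCR bits at shift 28.
     */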
1196static void cpu_4xx_wdt_cb (void *opaque)
1197{
1198    PowerPCCPU *cpu = opaque;
1199    CPUPPCState *env = &cpu->env;
1200    ppc_tb_t *tb_env;
1201    ppc40x_timer_t *ppc40x_timer;
1202    uint64_t now, next;
1203
1204    tb_env = env->tb_env;
1205    ppc40x_timer = tb_env->opaque;
1206    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1207    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1208    case 0:
1209        next = 1 << 17;
1210        break;
1211    case 1:
1212        next = 1 << 21;
1213        break;
1214    case 2:
1215        next = 1 << 25;
1216        break;
1217    case 3:
1218        next = 1 << 29;
1219        break;
1220    default:
1221        /* Cannot occur, but makes gcc happy */
1222        return;
1223    }
1224    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1225    if (next == now)
1226        next++;
1227    trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1228    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1229    case 0x0:
1230    case 0x1:
1231        timer_mod(ppc40x_timer->wdt_timer, next);
1232        ppc40x_timer->wdt_next = next;
1233        env->spr[SPR_40x_TSR] |= 1U << 31;
1234        break;
1235    case 0x2:
1236        timer_mod(ppc40x_timer->wdt_timer, next);
1237        ppc40x_timer->wdt_next = next;
1238        env->spr[SPR_40x_TSR] |= 1 << 30;
1239        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1240            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1241        }
1242        break;
1243    case 0x3:
1244        env->spr[SPR_40x_TSR] &= ~0x30000000;
1245        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1246        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1247        case 0x0:
1248            /* No reset */
1249            break;
1250        case 0x1: /* Core reset */
1251            ppc40x_core_reset(cpu);
1252            break;
1253        case 0x2: /* Chip reset */
1254            ppc40x_chip_reset(cpu);
1255            break;
1256        case 0x3: /* System reset */
1257            ppc40x_system_reset(cpu);
1258            break;
1259        }
1260    }
1261}
1262
1263void store_40x_pit (CPUPPCState *env, target_ulong val)
1264{
1265    ppc_tb_t *tb_env;
1266    ppc40x_timer_t *ppc40x_timer;
1267
1268    tb_env = env->tb_env;
1269    ppc40x_timer = tb_env->opaque;
1270    trace_ppc40x_store_pit(val);
1271    ppc40x_timer->pit_reload = val;
1272    start_stop_pit(env, tb_env, 0);
1273}
1274
1275target_ulong load_40x_pit (CPUPPCState *env)
1276{
1277    return cpu_ppc_load_decr(env);
1278}
1279
1280void store_40x_tsr(CPUPPCState *env, target_ulong val)
1281{
1282    PowerPCCPU *cpu = env_archcpu(env);
1283
 1284    trace_ppc40x_store_tsr(val);
1285
1286    env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
1287    if (val & 0x80000000) {
1288        ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
1289    }
1290}
1291
1292void store_40x_tcr(CPUPPCState *env, target_ulong val)
1293{
1294    PowerPCCPU *cpu = env_archcpu(env);
1295    ppc_tb_t *tb_env;
1296
 1297    trace_ppc40x_store_tcr(val);
1298
1299    tb_env = env->tb_env;
1300    env->spr[SPR_40x_TCR] = val & 0xFFC00000;
1301    start_stop_pit(env, tb_env, 1);
1302    cpu_4xx_wdt_cb(cpu);
1303}
1304
1305static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1306{
1307    CPUPPCState *env = opaque;
1308    ppc_tb_t *tb_env = env->tb_env;
1309
1310    trace_ppc40x_set_tb_clk(freq);
1311    tb_env->tb_freq = freq;
1312    tb_env->decr_freq = freq;
1313    /* XXX: we should also update all timers */
1314}
1315
1316clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1317                                  unsigned int decr_excp)
1318{
1319    ppc_tb_t *tb_env;
1320    ppc40x_timer_t *ppc40x_timer;
1321    PowerPCCPU *cpu = env_archcpu(env);
1322
1323    trace_ppc40x_timers_init(freq);
1324
1325    tb_env = g_new0(ppc_tb_t, 1);
1326    ppc40x_timer = g_new0(ppc40x_timer_t, 1);
1327
1328    env->tb_env = tb_env;
1329    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1330    tb_env->tb_freq = freq;
1331    tb_env->decr_freq = freq;
1332    tb_env->opaque = ppc40x_timer;
1333
1334    /* We use decr timer for PIT */
1335    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
1336    ppc40x_timer->fit_timer =
1337        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
1338    ppc40x_timer->wdt_timer =
1339        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
1340    ppc40x_timer->decr_excp = decr_excp;
1341
1342    return &ppc_40x_set_tb_clk;
1343}
1344
1345/*****************************************************************************/
1346/* Embedded PowerPC Device Control Registers */
1347typedef struct ppc_dcrn_t ppc_dcrn_t;
1348struct ppc_dcrn_t {
1349    dcr_read_cb dcr_read;
1350    dcr_write_cb dcr_write;
1351    void *opaque;
1352};
1353
1354/* XXX: on 460, DCR addresses are 32 bits wide,
1355 *      using DCRIPR to get the 22 upper bits of the DCR address
1356 */
1357#define DCRN_NB 1024
1358struct ppc_dcr_t {
1359    ppc_dcrn_t dcrn[DCRN_NB];
1360    int (*read_error)(int dcrn);
1361    int (*write_error)(int dcrn);
1362};
1363
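    /*
     * Illustrative usage sketch (not part of this file; names are made up):
     * after a board calls ppc_dcr_init(), a device model registers per-DCR
     * callbacks matching the dcr_read_cb/dcr_write_cb signatures used below,
     * e.g.:
     *
     *     static uint32_t mydev_dcr_read(void *opaque, int dcrn)
     *     {
     *         MyDevState *s = opaque;
     *         return s->regs[dcrn - MYDEV_DCR_BASE];
     *     }
     *
     *     static void mydev_dcr_write(void *opaque, int dcrn, uint32_t val)
     *     {
     *         MyDevState *s = opaque;
     *         s->regs[dcrn - MYDEV_DCR_BASE] = val;
     *     }
     *
     *     ppc_dcr_register(env, MYDEV_DCR_BASE + i, s,
     *                      &mydev_dcr_read, &mydev_dcr_write);
     */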
1364int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1365{
1366    ppc_dcrn_t *dcr;
1367
1368    if (dcrn < 0 || dcrn >= DCRN_NB)
1369        goto error;
1370    dcr = &dcr_env->dcrn[dcrn];
1371    if (dcr->dcr_read == NULL)
1372        goto error;
1373    *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1374    trace_ppc_dcr_read(dcrn, *valp);
1375
1376    return 0;
1377
1378 error:
1379    if (dcr_env->read_error != NULL)
1380        return (*dcr_env->read_error)(dcrn);
1381
1382    return -1;
1383}
1384
1385int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1386{
1387    ppc_dcrn_t *dcr;
1388
1389    if (dcrn < 0 || dcrn >= DCRN_NB)
1390        goto error;
1391    dcr = &dcr_env->dcrn[dcrn];
1392    if (dcr->dcr_write == NULL)
1393        goto error;
1394    trace_ppc_dcr_write(dcrn, val);
1395    (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1396
1397    return 0;
1398
1399 error:
1400    if (dcr_env->write_error != NULL)
1401        return (*dcr_env->write_error)(dcrn);
1402
1403    return -1;
1404}
1405
1406int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1407                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1408{
1409    ppc_dcr_t *dcr_env;
1410    ppc_dcrn_t *dcr;
1411
1412    dcr_env = env->dcr_env;
1413    if (dcr_env == NULL)
1414        return -1;
1415    if (dcrn < 0 || dcrn >= DCRN_NB)
1416        return -1;
1417    dcr = &dcr_env->dcrn[dcrn];
1418    if (dcr->opaque != NULL ||
1419        dcr->dcr_read != NULL ||
1420        dcr->dcr_write != NULL)
1421        return -1;
1422    dcr->opaque = opaque;
1423    dcr->dcr_read = dcr_read;
1424    dcr->dcr_write = dcr_write;
1425
1426    return 0;
1427}
1428
1429int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1430                  int (*write_error)(int dcrn))
1431{
1432    ppc_dcr_t *dcr_env;
1433
1434    dcr_env = g_new0(ppc_dcr_t, 1);
1435    dcr_env->read_error = read_error;
1436    dcr_env->write_error = write_error;
1437    env->dcr_env = dcr_env;
1438
1439    return 0;
1440}
1441
1442/*****************************************************************************/
1443
1444int ppc_cpu_pir(PowerPCCPU *cpu)
1445{
1446    CPUPPCState *env = &cpu->env;
1447    return env->spr_cb[SPR_PIR].default_value;
1448}
1449
1450PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1451{
1452    CPUState *cs;
1453
1454    CPU_FOREACH(cs) {
1455        PowerPCCPU *cpu = POWERPC_CPU(cs);
1456
1457        if (ppc_cpu_pir(cpu) == pir) {
1458            return cpu;
1459        }
1460    }
1461
1462    return NULL;
1463}
1464
1465void ppc_irq_reset(PowerPCCPU *cpu)
1466{
1467    CPUPPCState *env = &cpu->env;
1468
1469    env->irq_input_state = 0;
1470    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
1471}
1472