qemu/hw/ppc/ppc.c
/*
 * QEMU generic PowerPC hardware System Emulator
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/ppc_e500.h"
#include "qemu/timer.h"
#include "sysemu/cpus.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/runstate.h"
#include "kvm_ppc.h"
#include "migration/vmstate.h"
#include "trace.h"

static void cpu_ppc_tb_stop (CPUPPCState *env);
static void cpu_ppc_tb_start (CPUPPCState *env);

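/*
 * Raise or lower one of the CPU's internal interrupt sources.  The
 * pending_interrupts bitmap is updated and CPU_INTERRUPT_HARD is
 * asserted while at least one source is pending; KVM is notified
 * whenever the bitmap actually changes.
 */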
void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    unsigned int old_pending;
    bool locked = false;

    /* We may already have the BQL if coming from the reset path */
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    old_pending = env->pending_interrupts;

    if (level) {
        env->pending_interrupts |= 1 << n_IRQ;
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        env->pending_interrupts &= ~(1 << n_IRQ);
        if (env->pending_interrupts == 0) {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }

    if (old_pending != env->pending_interrupts) {
        kvmppc_set_interrupt(cpu, n_IRQ, level);
    }

    trace_ppc_irq_set_exit(env, n_IRQ, level, env->pending_interrupts,
                           CPU(cpu)->interrupt_request);

    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* PowerPC 6xx / 7xx internal IRQ controller */
static void ppc6xx_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC6xx_INPUT_TBEN:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("time base", level);
            if (level) {
                cpu_ppc_tb_start(env);
            } else {
                cpu_ppc_tb_stop(env);
            }
            break;
        case PPC6xx_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("external IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC6xx_INPUT_SMI:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("SMI IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
            break;
        case PPC6xx_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: the actual reaction may depend on HID0 status
             *            603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                trace_ppc_irq_set_state("machine check", 1);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC6xx_INPUT_CKSTP_IN:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            /* XXX: Note that the only way to restart the CPU is to reset it */
            if (level) {
                trace_ppc_irq_cpu("stop");
                cs->halted = 1;
            }
            break;
        case PPC6xx_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                trace_ppc_irq_reset("CPU");
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC6xx_INPUT_SRESET:
            trace_ppc_irq_set_state("RESET IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        default:
            g_assert_not_reached();
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

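/*
 * Expose the 6xx/7xx input pins as qdev GPIO lines on the CPU object.
 * A board model would typically wire an interrupt controller output to
 * e.g. qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT) (illustrative).
 */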
void ppc6xx_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
}

#if defined(TARGET_PPC64)
/* PowerPC 970 internal IRQ controller */
static void ppc970_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC970_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("external IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC970_INPUT_THINT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("SMI IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
            break;
        case PPC970_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: the actual reaction may depend on HID0 status
             *            603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                trace_ppc_irq_set_state("machine check", 1);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC970_INPUT_CKSTP:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            if (level) {
                trace_ppc_irq_cpu("stop");
                cs->halted = 1;
            } else {
                trace_ppc_irq_cpu("restart");
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC970_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC970_INPUT_SRESET:
            trace_ppc_irq_set_state("RESET IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        case PPC970_INPUT_TBEN:
            trace_ppc_irq_set_state("TBEN IRQ", level);
            /* XXX: TODO */
            break;
        default:
            g_assert_not_reached();
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

void ppc970_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
}

/* POWER7 internal IRQ controller */
static void power7_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;

    trace_ppc_irq_set(&cpu->env, pin, level);

    switch (pin) {
    case POWER7_INPUT_INT:
        /* Level sensitive - active high */
        trace_ppc_irq_set_state("external IRQ", level);
        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
        break;
    default:
        g_assert_not_reached();
    }
}

void ppcPOWER7_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
}

/* POWER9 internal IRQ controller */
static void power9_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;

    trace_ppc_irq_set(&cpu->env, pin, level);

    switch (pin) {
    case POWER9_INPUT_INT:
        /* Level sensitive - active high */
        trace_ppc_irq_set_state("external IRQ", level);
        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
        break;
    case POWER9_INPUT_HINT:
        /* Level sensitive - active high */
        trace_ppc_irq_set_state("HV external IRQ", level);
        ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
        break;
    default:
        g_assert_not_reached();
        return;
    }
}

void ppcPOWER9_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
}
#endif /* defined(TARGET_PPC64) */

void ppc40x_core_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    dbsr = env->spr[SPR_40x_DBSR];
    dbsr &= ~0x00000300;
    dbsr |= 0x00000100;
    env->spr[SPR_40x_DBSR] = dbsr;
}

void ppc40x_chip_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    /* XXX: TODO reset all internal peripherals */
    dbsr = env->spr[SPR_40x_DBSR];
    dbsr &= ~0x00000300;
    dbsr |= 0x00000200;
    env->spr[SPR_40x_DBSR] = dbsr;
}

void ppc40x_system_reset(PowerPCCPU *cpu)
{
    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}

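/*
 * DBCR0 write handler: the two configuration bits selected by
 * (val >> 28) & 0x3 request no action, a core reset, a chip reset or a
 * full system reset.  The BQL is taken around the reset helpers.
 */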
void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    qemu_mutex_lock_iothread();

    switch ((val >> 28) & 0x3) {
    case 0x0:
        /* No action */
        break;
    case 0x1:
        /* Core reset */
        ppc40x_core_reset(cpu);
        break;
    case 0x2:
        /* Chip reset */
        ppc40x_chip_reset(cpu);
        break;
    case 0x3:
        /* System reset */
        ppc40x_system_reset(cpu);
        break;
    }

    qemu_mutex_unlock_iothread();
}

/* PowerPC 40x internal IRQ controller */
static void ppc40x_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC40x_INPUT_RESET_SYS:
            if (level) {
                trace_ppc_irq_reset("system");
                ppc40x_system_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CHIP:
            if (level) {
                trace_ppc_irq_reset("chip");
                ppc40x_chip_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CORE:
            /* XXX: TODO: update DBSR[MRR] */
            if (level) {
                trace_ppc_irq_reset("core");
                ppc40x_core_reset(cpu);
            }
            break;
        case PPC40x_INPUT_CINT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("critical IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPC40x_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("external IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC40x_INPUT_HALT:
            /* Level sensitive - active low */
            if (level) {
                trace_ppc_irq_cpu("stop");
                cs->halted = 1;
            } else {
                trace_ppc_irq_cpu("restart");
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC40x_INPUT_DEBUG:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("debug pin", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            g_assert_not_reached();
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

void ppc40x_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
}

/* PowerPC E500 internal IRQ controller */
static void ppce500_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    trace_ppc_irq_set(env, pin, level);

    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        switch (pin) {
        case PPCE500_INPUT_MCK:
            if (level) {
                trace_ppc_irq_reset("system");
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            }
            break;
        case PPCE500_INPUT_RESET_CORE:
            if (level) {
                trace_ppc_irq_reset("core");
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
            }
            break;
        case PPCE500_INPUT_CINT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("critical IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPCE500_INPUT_INT:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("core IRQ", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPCE500_INPUT_DEBUG:
            /* Level sensitive - active high */
            trace_ppc_irq_set_state("debug pin", level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            g_assert_not_reached();
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

void ppce500_irq_init(PowerPCCPU *cpu)
{
    qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
}

/* Enable or Disable the E500 EPR capability */
void ppce500_set_mpic_proxy(bool enabled)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        cpu->env.mpic_proxy = enabled;
        if (kvm_enabled()) {
            kvmppc_set_mpic_proxy(cpu, enabled);
        }
    }
}

/*****************************************************************************/
/* PowerPC time base and decrementer emulation */

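/*
 * Convert a QEMU_CLOCK_VIRTUAL timestamp (in ns) into time base ticks:
 * tb = vmclk * tb_freq / 10^9 + tb_offset.
 */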
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
{
    /* TB time in tb periods */
    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
}

uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    if (kvm_enabled()) {
        return env->spr[SPR_TBL];
    }

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    trace_ppc_tb_load(tb);

    return tb;
}

static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    trace_ppc_tb_load(tb);

    return tb >> 32;
}

uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
{
    if (kvm_enabled()) {
        return env->spr[SPR_TBU];
    }

    return _cpu_ppc_load_tbu(env);
}

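/*
 * Writing the time base is implemented by recomputing the offset so
 * that "value" is what cpu_ppc_get_tb() would return right now:
 * tb_offset = value - vmclk * tb_freq / 10^9.
 */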
static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
                                    int64_t *tb_offsetp, uint64_t value)
{
    *tb_offsetp = value -
        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);

    trace_ppc_tb_store(value, *tb_offsetp);
}

void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, tb | (uint64_t)value);
}

static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
}

void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    trace_ppc_tb_load(tb);

    return tb;
}

uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    trace_ppc_tb_load(tb);

    return tb >> 32;
}

void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, tb | (uint64_t)value);
}

void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
}

uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          tb_env->vtb_offset);
}

void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;

    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->vtb_offset, value);
}

void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                        tb_env->tb_offset);
    tb &= 0xFFFFFFUL;
    tb |= (value & ~0xFFFFFFUL);
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, tb);
}

static void cpu_ppc_tb_stop (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is already frozen, do nothing */
    if (tb_env->tb_freq != 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base */
        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
        /* Get the alternate time base */
        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
        /* Store the time base value (ie compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value (compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
        /* Set the time base frequency to zero */
        tb_env->tb_freq = 0;
        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    }
}

static void cpu_ppc_tb_start (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is not frozen, do nothing */
    if (tb_env->tb_freq == 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base from tb_offset */
        tb = tb_env->tb_offset;
        /* Get the alternate time base from atb_offset */
        atb = tb_env->atb_offset;
        /* Restore the tb frequency from the decrementer frequency */
        tb_env->tb_freq = tb_env->decr_freq;
        /* Store the time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    }
}

bool ppc_decr_clear_on_delivery(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
}

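/*
 * The decrementer value is derived from the time remaining until the
 * programmed "next" deadline.  Book E decrementers saturate at zero,
 * while the others keep counting into negative values.
 */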
static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t decr, diff;

    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (diff >= 0) {
        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
        decr = 0;
    } else {
        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    }
    trace_ppc_decr_load(decr);

    return decr;
}

target_ulong cpu_ppc_load_decr(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t decr;

    if (kvm_enabled()) {
        return env->spr[SPR_DECR];
    }

    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);

    /*
     * If the large decrementer is enabled then the decrementer is sign
     * extended to 64 bits, otherwise it is a 32 bit value.
     */
    if (env->spr[SPR_LPCR] & LPCR_LD) {
        return decr;
    }
    return (uint32_t) decr;
}

target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t hdecr;

    hdecr = _cpu_ppc_load_decr(env, tb_env->hdecr_next);

    /*
     * If we have a large decrementer (POWER9 or later) then hdecr is sign
     * extended to 64 bits, otherwise it is 32 bits.
     */
    if (pcc->lrg_decr_bits > 32) {
        return hdecr;
    }
    return (uint32_t) hdecr;
}

uint64_t cpu_ppc_load_purr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;

    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                          tb_env->purr_offset);
}

/*
 * When the decrementer expires, all we need to do is generate or queue
 * a CPU exception.
 */
static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    trace_ppc_decr_excp("raise");
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}

static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
}

static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Raise it */
    trace_ppc_decr_excp("raise HV");

    /*
     * The architecture specifies that we don't deliver HDEC interrupts
     * in a PM state.  Not only do they not cause a wakeup, they also
     * get effectively discarded.
     */
    if (!env->resume_as_sreset) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
    }
}

static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}

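/*
 * Common helper for DECR and HDECR stores: decide whether the write
 * must raise or lower the corresponding exception right away, then
 * (re)arm the QEMU timer for the next underflow.
 */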
static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
                                 QEMUTimer *timer,
                                 void (*raise_excp)(void *),
                                 void (*lower_excp)(PowerPCCPU *),
                                 target_ulong decr, target_ulong value,
                                 int nr_bits)
{
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t now, next;
    int64_t signed_value;
    int64_t signed_decr;

    /* Truncate value to decr_width and sign extend for simplicity */
    signed_value = sextract64(value, 0, nr_bits);
    signed_decr = sextract64(decr, 0, nr_bits);

    trace_ppc_decr_store(nr_bits, decr, value);

    if (kvm_enabled()) {
        /* KVM handles decrementer exceptions, we don't need our own timer */
        return;
    }

    /*
     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
     * interrupt.
     *
     * If we get a really small DEC value, we can assume that by the time we
     * handled it we should inject an interrupt already.
     *
     * On MSB level based DEC implementations the MSB always means the interrupt
     * is pending, so raise it on those.
     *
     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
     * an edge interrupt, so raise it here too.
     */
    if ((value < 3) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
          && signed_decr >= 0)) {
        (*raise_excp)(cpu);
        return;
    }

    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
        (*lower_excp)(cpu);
    }

    /* Calculate the next timer event */
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    *nextp = next;

    /* Adjust timer */
    timer_mod(timer, next);
}

static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
                                       target_ulong value, int nr_bits)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
                         value, nr_bits);
}

void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    int nr_bits = 32;

    if (env->spr[SPR_LPCR] & LPCR_LD) {
        nr_bits = pcc->lrg_decr_bits;
    }

    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
}

static void cpu_ppc_decr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_decr_excp(cpu);
}

static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
                                        target_ulong value, int nr_bits)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    if (tb_env->hdecr_timer != NULL) {
        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
                             hdecr, value, nr_bits);
    }
}

void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
                         pcc->lrg_decr_bits);
}

static void cpu_ppc_hdecr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_hdecr_excp(cpu);
}

void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
{
    ppc_tb_t *tb_env = env->tb_env;

    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->purr_offset, value);
}

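/*
 * Clock setup callback returned by cpu_ppc_tb_init(): reprogram the
 * timebase/decrementer frequency and reinitialise DECR, HDECR and
 * the PURR.
 */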
static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env = env->tb_env;

    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /*
     * There is a bug in Linux 2.4 kernels: if a decrementer exception is
     * pending when the kernel enables MSR[EE] at startup, it is not yet
     * ready to handle it...
     */
    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    cpu_ppc_store_purr(env, 0x0000000000000000ULL);
}

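/*
 * Record the guest timebase relative to the host's tick counter so it
 * can be re-derived on the destination after migration or after the
 * VM has been stopped for a while.
 */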
static void timebase_save(PPCTimebase *tb)
{
    uint64_t ticks = cpu_get_host_ticks();
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    /* not used anymore, we keep it for compatibility */
    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    /*
     * tb_offset is only expected to be changed by QEMU so
     * there is no need to update it from KVM here
     */
    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;

    tb->runstate_paused =
        runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
}

static void timebase_load(PPCTimebase *tb)
{
    CPUState *cpu;
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    int64_t tb_off_adj, tb_off;
    unsigned long freq;

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    freq = first_ppc_cpu->env.tb_env->tb_freq;

    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();

    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
                        (tb_off_adj - tb_off) / freq);

    /* Set new offset to all CPUs */
    CPU_FOREACH(cpu) {
        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
        pcpu->env.tb_env->tb_offset = tb_off_adj;
        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
    }
}

void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
                                   RunState state)
{
    PPCTimebase *tb = opaque;

    if (running) {
        timebase_load(tb);
    } else {
        timebase_save(tb);
    }
}

/*
 * When migrating a running guest, read the clock just before migration,
 * so that the guest clock counts during the events between:
 *
 *  * vm_stop()
 *  * pre_save()
 *
 * This reduces the clock difference on migration from 5s to 0.1s (when
 * max_downtime == 5s), because sending the final pages of memory (which
 * happens between vm_stop() and pre_save()) takes max_downtime.
 */
static int timebase_pre_save(void *opaque)
{
    PPCTimebase *tb = opaque;

    /* guest_timebase won't be overridden in case of paused guest or savevm */
    if (!tb->runstate_paused) {
        timebase_save(tb);
    }

    return 0;
}

const VMStateDescription vmstate_ppc_timebase = {
    .name = "timebase",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = timebase_pre_save,
    .fields      = (VMStateField []) {
        VMSTATE_UINT64(guest_timebase, PPCTimebase),
        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
        VMSTATE_END_OF_LIST()
    },
};

/* Set up (once) timebase frequency (in Hz) */
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env;

    tb_env = g_new0(ppc_tb_t, 1);
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    if (is_book3s_arch2x(env)) {
        /* All Book3S 64bit CPUs implement level based DEC logic */
        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
    if (env->has_hv_mode && !cpu->vhyp) {
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
                                           cpu);
    } else {
        tb_env->hdecr_timer = NULL;
    }
    cpu_ppc_set_tb_clk(env, freq);

    return &cpu_ppc_set_tb_clk;
}

void cpu_ppc_tb_free(CPUPPCState *env)
{
    timer_free(env->tb_env->decr_timer);
    timer_free(env->tb_env->hdecr_timer);
    g_free(env->tb_env);
}

/* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
void cpu_ppc_hdecr_init(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    assert(env->tb_env->hdecr_timer == NULL);

    env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                            &cpu_ppc_hdecr_cb, cpu);
}

void cpu_ppc_hdecr_exit(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);

    timer_free(env->tb_env->hdecr_timer);
    env->tb_env->hdecr_timer = NULL;

    cpu_ppc_hdecr_lower(cpu);
}

/*****************************************************************************/
/* PowerPC 40x timers */

/* PIT, FIT & WDT */
typedef struct ppc40x_timer_t ppc40x_timer_t;
struct ppc40x_timer_t {
    uint64_t pit_reload;  /* PIT auto-reload value        */
    uint64_t fit_next;    /* Tick for next FIT interrupt  */
    QEMUTimer *fit_timer;
    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
    QEMUTimer *wdt_timer;

    /* The 405 has the PIT, the 440 has a DECR.  */
    unsigned int decr_excp;
};

/* Fixed interval timer */
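/*
 * The FIT period is selected by (TCR >> 24) & 0x3: 2^9, 2^13, 2^17 or
 * 2^21 timebase ticks between callbacks.
 */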
static void cpu_4xx_fit_cb (void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
    case 0:
        next = 1 << 9;
        break;
    case 1:
        next = 1 << 13;
        break;
    case 2:
        next = 1 << 17;
        break;
    case 3:
        next = 1 << 21;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
    if (next == now) {
        next++;
    }
    timer_mod(ppc40x_timer->fit_timer, next);
    env->spr[SPR_40x_TSR] |= 1 << 26;
    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
    }
    trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
                     env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
}

/* Programmable interval timer */
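/*
 * (Re)arm or stop the PIT: it is stopped when the reload value is 0 or 1,
 * when its enable bit in TCR is clear, or when it has just expired with
 * auto-reload disabled; otherwise the shared decr timer is programmed
 * for the next expiry.
 */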
static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
{
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    ppc40x_timer = tb_env->opaque;
    if (ppc40x_timer->pit_reload <= 1 ||
        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
        /* Stop PIT */
        trace_ppc4xx_pit_stop();
        timer_del(tb_env->decr_timer);
    } else {
        trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        next = now + muldiv64(ppc40x_timer->pit_reload,
                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
        if (is_excp) {
            next += tb_env->decr_next - now;
        }
        if (next == now) {
            next++;
        }
        timer_mod(tb_env->decr_timer, next);
        tb_env->decr_next = next;
    }
}

static void cpu_4xx_pit_cb (void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    env->spr[SPR_40x_TSR] |= 1 << 27;
    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
    }
    start_stop_pit(env, tb_env, 1);
    trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
           ppc40x_timer->pit_reload);
}

/* Watchdog timer */
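/*
 * Successive expiries walk the TSR watchdog state: first flag that a
 * timeout occurred, then set the watchdog interrupt status (optionally
 * raising PPC_INTERRUPT_WDT), and finally trigger the reset action
 * selected in TCR.
 */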
static void cpu_4xx_wdt_cb (void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
    case 0:
        next = 1 << 17;
        break;
    case 1:
        next = 1 << 21;
        break;
    case 2:
        next = 1 << 25;
        break;
    case 3:
        next = 1 << 29;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    if (next == now) {
        next++;
    }
    trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
    case 0x0:
    case 0x1:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1U << 31;
        break;
    case 0x2:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1 << 30;
        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
        }
        break;
    case 0x3:
        env->spr[SPR_40x_TSR] &= ~0x30000000;
        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
        case 0x0:
            /* No reset */
            break;
        case 0x1: /* Core reset */
            ppc40x_core_reset(cpu);
            break;
        case 0x2: /* Chip reset */
            ppc40x_chip_reset(cpu);
            break;
        case 0x3: /* System reset */
            ppc40x_system_reset(cpu);
            break;
        }
    }
}

void store_40x_pit (CPUPPCState *env, target_ulong val)
{
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    trace_ppc40x_store_pit(val);
    ppc40x_timer->pit_reload = val;
    start_stop_pit(env, tb_env, 0);
}

target_ulong load_40x_pit (CPUPPCState *env)
{
    return cpu_ppc_load_decr(env);
}

void store_40x_tsr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    trace_ppc40x_store_tsr(val);

    env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
    if (val & 0x80000000) {
        ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
    }
}

void store_40x_tcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env;

    trace_ppc40x_store_tcr(val);

    tb_env = env->tb_env;
    env->spr[SPR_40x_TCR] = val & 0xFFC00000;
    start_stop_pit(env, tb_env, 1);
    cpu_4xx_wdt_cb(cpu);
}

static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    ppc_tb_t *tb_env = env->tb_env;

    trace_ppc40x_set_tb_clk(freq);
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /* XXX: we should also update all timers */
}

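/*
 * Allocate the 40x timer state and hook up the PIT, FIT and WDT
 * callbacks; the returned callback lets the board reprogram the
 * timebase/decrementer frequency later on.
 */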
clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
                                  unsigned int decr_excp)
{
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    PowerPCCPU *cpu = env_archcpu(env);

    trace_ppc40x_timers_init(freq);

    tb_env = g_new0(ppc_tb_t, 1);
    ppc40x_timer = g_new0(ppc40x_timer_t, 1);

    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    tb_env->opaque = ppc40x_timer;

    /* We use decr timer for PIT */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
    ppc40x_timer->fit_timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
    ppc40x_timer->wdt_timer =
        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
    ppc40x_timer->decr_excp = decr_excp;

    return &ppc_40x_set_tb_clk;
}

/*****************************************************************************/
/* Embedded PowerPC Device Control Registers */
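/*
 * DCRs are dispatched through a flat table of DCRN_NB entries, each
 * holding per-register read/write callbacks plus an opaque pointer;
 * accesses to unregistered DCRs fall back to the error handlers.
 */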
typedef struct ppc_dcrn_t ppc_dcrn_t;
struct ppc_dcrn_t {
    dcr_read_cb dcr_read;
    dcr_write_cb dcr_write;
    void *opaque;
};

/* XXX: on 460, DCR addresses are 32 bits wide,
 *      using DCRIPR to get the 22 upper bits of the DCR address
 */
#define DCRN_NB 1024
struct ppc_dcr_t {
    ppc_dcrn_t dcrn[DCRN_NB];
    int (*read_error)(int dcrn);
    int (*write_error)(int dcrn);
};

int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    ppc_dcrn_t *dcr;

    if (dcrn < 0 || dcrn >= DCRN_NB) {
        goto error;
    }
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->dcr_read == NULL) {
        goto error;
    }
    *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
    trace_ppc_dcr_read(dcrn, *valp);

    return 0;

 error:
    if (dcr_env->read_error != NULL) {
        return (*dcr_env->read_error)(dcrn);
    }

    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    ppc_dcrn_t *dcr;

    if (dcrn < 0 || dcrn >= DCRN_NB) {
        goto error;
    }
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->dcr_write == NULL) {
        goto error;
    }
    trace_ppc_dcr_write(dcrn, val);
    (*dcr->dcr_write)(dcr->opaque, dcrn, val);

    return 0;

 error:
    if (dcr_env->write_error != NULL) {
        return (*dcr_env->write_error)(dcrn);
    }

    return -1;
}

int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
{
    ppc_dcr_t *dcr_env;
    ppc_dcrn_t *dcr;

    dcr_env = env->dcr_env;
    if (dcr_env == NULL) {
        return -1;
    }
    if (dcrn < 0 || dcrn >= DCRN_NB) {
        return -1;
    }
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->opaque != NULL ||
        dcr->dcr_read != NULL ||
        dcr->dcr_write != NULL) {
        return -1;
    }
    dcr->opaque = opaque;
    dcr->dcr_read = dcr_read;
    dcr->dcr_write = dcr_write;

    return 0;
}

int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
                  int (*write_error)(int dcrn))
{
    ppc_dcr_t *dcr_env;

    dcr_env = g_new0(ppc_dcr_t, 1);
    dcr_env->read_error = read_error;
    dcr_env->write_error = write_error;
    env->dcr_env = dcr_env;

    return 0;
}

/*****************************************************************************/

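/* Identify vCPUs by their PIR (Processor Identification Register) value. */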
int ppc_cpu_pir(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    return env->spr_cb[SPR_PIR].default_value;
}

PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        if (ppc_cpu_pir(cpu) == pir) {
            return cpu;
        }
    }

    return NULL;
}

void ppc_irq_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_input_state = 0;
    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
}
1477