qemu/hw/ppc/ppc.c
   1/*
   2 * QEMU generic PowerPC hardware System Emulator
   3 *
   4 * Copyright (c) 2003-2007 Jocelyn Mayer
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#include "qemu/osdep.h"
  26#include "hw/irq.h"
  27#include "hw/ppc/ppc.h"
  28#include "hw/ppc/ppc_e500.h"
  29#include "qemu/timer.h"
  30#include "sysemu/cpus.h"
  31#include "qemu/log.h"
  32#include "qemu/main-loop.h"
  33#include "qemu/error-report.h"
  34#include "sysemu/kvm.h"
  35#include "sysemu/runstate.h"
  36#include "kvm_ppc.h"
  37#include "migration/vmstate.h"
  38#include "trace.h"
  39
  40static void cpu_ppc_tb_stop (CPUPPCState *env);
  41static void cpu_ppc_tb_start (CPUPPCState *env);
  42
  43void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
  44{
  45    CPUState *cs = CPU(cpu);
  46    CPUPPCState *env = &cpu->env;
  47    unsigned int old_pending;
  48    bool locked = false;
  49
  50    /* We may already have the BQL if coming from the reset path */
  51    if (!qemu_mutex_iothread_locked()) {
  52        locked = true;
  53        qemu_mutex_lock_iothread();
  54    }
  55
  56    old_pending = env->pending_interrupts;
  57
  58    if (level) {
  59        env->pending_interrupts |= 1 << n_IRQ;
  60        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
  61    } else {
  62        env->pending_interrupts &= ~(1 << n_IRQ);
  63        if (env->pending_interrupts == 0) {
  64            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
  65        }
  66    }
  67
  68    if (old_pending != env->pending_interrupts) {
  69        kvmppc_set_interrupt(cpu, n_IRQ, level);
  70    }
  71
  72
  73    trace_ppc_irq_set_exit(env, n_IRQ, level, env->pending_interrupts,
  74                           CPU(cpu)->interrupt_request);
  75
  76    if (locked) {
  77        qemu_mutex_unlock_iothread();
  78    }
  79}
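/*
 * Illustrative usage (a sketch, not code from this file): device models
 * assert and deassert a per-CPU interrupt source by level, e.g.
 *
 *     ppc_set_irq(cpu, PPC_INTERRUPT_EXT, 1);   // raise the external IRQ
 *     ppc_set_irq(cpu, PPC_INTERRUPT_EXT, 0);   // lower it again
 *
 * Note that CPU_INTERRUPT_HARD is only withdrawn here once the last bit in
 * env->pending_interrupts has been cleared.
 */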
  80
  81/* PowerPC 6xx / 7xx internal IRQ controller */
  82static void ppc6xx_set_irq(void *opaque, int pin, int level)
  83{
  84    PowerPCCPU *cpu = opaque;
  85    CPUPPCState *env = &cpu->env;
  86    int cur_level;
  87
  88    trace_ppc_irq_set(env, pin, level);
  89
  90    cur_level = (env->irq_input_state >> pin) & 1;
  91    /* Don't generate spurious events */
  92    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
  93        CPUState *cs = CPU(cpu);
  94
  95        switch (pin) {
  96        case PPC6xx_INPUT_TBEN:
  97            /* Level sensitive - active high */
  98            trace_ppc_irq_set_state("time base", level);
  99            if (level) {
 100                cpu_ppc_tb_start(env);
 101            } else {
 102                cpu_ppc_tb_stop(env);
 103            }
 104            break;
 105        case PPC6xx_INPUT_INT:
 106            /* Level sensitive - active high */
 107            trace_ppc_irq_set_state("external IRQ", level);
 108            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 109            break;
 110        case PPC6xx_INPUT_SMI:
 111            /* Level sensitive - active high */
 112            trace_ppc_irq_set_state("SMI IRQ", level);
 113            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
 114            break;
 115        case PPC6xx_INPUT_MCP:
 116            /* Negative edge sensitive */
  117            /* XXX: TODO: the actual reaction may depend on HID0 status
 118             *            603/604/740/750: check HID0[EMCP]
 119             */
 120            if (cur_level == 1 && level == 0) {
 121                trace_ppc_irq_set_state("machine check", 1);
 122                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
 123            }
 124            break;
 125        case PPC6xx_INPUT_CKSTP_IN:
 126            /* Level sensitive - active low */
 127            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
 128            /* XXX: Note that the only way to restart the CPU is to reset it */
 129            if (level) {
 130                trace_ppc_irq_cpu("stop");
 131                cs->halted = 1;
 132            }
 133            break;
 134        case PPC6xx_INPUT_HRESET:
 135            /* Level sensitive - active low */
 136            if (level) {
 137                trace_ppc_irq_reset("CPU");
 138                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
 139            }
 140            break;
 141        case PPC6xx_INPUT_SRESET:
 142            trace_ppc_irq_set_state("RESET IRQ", level);
 143            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
 144            break;
 145        default:
 146            g_assert_not_reached();
 147        }
 148        if (level)
 149            env->irq_input_state |= 1 << pin;
 150        else
 151            env->irq_input_state &= ~(1 << pin);
 152    }
 153}
 154
 155void ppc6xx_irq_init(PowerPCCPU *cpu)
 156{
 157    CPUPPCState *env = &cpu->env;
 158
 159    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
 160                                                  PPC6xx_INPUT_NB);
 161}
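/*
 * Sketch of how a board model might drive these pins (the cast and the
 * variable names below are illustrative assumptions, not code from this
 * file):
 *
 *     ppc6xx_irq_init(cpu);
 *     qemu_irq *pins = (qemu_irq *)cpu->env.irq_inputs;
 *     qemu_set_irq(pins[PPC6xx_INPUT_INT], 1);
 *
 * i.e. env->irq_inputs holds PPC6xx_INPUT_NB qemu_irq objects, one per
 * input pin, each dispatching to ppc6xx_set_irq().
 */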
 162
 163#if defined(TARGET_PPC64)
 164/* PowerPC 970 internal IRQ controller */
 165static void ppc970_set_irq(void *opaque, int pin, int level)
 166{
 167    PowerPCCPU *cpu = opaque;
 168    CPUPPCState *env = &cpu->env;
 169    int cur_level;
 170
 171    trace_ppc_irq_set(env, pin, level);
 172
 173    cur_level = (env->irq_input_state >> pin) & 1;
 174    /* Don't generate spurious events */
 175    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 176        CPUState *cs = CPU(cpu);
 177
 178        switch (pin) {
 179        case PPC970_INPUT_INT:
 180            /* Level sensitive - active high */
 181            trace_ppc_irq_set_state("external IRQ", level);
 182            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 183            break;
 184        case PPC970_INPUT_THINT:
 185            /* Level sensitive - active high */
 186            trace_ppc_irq_set_state("SMI IRQ", level);
 187            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
 188            break;
 189        case PPC970_INPUT_MCP:
 190            /* Negative edge sensitive */
  191            /* XXX: TODO: the actual reaction may depend on HID0 status
 192             *            603/604/740/750: check HID0[EMCP]
 193             */
 194            if (cur_level == 1 && level == 0) {
 195                trace_ppc_irq_set_state("machine check", 1);
 196                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
 197            }
 198            break;
 199        case PPC970_INPUT_CKSTP:
 200            /* Level sensitive - active low */
 201            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
 202            if (level) {
 203                trace_ppc_irq_cpu("stop");
 204                cs->halted = 1;
 205            } else {
 206                trace_ppc_irq_cpu("restart");
 207                cs->halted = 0;
 208                qemu_cpu_kick(cs);
 209            }
 210            break;
 211        case PPC970_INPUT_HRESET:
 212            /* Level sensitive - active low */
 213            if (level) {
 214                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
 215            }
 216            break;
 217        case PPC970_INPUT_SRESET:
 218            trace_ppc_irq_set_state("RESET IRQ", level);
 219            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
 220            break;
 221        case PPC970_INPUT_TBEN:
 222            trace_ppc_irq_set_state("TBEN IRQ", level);
 223            /* XXX: TODO */
 224            break;
 225        default:
 226            g_assert_not_reached();
 227        }
 228        if (level)
 229            env->irq_input_state |= 1 << pin;
 230        else
 231            env->irq_input_state &= ~(1 << pin);
 232    }
 233}
 234
 235void ppc970_irq_init(PowerPCCPU *cpu)
 236{
 237    CPUPPCState *env = &cpu->env;
 238
 239    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
 240                                                  PPC970_INPUT_NB);
 241}
 242
 243/* POWER7 internal IRQ controller */
 244static void power7_set_irq(void *opaque, int pin, int level)
 245{
 246    PowerPCCPU *cpu = opaque;
 247
 248    trace_ppc_irq_set(&cpu->env, pin, level);
 249
 250    switch (pin) {
 251    case POWER7_INPUT_INT:
 252        /* Level sensitive - active high */
 253        trace_ppc_irq_set_state("external IRQ", level);
 254        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 255        break;
 256    default:
 257        g_assert_not_reached();
 258    }
 259}
 260
 261void ppcPOWER7_irq_init(PowerPCCPU *cpu)
 262{
 263    CPUPPCState *env = &cpu->env;
 264
 265    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
 266                                                  POWER7_INPUT_NB);
 267}
 268
 269/* POWER9 internal IRQ controller */
 270static void power9_set_irq(void *opaque, int pin, int level)
 271{
 272    PowerPCCPU *cpu = opaque;
 273
 274    trace_ppc_irq_set(&cpu->env, pin, level);
 275
 276    switch (pin) {
 277    case POWER9_INPUT_INT:
 278        /* Level sensitive - active high */
 279        trace_ppc_irq_set_state("external IRQ", level);
 280        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 281        break;
 282    case POWER9_INPUT_HINT:
 283        /* Level sensitive - active high */
 284        trace_ppc_irq_set_state("HV external IRQ", level);
 285        ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
 286        break;
 287    default:
 288        g_assert_not_reached();
 289        return;
 290    }
 291}
 292
 293void ppcPOWER9_irq_init(PowerPCCPU *cpu)
 294{
 295    CPUPPCState *env = &cpu->env;
 296
 297    env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu,
 298                                                  POWER9_INPUT_NB);
 299}
 300#endif /* defined(TARGET_PPC64) */
 301
 302void ppc40x_core_reset(PowerPCCPU *cpu)
 303{
 304    CPUPPCState *env = &cpu->env;
 305    target_ulong dbsr;
 306
 307    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
 308    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
 309    dbsr = env->spr[SPR_40x_DBSR];
 310    dbsr &= ~0x00000300;
 311    dbsr |= 0x00000100;
 312    env->spr[SPR_40x_DBSR] = dbsr;
 313}
 314
 315void ppc40x_chip_reset(PowerPCCPU *cpu)
 316{
 317    CPUPPCState *env = &cpu->env;
 318    target_ulong dbsr;
 319
 320    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
 321    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
 322    /* XXX: TODO reset all internal peripherals */
 323    dbsr = env->spr[SPR_40x_DBSR];
 324    dbsr &= ~0x00000300;
 325    dbsr |= 0x00000200;
 326    env->spr[SPR_40x_DBSR] = dbsr;
 327}
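/*
 * Added note: the field masked by 0x300 above is DBSR[MRR], the "most
 * recent reset" indicator; a core reset leaves 0x100 there and a chip
 * reset leaves 0x200.
 */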
 328
 329void ppc40x_system_reset(PowerPCCPU *cpu)
 330{
 331    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
 332    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
 333}
 334
 335void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
 336{
 337    PowerPCCPU *cpu = env_archcpu(env);
 338
 339    qemu_mutex_lock_iothread();
 340
 341    switch ((val >> 28) & 0x3) {
 342    case 0x0:
 343        /* No action */
 344        break;
 345    case 0x1:
 346        /* Core reset */
 347        ppc40x_core_reset(cpu);
 348        break;
 349    case 0x2:
 350        /* Chip reset */
 351        ppc40x_chip_reset(cpu);
 352        break;
 353    case 0x3:
 354        /* System reset */
 355        ppc40x_system_reset(cpu);
 356        break;
 357    }
 358
 359    qemu_mutex_unlock_iothread();
 360}
 361
 362/* PowerPC 40x internal IRQ controller */
 363static void ppc40x_set_irq(void *opaque, int pin, int level)
 364{
 365    PowerPCCPU *cpu = opaque;
 366    CPUPPCState *env = &cpu->env;
 367    int cur_level;
 368
 369    trace_ppc_irq_set(env, pin, level);
 370
 371    cur_level = (env->irq_input_state >> pin) & 1;
 372    /* Don't generate spurious events */
 373    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 374        CPUState *cs = CPU(cpu);
 375
 376        switch (pin) {
 377        case PPC40x_INPUT_RESET_SYS:
 378            if (level) {
 379                trace_ppc_irq_reset("system");
 380                ppc40x_system_reset(cpu);
 381            }
 382            break;
 383        case PPC40x_INPUT_RESET_CHIP:
 384            if (level) {
 385                trace_ppc_irq_reset("chip");
 386                ppc40x_chip_reset(cpu);
 387            }
 388            break;
 389        case PPC40x_INPUT_RESET_CORE:
 390            /* XXX: TODO: update DBSR[MRR] */
 391            if (level) {
 392                trace_ppc_irq_reset("core");
 393                ppc40x_core_reset(cpu);
 394            }
 395            break;
 396        case PPC40x_INPUT_CINT:
 397            /* Level sensitive - active high */
 398            trace_ppc_irq_set_state("critical IRQ", level);
 399            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
 400            break;
 401        case PPC40x_INPUT_INT:
 402            /* Level sensitive - active high */
 403            trace_ppc_irq_set_state("external IRQ", level);
 404            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 405            break;
 406        case PPC40x_INPUT_HALT:
 407            /* Level sensitive - active low */
 408            if (level) {
 409                trace_ppc_irq_cpu("stop");
 410                cs->halted = 1;
 411            } else {
 412                trace_ppc_irq_cpu("restart");
 413                cs->halted = 0;
 414                qemu_cpu_kick(cs);
 415            }
 416            break;
 417        case PPC40x_INPUT_DEBUG:
 418            /* Level sensitive - active high */
 419            trace_ppc_irq_set_state("debug pin", level);
 420            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
 421            break;
 422        default:
 423            g_assert_not_reached();
 424        }
 425        if (level)
 426            env->irq_input_state |= 1 << pin;
 427        else
 428            env->irq_input_state &= ~(1 << pin);
 429    }
 430}
 431
 432void ppc40x_irq_init(PowerPCCPU *cpu)
 433{
 434    CPUPPCState *env = &cpu->env;
 435
 436    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
 437                                                  cpu, PPC40x_INPUT_NB);
 438}
 439
 440/* PowerPC E500 internal IRQ controller */
 441static void ppce500_set_irq(void *opaque, int pin, int level)
 442{
 443    PowerPCCPU *cpu = opaque;
 444    CPUPPCState *env = &cpu->env;
 445    int cur_level;
 446
 447    trace_ppc_irq_set(env, pin, level);
 448
 449    cur_level = (env->irq_input_state >> pin) & 1;
 450    /* Don't generate spurious events */
 451    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 452        switch (pin) {
 453        case PPCE500_INPUT_MCK:
 454            if (level) {
 455                trace_ppc_irq_reset("system");
 456                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
 457            }
 458            break;
 459        case PPCE500_INPUT_RESET_CORE:
 460            if (level) {
 461                trace_ppc_irq_reset("core");
 462                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
 463            }
 464            break;
 465        case PPCE500_INPUT_CINT:
 466            /* Level sensitive - active high */
 467            trace_ppc_irq_set_state("critical IRQ", level);
 468            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
 469            break;
 470        case PPCE500_INPUT_INT:
 471            /* Level sensitive - active high */
 472            trace_ppc_irq_set_state("core IRQ", level);
 473            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 474            break;
 475        case PPCE500_INPUT_DEBUG:
 476            /* Level sensitive - active high */
 477            trace_ppc_irq_set_state("debug pin", level);
 478            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
 479            break;
 480        default:
 481            g_assert_not_reached();
 482        }
 483        if (level)
 484            env->irq_input_state |= 1 << pin;
 485        else
 486            env->irq_input_state &= ~(1 << pin);
 487    }
 488}
 489
 490void ppce500_irq_init(PowerPCCPU *cpu)
 491{
 492    CPUPPCState *env = &cpu->env;
 493
 494    env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
 495                                                  cpu, PPCE500_INPUT_NB);
 496}
 497
  498/* Enable or disable the E500 EPR capability */
 499void ppce500_set_mpic_proxy(bool enabled)
 500{
 501    CPUState *cs;
 502
 503    CPU_FOREACH(cs) {
 504        PowerPCCPU *cpu = POWERPC_CPU(cs);
 505
 506        cpu->env.mpic_proxy = enabled;
 507        if (kvm_enabled()) {
 508            kvmppc_set_mpic_proxy(cpu, enabled);
 509        }
 510    }
 511}
 512
 513/*****************************************************************************/
 514/* PowerPC time base and decrementer emulation */
 515
 516uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
 517{
 518    /* TB time in tb periods */
 519    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
 520}
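/*
 * Worked example (illustrative numbers): with tb_env->tb_freq = 16 MHz and
 * vmclk = 2 000 000 000 ns of virtual time, this returns
 *
 *     muldiv64(2000000000, 16000000, NANOSECONDS_PER_SECOND) + tb_offset
 *       = 32 000 000 + tb_offset ticks
 *
 * so tb_offset is simply the TB value the CPU would read at vmclk == 0.
 */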
 521
 522uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
 523{
 524    ppc_tb_t *tb_env = env->tb_env;
 525    uint64_t tb;
 526
 527    if (kvm_enabled()) {
 528        return env->spr[SPR_TBL];
 529    }
 530
 531    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 532    trace_ppc_tb_load(tb);
 533
 534    return tb;
 535}
 536
 537static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
 538{
 539    ppc_tb_t *tb_env = env->tb_env;
 540    uint64_t tb;
 541
 542    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 543    trace_ppc_tb_load(tb);
 544
 545    return tb >> 32;
 546}
 547
 548uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
 549{
 550    if (kvm_enabled()) {
 551        return env->spr[SPR_TBU];
 552    }
 553
 554    return _cpu_ppc_load_tbu(env);
 555}
 556
 557static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
 558                                    int64_t *tb_offsetp, uint64_t value)
 559{
 560    *tb_offsetp = value -
 561        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
 562
 563    trace_ppc_tb_store(value, *tb_offsetp);
 564}
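/*
 * Added note: this is the inverse of cpu_ppc_get_tb(); the offset is chosen
 * so that reading the time base back at the same vmclk returns exactly
 * 'value'.  Continuing the example above, storing value = 40 000 000 at
 * vmclk = 2 000 000 000 ns with tb_freq = 16 MHz gives
 * *tb_offsetp = 40 000 000 - 32 000 000 = 8 000 000 ticks.
 */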
 565
 566void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
 567{
 568    ppc_tb_t *tb_env = env->tb_env;
 569    uint64_t tb;
 570
 571    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 572    tb &= 0xFFFFFFFF00000000ULL;
 573    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 574                     &tb_env->tb_offset, tb | (uint64_t)value);
 575}
 576
 577static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
 578{
 579    ppc_tb_t *tb_env = env->tb_env;
 580    uint64_t tb;
 581
 582    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 583    tb &= 0x00000000FFFFFFFFULL;
 584    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 585                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
 586}
 587
 588void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
 589{
 590    _cpu_ppc_store_tbu(env, value);
 591}
 592
 593uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
 594{
 595    ppc_tb_t *tb_env = env->tb_env;
 596    uint64_t tb;
 597
 598    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 599    trace_ppc_tb_load(tb);
 600
 601    return tb;
 602}
 603
 604uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
 605{
 606    ppc_tb_t *tb_env = env->tb_env;
 607    uint64_t tb;
 608
 609    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 610    trace_ppc_tb_load(tb);
 611
 612    return tb >> 32;
 613}
 614
 615void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
 616{
 617    ppc_tb_t *tb_env = env->tb_env;
 618    uint64_t tb;
 619
 620    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 621    tb &= 0xFFFFFFFF00000000ULL;
 622    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 623                     &tb_env->atb_offset, tb | (uint64_t)value);
 624}
 625
 626void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
 627{
 628    ppc_tb_t *tb_env = env->tb_env;
 629    uint64_t tb;
 630
 631    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 632    tb &= 0x00000000FFFFFFFFULL;
 633    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 634                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
 635}
 636
 637uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
 638{
 639    ppc_tb_t *tb_env = env->tb_env;
 640
 641    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 642                          tb_env->vtb_offset);
 643}
 644
 645void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
 646{
 647    ppc_tb_t *tb_env = env->tb_env;
 648
 649    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 650                     &tb_env->vtb_offset, value);
 651}
 652
 653void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
 654{
 655    ppc_tb_t *tb_env = env->tb_env;
 656    uint64_t tb;
 657
 658    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 659                        tb_env->tb_offset);
 660    tb &= 0xFFFFFFUL;
 661    tb |= (value & ~0xFFFFFFUL);
 662    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 663                     &tb_env->tb_offset, tb);
 664}
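/*
 * Added note: only the upper 40 bits of the time base are replaced here;
 * the low 24 bits of the current TB are preserved.  For example, if the TB
 * currently reads 0x0000000000ABCDEF and value = 0x0000012345FFFFFF, the
 * resulting TB is 0x0000012345ABCDEF.
 */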
 665
 666static void cpu_ppc_tb_stop (CPUPPCState *env)
 667{
 668    ppc_tb_t *tb_env = env->tb_env;
 669    uint64_t tb, atb, vmclk;
 670
 671    /* If the time base is already frozen, do nothing */
 672    if (tb_env->tb_freq != 0) {
 673        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 674        /* Get the time base */
 675        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
 676        /* Get the alternate time base */
 677        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
 678        /* Store the time base value (ie compute the current offset) */
 679        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
 680        /* Store the alternate time base value (compute the current offset) */
 681        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
 682        /* Set the time base frequency to zero */
 683        tb_env->tb_freq = 0;
 684        /* Now, the time bases are frozen to tb_offset / atb_offset value */
 685    }
 686}
 687
 688static void cpu_ppc_tb_start (CPUPPCState *env)
 689{
 690    ppc_tb_t *tb_env = env->tb_env;
 691    uint64_t tb, atb, vmclk;
 692
 693    /* If the time base is not frozen, do nothing */
 694    if (tb_env->tb_freq == 0) {
 695        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 696        /* Get the time base from tb_offset */
 697        tb = tb_env->tb_offset;
 698        /* Get the alternate time base from atb_offset */
 699        atb = tb_env->atb_offset;
 700        /* Restore the tb frequency from the decrementer frequency */
 701        tb_env->tb_freq = tb_env->decr_freq;
 702        /* Store the time base value */
 703        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
 704        /* Store the alternate time base value */
 705        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
 706    }
 707}
 708
 709bool ppc_decr_clear_on_delivery(CPUPPCState *env)
 710{
 711    ppc_tb_t *tb_env = env->tb_env;
 712    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
 713    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
 714}
 715
 716static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
 717{
 718    ppc_tb_t *tb_env = env->tb_env;
 719    int64_t decr, diff;
 720
 721    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 722    if (diff >= 0) {
 723        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
 724    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
 725        decr = 0;
  726    } else {
 727        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
 728    }
 729    trace_ppc_decr_load(decr);
 730
 731    return decr;
 732}
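/*
 * Worked example (illustrative numbers): with decr_freq = 16 MHz and the
 * next underflow 500 000 ns away, the guest reads
 * muldiv64(500000, 16000000, NANOSECONDS_PER_SECOND) = 8000 ticks.  Once
 * 'next' lies in the past, BookE parts saturate at 0 while the others
 * return a negative (post-underflow) value.
 */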
 733
 734target_ulong cpu_ppc_load_decr(CPUPPCState *env)
 735{
 736    ppc_tb_t *tb_env = env->tb_env;
 737    uint64_t decr;
 738
 739    if (kvm_enabled()) {
 740        return env->spr[SPR_DECR];
 741    }
 742
 743    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);
 744
 745    /*
  746     * If the large decrementer is enabled then the decrementer is sign
  747     * extended to 64 bits, otherwise it is a 32-bit value.
 748     */
 749    if (env->spr[SPR_LPCR] & LPCR_LD) {
 750        return decr;
 751    }
 752    return (uint32_t) decr;
 753}
 754
 755target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
 756{
 757    PowerPCCPU *cpu = env_archcpu(env);
 758    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 759    ppc_tb_t *tb_env = env->tb_env;
 760    uint64_t hdecr;
 761
  762    hdecr = _cpu_ppc_load_decr(env, tb_env->hdecr_next);
 763
 764    /*
 765     * If we have a large decrementer (POWER9 or later) then hdecr is sign
 766     * extended to 64 bits, otherwise it is 32 bits.
 767     */
 768    if (pcc->lrg_decr_bits > 32) {
 769        return hdecr;
 770    }
 771    return (uint32_t) hdecr;
 772}
 773
 774uint64_t cpu_ppc_load_purr (CPUPPCState *env)
 775{
 776    ppc_tb_t *tb_env = env->tb_env;
 777
 778    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 779                          tb_env->purr_offset);
 780}
 781
  782/* When the decrementer expires,
  783 * all we need to do is generate or queue a CPU exception.
  784 */
 785static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
 786{
 787    /* Raise it */
 788    trace_ppc_decr_excp("raise");
 789    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
 790}
 791
 792static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
 793{
 794    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
 795}
 796
 797static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
 798{
 799    CPUPPCState *env = &cpu->env;
 800
 801    /* Raise it */
 802    trace_ppc_decr_excp("raise HV");
 803
 804    /* The architecture specifies that we don't deliver HDEC
  805     * interrupts in a PM state. Not only do they not cause a
  806     * wakeup, they also get effectively discarded.
 807     */
 808    if (!env->resume_as_sreset) {
 809        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
 810    }
 811}
 812
 813static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
 814{
 815    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
 816}
 817
 818static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
 819                                 QEMUTimer *timer,
 820                                 void (*raise_excp)(void *),
 821                                 void (*lower_excp)(PowerPCCPU *),
 822                                 target_ulong decr, target_ulong value,
 823                                 int nr_bits)
 824{
 825    CPUPPCState *env = &cpu->env;
 826    ppc_tb_t *tb_env = env->tb_env;
 827    uint64_t now, next;
 828    int64_t signed_value;
 829    int64_t signed_decr;
 830
 831    /* Truncate value to decr_width and sign extend for simplicity */
 832    signed_value = sextract64(value, 0, nr_bits);
 833    signed_decr = sextract64(decr, 0, nr_bits);
 834
 835    trace_ppc_decr_store(nr_bits, decr, value);
 836
 837    if (kvm_enabled()) {
 838        /* KVM handles decrementer exceptions, we don't need our own timer */
 839        return;
 840    }
 841
 842    /*
 843     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
 844     * interrupt.
 845     *
  846     * If we get a really small DEC value, we can assume that by the time
  847     * we handle it, it will already have expired, so raise the interrupt now.
 848     *
 849     * On MSB level based DEC implementations the MSB always means the interrupt
 850     * is pending, so raise it on those.
 851     *
 852     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
 853     * an edge interrupt, so raise it here too.
 854     */
 855    if ((value < 3) ||
 856        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
 857        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
 858          && signed_decr >= 0)) {
 859        (*raise_excp)(cpu);
 860        return;
 861    }
 862
 863    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
 864    if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
 865        (*lower_excp)(cpu);
 866    }
 867
 868    /* Calculate the next timer event */
 869    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 870    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
 871    *nextp = next;
 872
 873    /* Adjust timer */
 874    timer_mod(timer, next);
 875}
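/*
 * Added note on the deadline computed above: the timer fires at
 * now + value * NANOSECONDS_PER_SECOND / decr_freq.  For example, storing
 * 15 728 640 (0x00F00000) with decr_freq = 16 MHz arms the underflow
 * callback roughly 983 040 000 ns (about 0.98 s) from now.
 */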
 876
 877static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
 878                                       target_ulong value, int nr_bits)
 879{
 880    ppc_tb_t *tb_env = cpu->env.tb_env;
 881
 882    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
 883                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
 884                         value, nr_bits);
 885}
 886
 887void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
 888{
 889    PowerPCCPU *cpu = env_archcpu(env);
 890    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 891    int nr_bits = 32;
 892
 893    if (env->spr[SPR_LPCR] & LPCR_LD) {
 894        nr_bits = pcc->lrg_decr_bits;
 895    }
 896
 897    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
 898}
 899
 900static void cpu_ppc_decr_cb(void *opaque)
 901{
 902    PowerPCCPU *cpu = opaque;
 903
 904    cpu_ppc_decr_excp(cpu);
 905}
 906
 907static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
 908                                        target_ulong value, int nr_bits)
 909{
 910    ppc_tb_t *tb_env = cpu->env.tb_env;
 911
 912    if (tb_env->hdecr_timer != NULL) {
 913        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
 914                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
 915                             hdecr, value, nr_bits);
 916    }
 917}
 918
 919void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
 920{
 921    PowerPCCPU *cpu = env_archcpu(env);
 922    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 923
 924    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
 925                         pcc->lrg_decr_bits);
 926}
 927
 928static void cpu_ppc_hdecr_cb(void *opaque)
 929{
 930    PowerPCCPU *cpu = opaque;
 931
 932    cpu_ppc_hdecr_excp(cpu);
 933}
 934
 935void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
 936{
 937    ppc_tb_t *tb_env = env->tb_env;
 938
 939    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 940                     &tb_env->purr_offset, value);
 941}
 942
 943static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
 944{
 945    CPUPPCState *env = opaque;
 946    PowerPCCPU *cpu = env_archcpu(env);
 947    ppc_tb_t *tb_env = env->tb_env;
 948
 949    tb_env->tb_freq = freq;
 950    tb_env->decr_freq = freq;
 951    /* There is a bug in Linux 2.4 kernels:
  952     * if a decrementer exception is pending when the kernel enables MSR[EE]
  953     * at startup, it is not yet ready to handle it...
 954     */
 955    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
 956    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
 957    cpu_ppc_store_purr(env, 0x0000000000000000ULL);
 958}
 959
 960static void timebase_save(PPCTimebase *tb)
 961{
 962    uint64_t ticks = cpu_get_host_ticks();
 963    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
 964
 965    if (!first_ppc_cpu->env.tb_env) {
 966        error_report("No timebase object");
 967        return;
 968    }
 969
  970    /* No longer used; kept for compatibility */
 971    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
 972    /*
 973     * tb_offset is only expected to be changed by QEMU so
 974     * there is no need to update it from KVM here
 975     */
 976    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
 977
 978    tb->runstate_paused =
 979        runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
 980}
 981
 982static void timebase_load(PPCTimebase *tb)
 983{
 984    CPUState *cpu;
 985    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
 986    int64_t tb_off_adj, tb_off;
 987    unsigned long freq;
 988
 989    if (!first_ppc_cpu->env.tb_env) {
 990        error_report("No timebase object");
 991        return;
 992    }
 993
 994    freq = first_ppc_cpu->env.tb_env->tb_freq;
 995
 996    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
 997
 998    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
 999    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
1000                        (tb_off_adj - tb_off) / freq);
1001
1002    /* Set new offset to all CPUs */
1003    CPU_FOREACH(cpu) {
1004        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
1005        pcpu->env.tb_env->tb_offset = tb_off_adj;
1006        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
1007    }
1008}
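/*
 * Added note: tb_off_adj = guest_timebase - cpu_get_host_ticks(), so right
 * after resume host_ticks + tb_offset equals the TB value recorded at stop
 * time, i.e. the guest TB does not jump forward by the time spent stopped.
 * With a saved guest_timebase of 1 000 000 000 and host ticks now reading
 * 400 000 000, every vCPU gets tb_offset = 600 000 000.
 */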
1009
1010void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
1011                                   RunState state)
1012{
1013    PPCTimebase *tb = opaque;
1014
1015    if (running) {
1016        timebase_load(tb);
1017    } else {
1018        timebase_save(tb);
1019    }
1020}
1021
1022/*
1023 * When migrating a running guest, read the clock just
1024 * before migration, so that the guest clock counts
1025 * during the events between:
1026 *
1027 *  * vm_stop()
 1028 *
1029 *  * pre_save()
1030 *
1031 *  This reduces clock difference on migration from 5s
1032 *  to 0.1s (when max_downtime == 5s), because sending the
1033 *  final pages of memory (which happens between vm_stop()
1034 *  and pre_save()) takes max_downtime.
1035 */
1036static int timebase_pre_save(void *opaque)
1037{
1038    PPCTimebase *tb = opaque;
1039
1040    /* guest_timebase won't be overridden in case of paused guest or savevm */
1041    if (!tb->runstate_paused) {
1042        timebase_save(tb);
1043    }
1044
1045    return 0;
1046}
1047
1048const VMStateDescription vmstate_ppc_timebase = {
1049    .name = "timebase",
1050    .version_id = 1,
1051    .minimum_version_id = 1,
1052    .pre_save = timebase_pre_save,
1053    .fields      = (VMStateField []) {
1054        VMSTATE_UINT64(guest_timebase, PPCTimebase),
1055        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1056        VMSTATE_END_OF_LIST()
1057    },
1058};
1059
 1060/* One-time setup of the time base frequency (in Hz) */
1061clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
1062{
1063    PowerPCCPU *cpu = env_archcpu(env);
1064    ppc_tb_t *tb_env;
1065
1066    tb_env = g_new0(ppc_tb_t, 1);
1067    env->tb_env = tb_env;
1068    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1069    if (is_book3s_arch2x(env)) {
1070        /* All Book3S 64bit CPUs implement level based DEC logic */
1071        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1072    }
1073    /* Create new timer */
1074    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
1075    if (env->has_hv_mode && !cpu->vhyp) {
1076        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
1077                                                cpu);
1078    } else {
1079        tb_env->hdecr_timer = NULL;
1080    }
1081    cpu_ppc_set_tb_clk(env, freq);
1082
1083    return &cpu_ppc_set_tb_clk;
1084}
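/*
 * Usage sketch (illustrative, board code lives elsewhere): the returned
 * callback lets a board retune the TB/DECR frequency after init:
 *
 *     clk_setup_cb cb = cpu_ppc_tb_init(env, 16 * 1000 * 1000);
 *     ...
 *     cb(env, 32 * 1000 * 1000);   // calls cpu_ppc_set_tb_clk()
 *
 * As shown above, the callback also reloads DECR/HDECR with 0xFFFFFFFF and
 * zeroes PURR.
 */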
1085
1086void cpu_ppc_tb_free(CPUPPCState *env)
1087{
1088    timer_free(env->tb_env->decr_timer);
1089    timer_free(env->tb_env->hdecr_timer);
1090    g_free(env->tb_env);
1091}
1092
 1093/* cpu_ppc_hdecr_init sets up the HDEC timer if cpu_ppc_tb_init did not create it */
1094void cpu_ppc_hdecr_init(CPUPPCState *env)
1095{
1096    PowerPCCPU *cpu = env_archcpu(env);
1097
1098    assert(env->tb_env->hdecr_timer == NULL);
1099
1100    env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1101                                            &cpu_ppc_hdecr_cb, cpu);
1102}
1103
1104void cpu_ppc_hdecr_exit(CPUPPCState *env)
1105{
1106    PowerPCCPU *cpu = env_archcpu(env);
1107
1108    timer_free(env->tb_env->hdecr_timer);
1109    env->tb_env->hdecr_timer = NULL;
1110
1111    cpu_ppc_hdecr_lower(cpu);
1112}
1113
1114/*****************************************************************************/
1115/* PowerPC 40x timers */
1116
1117/* PIT, FIT & WDT */
1118typedef struct ppc40x_timer_t ppc40x_timer_t;
1119struct ppc40x_timer_t {
1120    uint64_t pit_reload;  /* PIT auto-reload value        */
1121    uint64_t fit_next;    /* Tick for next FIT interrupt  */
1122    QEMUTimer *fit_timer;
1123    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1124    QEMUTimer *wdt_timer;
1125
 1126    /* 405 has the PIT, 440 has a DECR. */
1127    unsigned int decr_excp;
1128};
1129
1130/* Fixed interval timer */
1131static void cpu_4xx_fit_cb (void *opaque)
1132{
1133    PowerPCCPU *cpu = opaque;
1134    CPUPPCState *env = &cpu->env;
1135    ppc_tb_t *tb_env;
1136    ppc40x_timer_t *ppc40x_timer;
1137    uint64_t now, next;
1138
1139    tb_env = env->tb_env;
1140    ppc40x_timer = tb_env->opaque;
1141    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1142    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1143    case 0:
1144        next = 1 << 9;
1145        break;
1146    case 1:
1147        next = 1 << 13;
1148        break;
1149    case 2:
1150        next = 1 << 17;
1151        break;
1152    case 3:
1153        next = 1 << 21;
1154        break;
1155    default:
1156        /* Cannot occur, but makes gcc happy */
1157        return;
1158    }
1159    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1160    if (next == now)
1161        next++;
1162    timer_mod(ppc40x_timer->fit_timer, next);
1163    env->spr[SPR_40x_TSR] |= 1 << 26;
1164    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1165        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1166    }
1167    trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1168                         env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1169}
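/*
 * Added note: the two TCR bits tested above ((TCR >> 24) & 0x3) select the
 * FIT period as a power of two of time-base ticks: 2^9, 2^13, 2^17 or
 * 2^21.  With tb_freq = 16 MHz the longest setting (2 097 152 ticks)
 * re-arms the timer roughly every 131 ms.
 */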
1170
1171/* Programmable interval timer */
1172static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1173{
1174    ppc40x_timer_t *ppc40x_timer;
1175    uint64_t now, next;
1176
1177    ppc40x_timer = tb_env->opaque;
1178    if (ppc40x_timer->pit_reload <= 1 ||
1179        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1180        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1181        /* Stop PIT */
1182        trace_ppc4xx_pit_stop();
1183        timer_del(tb_env->decr_timer);
1184    } else {
1185        trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
1186        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1187        next = now + muldiv64(ppc40x_timer->pit_reload,
1188                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1189        if (is_excp)
1190            next += tb_env->decr_next - now;
1191        if (next == now)
1192            next++;
1193        timer_mod(tb_env->decr_timer, next);
1194        tb_env->decr_next = next;
1195    }
1196}
1197
1198static void cpu_4xx_pit_cb (void *opaque)
1199{
1200    PowerPCCPU *cpu = opaque;
1201    CPUPPCState *env = &cpu->env;
1202    ppc_tb_t *tb_env;
1203    ppc40x_timer_t *ppc40x_timer;
1204
1205    tb_env = env->tb_env;
1206    ppc40x_timer = tb_env->opaque;
1207    env->spr[SPR_40x_TSR] |= 1 << 27;
1208    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1209        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1210    }
1211    start_stop_pit(env, tb_env, 1);
1212    trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1213           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1214           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1215           ppc40x_timer->pit_reload);
1216}
1217
1218/* Watchdog timer */
1219static void cpu_4xx_wdt_cb (void *opaque)
1220{
1221    PowerPCCPU *cpu = opaque;
1222    CPUPPCState *env = &cpu->env;
1223    ppc_tb_t *tb_env;
1224    ppc40x_timer_t *ppc40x_timer;
1225    uint64_t now, next;
1226
1227    tb_env = env->tb_env;
1228    ppc40x_timer = tb_env->opaque;
1229    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1230    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1231    case 0:
1232        next = 1 << 17;
1233        break;
1234    case 1:
1235        next = 1 << 21;
1236        break;
1237    case 2:
1238        next = 1 << 25;
1239        break;
1240    case 3:
1241        next = 1 << 29;
1242        break;
1243    default:
1244        /* Cannot occur, but makes gcc happy */
1245        return;
1246    }
1247    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1248    if (next == now)
1249        next++;
1250    trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1251    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1252    case 0x0:
1253    case 0x1:
1254        timer_mod(ppc40x_timer->wdt_timer, next);
1255        ppc40x_timer->wdt_next = next;
1256        env->spr[SPR_40x_TSR] |= 1U << 31;
1257        break;
1258    case 0x2:
1259        timer_mod(ppc40x_timer->wdt_timer, next);
1260        ppc40x_timer->wdt_next = next;
1261        env->spr[SPR_40x_TSR] |= 1 << 30;
1262        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1263            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1264        }
1265        break;
1266    case 0x3:
1267        env->spr[SPR_40x_TSR] &= ~0x30000000;
1268        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1269        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1270        case 0x0:
1271            /* No reset */
1272            break;
1273        case 0x1: /* Core reset */
1274            ppc40x_core_reset(cpu);
1275            break;
1276        case 0x2: /* Chip reset */
1277            ppc40x_chip_reset(cpu);
1278            break;
1279        case 0x3: /* System reset */
1280            ppc40x_system_reset(cpu);
1281            break;
1282        }
1283    }
1284}
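/*
 * Added note: the watchdog escalates over successive expiries as long as
 * the guest does not clear the two status bits tested above: the first
 * expiry sets TSR bit 31, the next sets TSR bit 30 (raising the WDT
 * interrupt if enabled in TCR), and once both are set the action selected
 * by TCR[28:29] is taken (none, core, chip or system reset).
 */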
1285
1286void store_40x_pit (CPUPPCState *env, target_ulong val)
1287{
1288    ppc_tb_t *tb_env;
1289    ppc40x_timer_t *ppc40x_timer;
1290
1291    tb_env = env->tb_env;
1292    ppc40x_timer = tb_env->opaque;
1293    trace_ppc40x_store_pit(val);
1294    ppc40x_timer->pit_reload = val;
1295    start_stop_pit(env, tb_env, 0);
1296}
1297
1298target_ulong load_40x_pit (CPUPPCState *env)
1299{
1300    return cpu_ppc_load_decr(env);
1301}
1302
1303void store_40x_tsr(CPUPPCState *env, target_ulong val)
1304{
1305    PowerPCCPU *cpu = env_archcpu(env);
1306
 1307    trace_ppc40x_store_tsr(val);
1308
1309    env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
1310    if (val & 0x80000000) {
1311        ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
1312    }
1313}
1314
1315void store_40x_tcr(CPUPPCState *env, target_ulong val)
1316{
1317    PowerPCCPU *cpu = env_archcpu(env);
1318    ppc_tb_t *tb_env;
1319
 1320    trace_ppc40x_store_tcr(val);
1321
1322    tb_env = env->tb_env;
1323    env->spr[SPR_40x_TCR] = val & 0xFFC00000;
1324    start_stop_pit(env, tb_env, 1);
1325    cpu_4xx_wdt_cb(cpu);
1326}
1327
1328static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1329{
1330    CPUPPCState *env = opaque;
1331    ppc_tb_t *tb_env = env->tb_env;
1332
1333    trace_ppc40x_set_tb_clk(freq);
1334    tb_env->tb_freq = freq;
1335    tb_env->decr_freq = freq;
1336    /* XXX: we should also update all timers */
1337}
1338
1339clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1340                                  unsigned int decr_excp)
1341{
1342    ppc_tb_t *tb_env;
1343    ppc40x_timer_t *ppc40x_timer;
1344    PowerPCCPU *cpu = env_archcpu(env);
1345
1346    trace_ppc40x_timers_init(freq);
1347
1348    tb_env = g_new0(ppc_tb_t, 1);
1349    ppc40x_timer = g_new0(ppc40x_timer_t, 1);
1350
1351    env->tb_env = tb_env;
1352    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1353    tb_env->tb_freq = freq;
1354    tb_env->decr_freq = freq;
1355    tb_env->opaque = ppc40x_timer;
1356
1357    /* We use decr timer for PIT */
1358    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
1359    ppc40x_timer->fit_timer =
1360        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
1361    ppc40x_timer->wdt_timer =
1362        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
1363    ppc40x_timer->decr_excp = decr_excp;
1364
1365    return &ppc_40x_set_tb_clk;
1366}
1367
1368/*****************************************************************************/
1369/* Embedded PowerPC Device Control Registers */
1370typedef struct ppc_dcrn_t ppc_dcrn_t;
1371struct ppc_dcrn_t {
1372    dcr_read_cb dcr_read;
1373    dcr_write_cb dcr_write;
1374    void *opaque;
1375};
1376
1377/* XXX: on 460, DCR addresses are 32 bits wide,
1378 *      using DCRIPR to get the 22 upper bits of the DCR address
1379 */
1380#define DCRN_NB 1024
1381struct ppc_dcr_t {
1382    ppc_dcrn_t dcrn[DCRN_NB];
1383    int (*read_error)(int dcrn);
1384    int (*write_error)(int dcrn);
1385};
1386
1387int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1388{
1389    ppc_dcrn_t *dcr;
1390
1391    if (dcrn < 0 || dcrn >= DCRN_NB)
1392        goto error;
1393    dcr = &dcr_env->dcrn[dcrn];
1394    if (dcr->dcr_read == NULL)
1395        goto error;
1396    *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1397    trace_ppc_dcr_read(dcrn, *valp);
1398
1399    return 0;
1400
1401 error:
1402    if (dcr_env->read_error != NULL)
1403        return (*dcr_env->read_error)(dcrn);
1404
1405    return -1;
1406}
1407
1408int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1409{
1410    ppc_dcrn_t *dcr;
1411
1412    if (dcrn < 0 || dcrn >= DCRN_NB)
1413        goto error;
1414    dcr = &dcr_env->dcrn[dcrn];
1415    if (dcr->dcr_write == NULL)
1416        goto error;
1417    trace_ppc_dcr_write(dcrn, val);
1418    (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1419
1420    return 0;
1421
1422 error:
1423    if (dcr_env->write_error != NULL)
1424        return (*dcr_env->write_error)(dcrn);
1425
1426    return -1;
1427}
1428
1429int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1430                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1431{
1432    ppc_dcr_t *dcr_env;
1433    ppc_dcrn_t *dcr;
1434
1435    dcr_env = env->dcr_env;
1436    if (dcr_env == NULL)
1437        return -1;
1438    if (dcrn < 0 || dcrn >= DCRN_NB)
1439        return -1;
1440    dcr = &dcr_env->dcrn[dcrn];
1441    if (dcr->opaque != NULL ||
1442        dcr->dcr_read != NULL ||
1443        dcr->dcr_write != NULL)
1444        return -1;
1445    dcr->opaque = opaque;
1446    dcr->dcr_read = dcr_read;
1447    dcr->dcr_write = dcr_write;
1448
1449    return 0;
1450}
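/*
 * Usage sketch (hypothetical device, illustrative names): a DCR-mapped
 * device registers a read and a write handler for each DCR number it owns:
 *
 *     static uint32_t mydev_dcr_read(void *opaque, int dcrn)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[dcrn - MYDEV_DCR_BASE];
 *     }
 *
 *     ppc_dcr_register(env, MYDEV_DCR_BASE + i, s,
 *                      &mydev_dcr_read, &mydev_dcr_write);
 *
 * Registration fails (-1) if the DCR number is out of range or already
 * claimed by another device.
 */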
1451
1452int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1453                  int (*write_error)(int dcrn))
1454{
1455    ppc_dcr_t *dcr_env;
1456
1457    dcr_env = g_new0(ppc_dcr_t, 1);
1458    dcr_env->read_error = read_error;
1459    dcr_env->write_error = write_error;
1460    env->dcr_env = dcr_env;
1461
1462    return 0;
1463}
1464
1465/*****************************************************************************/
1466
1467int ppc_cpu_pir(PowerPCCPU *cpu)
1468{
1469    CPUPPCState *env = &cpu->env;
1470    return env->spr_cb[SPR_PIR].default_value;
1471}
1472
1473PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1474{
1475    CPUState *cs;
1476
1477    CPU_FOREACH(cs) {
1478        PowerPCCPU *cpu = POWERPC_CPU(cs);
1479
1480        if (ppc_cpu_pir(cpu) == pir) {
1481            return cpu;
1482        }
1483    }
1484
1485    return NULL;
1486}
1487
1488void ppc_irq_reset(PowerPCCPU *cpu)
1489{
1490    CPUPPCState *env = &cpu->env;
1491
1492    env->irq_input_state = 0;
1493    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
1494}
1495