qemu/hw/ppc/ppc.c
   1/*
   2 * QEMU generic PowerPC hardware System Emulator
   3 *
   4 * Copyright (c) 2003-2007 Jocelyn Mayer
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#include "qemu/osdep.h"
  26#include "hw/irq.h"
  27#include "hw/ppc/ppc.h"
  28#include "hw/ppc/ppc_e500.h"
  29#include "qemu/timer.h"
  30#include "sysemu/cpus.h"
  31#include "qemu/log.h"
  32#include "qemu/main-loop.h"
  33#include "qemu/error-report.h"
  34#include "sysemu/kvm.h"
  35#include "sysemu/runstate.h"
  36#include "kvm_ppc.h"
  37#include "migration/vmstate.h"
  38#include "trace.h"
  39
  40static void cpu_ppc_tb_stop (CPUPPCState *env);
  41static void cpu_ppc_tb_start (CPUPPCState *env);
  42
  43void ppc_set_irq(PowerPCCPU *cpu, int irq, int level)
  44{
  45    CPUPPCState *env = &cpu->env;
  46    unsigned int old_pending;
  47
  48    /* We may already have the BQL if coming from the reset path */
  49    QEMU_IOTHREAD_LOCK_GUARD();
  50
  51    old_pending = env->pending_interrupts;
  52
  53    if (level) {
  54        env->pending_interrupts |= irq;
  55    } else {
  56        env->pending_interrupts &= ~irq;
  57    }
  58
  59    if (old_pending != env->pending_interrupts) {
  60        ppc_maybe_interrupt(env);
  61        kvmppc_set_interrupt(cpu, irq, level);
  62    }
  63
  64    trace_ppc_irq_set_exit(env, irq, level, env->pending_interrupts,
  65                           CPU(cpu)->interrupt_request);
  66}
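/*
 * Example (illustrative, not part of the original file): callers pass one of
 * the PPC_INTERRUPT_* bit masks, which simply accumulate in
 * env->pending_interrupts, e.g.
 *
 *     ppc_set_irq(cpu, PPC_INTERRUPT_EXT, 1);    raise the external interrupt
 *     ppc_set_irq(cpu, PPC_INTERRUPT_EXT, 0);    lower it again
 *
 * kvmppc_set_interrupt() is only notified when the pending mask actually
 * changes, so redundant calls with the same level are cheap.
 */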
  67
  68/* PowerPC 6xx / 7xx internal IRQ controller */
  69static void ppc6xx_set_irq(void *opaque, int pin, int level)
  70{
  71    PowerPCCPU *cpu = opaque;
  72    CPUPPCState *env = &cpu->env;
  73    int cur_level;
  74
  75    trace_ppc_irq_set(env, pin, level);
  76
  77    cur_level = (env->irq_input_state >> pin) & 1;
  78    /* Don't generate spurious events */
  79    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
  80        CPUState *cs = CPU(cpu);
  81
  82        switch (pin) {
  83        case PPC6xx_INPUT_TBEN:
  84            /* Level sensitive - active high */
  85            trace_ppc_irq_set_state("time base", level);
  86            if (level) {
  87                cpu_ppc_tb_start(env);
  88            } else {
  89                cpu_ppc_tb_stop(env);
  90            }
  91            break;
  92        case PPC6xx_INPUT_INT:
  93            /* Level sensitive - active high */
  94            trace_ppc_irq_set_state("external IRQ", level);
  95            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
  96            break;
  97        case PPC6xx_INPUT_SMI:
  98            /* Level sensitive - active high */
  99            trace_ppc_irq_set_state("SMI IRQ", level);
 100            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
 101            break;
 102        case PPC6xx_INPUT_MCP:
 103            /* Negative edge sensitive */
 104            /* XXX: TODO: actual reaction may depend on HID0 status
 105             *            603/604/740/750: check HID0[EMCP]
 106             */
 107            if (cur_level == 1 && level == 0) {
 108                trace_ppc_irq_set_state("machine check", 1);
 109                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
 110            }
 111            break;
 112        case PPC6xx_INPUT_CKSTP_IN:
 113            /* Level sensitive - active low */
 114            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
 115            /* XXX: Note that the only way to restart the CPU is to reset it */
 116            if (level) {
 117                trace_ppc_irq_cpu("stop");
 118                cs->halted = 1;
 119            }
 120            break;
 121        case PPC6xx_INPUT_HRESET:
 122            /* Level sensitive - active low */
 123            if (level) {
 124                trace_ppc_irq_reset("CPU");
 125                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
 126            }
 127            break;
 128        case PPC6xx_INPUT_SRESET:
 129            trace_ppc_irq_set_state("RESET IRQ", level);
 130            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
 131            break;
 132        default:
 133            g_assert_not_reached();
 134        }
 135        if (level)
 136            env->irq_input_state |= 1 << pin;
 137        else
 138            env->irq_input_state &= ~(1 << pin);
 139    }
 140}
 141
 142void ppc6xx_irq_init(PowerPCCPU *cpu)
 143{
 144    qdev_init_gpio_in(DEVICE(cpu), ppc6xx_set_irq, PPC6xx_INPUT_NB);
 145}
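/*
 * Illustrative wiring sketch (not part of the original file): after
 * ppc6xx_irq_init() a board model can fetch the CPU input pins created by
 * qdev_init_gpio_in() and connect them to its interrupt sources, e.g.
 *
 *     qemu_irq ext = qdev_get_gpio_in(DEVICE(cpu), PPC6xx_INPUT_INT);
 *     qemu_irq_raise(ext);    ends up in ppc6xx_set_irq() with level != 0
 *
 * The other controllers below (970, POWER7/9, 40x, e500) are wired the same
 * way, only the pin enumeration differs.
 */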
 146
 147#if defined(TARGET_PPC64)
 148/* PowerPC 970 internal IRQ controller */
 149static void ppc970_set_irq(void *opaque, int pin, int level)
 150{
 151    PowerPCCPU *cpu = opaque;
 152    CPUPPCState *env = &cpu->env;
 153    int cur_level;
 154
 155    trace_ppc_irq_set(env, pin, level);
 156
 157    cur_level = (env->irq_input_state >> pin) & 1;
 158    /* Don't generate spurious events */
 159    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 160        CPUState *cs = CPU(cpu);
 161
 162        switch (pin) {
 163        case PPC970_INPUT_INT:
 164            /* Level sensitive - active high */
 165            trace_ppc_irq_set_state("external IRQ", level);
 166            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 167            break;
 168        case PPC970_INPUT_THINT:
 169            /* Level sensitive - active high */
 170            trace_ppc_irq_set_state("SMI IRQ", level);
 171            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
 172            break;
 173        case PPC970_INPUT_MCP:
 174            /* Negative edge sensitive */
 175            /* XXX: TODO: actual reaction may depend on HID0 status
 176             *            603/604/740/750: check HID0[EMCP]
 177             */
 178            if (cur_level == 1 && level == 0) {
 179                trace_ppc_irq_set_state("machine check", 1);
 180                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
 181            }
 182            break;
 183        case PPC970_INPUT_CKSTP:
 184            /* Level sensitive - active low */
 185            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
 186            if (level) {
 187                trace_ppc_irq_cpu("stop");
 188                cs->halted = 1;
 189            } else {
 190                trace_ppc_irq_cpu("restart");
 191                cs->halted = 0;
 192                qemu_cpu_kick(cs);
 193            }
 194            break;
 195        case PPC970_INPUT_HRESET:
 196            /* Level sensitive - active low */
 197            if (level) {
 198                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
 199            }
 200            break;
 201        case PPC970_INPUT_SRESET:
 202            trace_ppc_irq_set_state("RESET IRQ", level);
 203            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
 204            break;
 205        case PPC970_INPUT_TBEN:
 206            trace_ppc_irq_set_state("TBEN IRQ", level);
 207            /* XXX: TODO */
 208            break;
 209        default:
 210            g_assert_not_reached();
 211        }
 212        if (level)
 213            env->irq_input_state |= 1 << pin;
 214        else
 215            env->irq_input_state &= ~(1 << pin);
 216    }
 217}
 218
 219void ppc970_irq_init(PowerPCCPU *cpu)
 220{
 221    qdev_init_gpio_in(DEVICE(cpu), ppc970_set_irq, PPC970_INPUT_NB);
 222}
 223
 224/* POWER7 internal IRQ controller */
 225static void power7_set_irq(void *opaque, int pin, int level)
 226{
 227    PowerPCCPU *cpu = opaque;
 228
 229    trace_ppc_irq_set(&cpu->env, pin, level);
 230
 231    switch (pin) {
 232    case POWER7_INPUT_INT:
 233        /* Level sensitive - active high */
 234        trace_ppc_irq_set_state("external IRQ", level);
 235        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 236        break;
 237    default:
 238        g_assert_not_reached();
 239    }
 240}
 241
 242void ppcPOWER7_irq_init(PowerPCCPU *cpu)
 243{
 244    qdev_init_gpio_in(DEVICE(cpu), power7_set_irq, POWER7_INPUT_NB);
 245}
 246
 247/* POWER9 internal IRQ controller */
 248static void power9_set_irq(void *opaque, int pin, int level)
 249{
 250    PowerPCCPU *cpu = opaque;
 251
 252    trace_ppc_irq_set(&cpu->env, pin, level);
 253
 254    switch (pin) {
 255    case POWER9_INPUT_INT:
 256        /* Level sensitive - active high */
 257        trace_ppc_irq_set_state("external IRQ", level);
 258        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 259        break;
 260    case POWER9_INPUT_HINT:
 261        /* Level sensitive - active high */
 262        trace_ppc_irq_set_state("HV external IRQ", level);
 263        ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
 264        break;
 265    default:
 266        g_assert_not_reached();
 267        return;
 268    }
 269}
 270
 271void ppcPOWER9_irq_init(PowerPCCPU *cpu)
 272{
 273    qdev_init_gpio_in(DEVICE(cpu), power9_set_irq, POWER9_INPUT_NB);
 274}
 275#endif /* defined(TARGET_PPC64) */
 276
 277void ppc40x_core_reset(PowerPCCPU *cpu)
 278{
 279    CPUPPCState *env = &cpu->env;
 280    target_ulong dbsr;
 281
 282    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
 283    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
 284    dbsr = env->spr[SPR_40x_DBSR];
 285    dbsr &= ~0x00000300;
 286    dbsr |= 0x00000100;
 287    env->spr[SPR_40x_DBSR] = dbsr;
 288}
 289
 290void ppc40x_chip_reset(PowerPCCPU *cpu)
 291{
 292    CPUPPCState *env = &cpu->env;
 293    target_ulong dbsr;
 294
 295    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
 296    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
 297    /* XXX: TODO reset all internal peripherals */
 298    dbsr = env->spr[SPR_40x_DBSR];
 299    dbsr &= ~0x00000300;
 300    dbsr |= 0x00000200;
 301    env->spr[SPR_40x_DBSR] = dbsr;
 302}
 303
 304void ppc40x_system_reset(PowerPCCPU *cpu)
 305{
 306    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
 307    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
 308}
 309
 310void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
 311{
 312    PowerPCCPU *cpu = env_archcpu(env);
 313
 314    qemu_mutex_lock_iothread();
 315
 316    switch ((val >> 28) & 0x3) {
 317    case 0x0:
 318        /* No action */
 319        break;
 320    case 0x1:
 321        /* Core reset */
 322        ppc40x_core_reset(cpu);
 323        break;
 324    case 0x2:
 325        /* Chip reset */
 326        ppc40x_chip_reset(cpu);
 327        break;
 328    case 0x3:
 329        /* System reset */
 330        ppc40x_system_reset(cpu);
 331        break;
 332    }
 333
 334    qemu_mutex_unlock_iothread();
 335}
 336
 337/* PowerPC 40x internal IRQ controller */
 338static void ppc40x_set_irq(void *opaque, int pin, int level)
 339{
 340    PowerPCCPU *cpu = opaque;
 341    CPUPPCState *env = &cpu->env;
 342    int cur_level;
 343
 344    trace_ppc_irq_set(env, pin, level);
 345
 346    cur_level = (env->irq_input_state >> pin) & 1;
 347    /* Don't generate spurious events */
 348    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 349        CPUState *cs = CPU(cpu);
 350
 351        switch (pin) {
 352        case PPC40x_INPUT_RESET_SYS:
 353            if (level) {
 354                trace_ppc_irq_reset("system");
 355                ppc40x_system_reset(cpu);
 356            }
 357            break;
 358        case PPC40x_INPUT_RESET_CHIP:
 359            if (level) {
 360                trace_ppc_irq_reset("chip");
 361                ppc40x_chip_reset(cpu);
 362            }
 363            break;
 364        case PPC40x_INPUT_RESET_CORE:
 365            /* XXX: TODO: update DBSR[MRR] */
 366            if (level) {
 367                trace_ppc_irq_reset("core");
 368                ppc40x_core_reset(cpu);
 369            }
 370            break;
 371        case PPC40x_INPUT_CINT:
 372            /* Level sensitive - active high */
 373            trace_ppc_irq_set_state("critical IRQ", level);
 374            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
 375            break;
 376        case PPC40x_INPUT_INT:
 377            /* Level sensitive - active high */
 378            trace_ppc_irq_set_state("external IRQ", level);
 379            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 380            break;
 381        case PPC40x_INPUT_HALT:
 382            /* Level sensitive - active low */
 383            if (level) {
 384                trace_ppc_irq_cpu("stop");
 385                cs->halted = 1;
 386            } else {
 387                trace_ppc_irq_cpu("restart");
 388                cs->halted = 0;
 389                qemu_cpu_kick(cs);
 390            }
 391            break;
 392        case PPC40x_INPUT_DEBUG:
 393            /* Level sensitive - active high */
 394            trace_ppc_irq_set_state("debug pin", level);
 395            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
 396            break;
 397        default:
 398            g_assert_not_reached();
 399        }
 400        if (level)
 401            env->irq_input_state |= 1 << pin;
 402        else
 403            env->irq_input_state &= ~(1 << pin);
 404    }
 405}
 406
 407void ppc40x_irq_init(PowerPCCPU *cpu)
 408{
 409    qdev_init_gpio_in(DEVICE(cpu), ppc40x_set_irq, PPC40x_INPUT_NB);
 410}
 411
 412/* PowerPC E500 internal IRQ controller */
 413static void ppce500_set_irq(void *opaque, int pin, int level)
 414{
 415    PowerPCCPU *cpu = opaque;
 416    CPUPPCState *env = &cpu->env;
 417    int cur_level;
 418
 419    trace_ppc_irq_set(env, pin, level);
 420
 421    cur_level = (env->irq_input_state >> pin) & 1;
 422    /* Don't generate spurious events */
 423    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
 424        switch (pin) {
 425        case PPCE500_INPUT_MCK:
 426            if (level) {
 427                trace_ppc_irq_reset("system");
 428                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
 429            }
 430            break;
 431        case PPCE500_INPUT_RESET_CORE:
 432            if (level) {
 433                trace_ppc_irq_reset("core");
 434                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
 435            }
 436            break;
 437        case PPCE500_INPUT_CINT:
 438            /* Level sensitive - active high */
 439            trace_ppc_irq_set_state("critical IRQ", level);
 440            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
 441            break;
 442        case PPCE500_INPUT_INT:
 443            /* Level sensitive - active high */
 444            trace_ppc_irq_set_state("core IRQ", level);
 445            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
 446            break;
 447        case PPCE500_INPUT_DEBUG:
 448            /* Level sensitive - active high */
 449            trace_ppc_irq_set_state("debug pin", level);
 450            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
 451            break;
 452        default:
 453            g_assert_not_reached();
 454        }
 455        if (level)
 456            env->irq_input_state |= 1 << pin;
 457        else
 458            env->irq_input_state &= ~(1 << pin);
 459    }
 460}
 461
 462void ppce500_irq_init(PowerPCCPU *cpu)
 463{
 464    qdev_init_gpio_in(DEVICE(cpu), ppce500_set_irq, PPCE500_INPUT_NB);
 465}
 466
 467/* Enable or Disable the E500 EPR capability */
 468void ppce500_set_mpic_proxy(bool enabled)
 469{
 470    CPUState *cs;
 471
 472    CPU_FOREACH(cs) {
 473        PowerPCCPU *cpu = POWERPC_CPU(cs);
 474
 475        cpu->env.mpic_proxy = enabled;
 476        if (kvm_enabled()) {
 477            kvmppc_set_mpic_proxy(cpu, enabled);
 478        }
 479    }
 480}
 481
 482/*****************************************************************************/
 483/* PowerPC time base and decrementer emulation */
 484
 485uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
 486{
 487    /* TB time in tb periods */
 488    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
 489}
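/*
 * Worked example (illustrative): with vmclk = 2,000,000,000 ns of virtual
 * time, a hypothetical tb_freq of 512,000,000 Hz and tb_offset = 0,
 * muldiv64() yields 2e9 * 512e6 / 1e9 = 1,024,000,000 timebase ticks.
 * tb_offset is simply added afterwards, so a guest write to the TB only
 * ever changes the offset, never the underlying virtual clock.
 */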
 490
 491uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
 492{
 493    ppc_tb_t *tb_env = env->tb_env;
 494    uint64_t tb;
 495
 496    if (kvm_enabled()) {
 497        return env->spr[SPR_TBL];
 498    }
 499
 500    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 501    trace_ppc_tb_load(tb);
 502
 503    return tb;
 504}
 505
 506static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
 507{
 508    ppc_tb_t *tb_env = env->tb_env;
 509    uint64_t tb;
 510
 511    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 512    trace_ppc_tb_load(tb);
 513
 514    return tb >> 32;
 515}
 516
 517uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
 518{
 519    if (kvm_enabled()) {
 520        return env->spr[SPR_TBU];
 521    }
 522
 523    return _cpu_ppc_load_tbu(env);
 524}
 525
 526static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
 527                                    int64_t *tb_offsetp, uint64_t value)
 528{
 529    *tb_offsetp = value -
 530        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
 531
 532    trace_ppc_tb_store(value, *tb_offsetp);
 533}
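/*
 * Illustrative invariant: after cpu_ppc_store_tb(tb_env, vmclk, &off, value),
 * cpu_ppc_get_tb(tb_env, vmclk, off) returns exactly value again, because
 * the new offset is value minus the tick count derived from vmclk. The
 * TBL/TBU/ATB/VTB/PURR store helpers below all reduce to this offset
 * recomputation.
 */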
 534
 535void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
 536{
 537    ppc_tb_t *tb_env = env->tb_env;
 538    uint64_t tb;
 539
 540    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 541    tb &= 0xFFFFFFFF00000000ULL;
 542    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 543                     &tb_env->tb_offset, tb | (uint64_t)value);
 544}
 545
 546static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
 547{
 548    ppc_tb_t *tb_env = env->tb_env;
 549    uint64_t tb;
 550
 551    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
 552    tb &= 0x00000000FFFFFFFFULL;
 553    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 554                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
 555}
 556
 557void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
 558{
 559    _cpu_ppc_store_tbu(env, value);
 560}
 561
 562uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
 563{
 564    ppc_tb_t *tb_env = env->tb_env;
 565    uint64_t tb;
 566
 567    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 568    trace_ppc_tb_load(tb);
 569
 570    return tb;
 571}
 572
 573uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
 574{
 575    ppc_tb_t *tb_env = env->tb_env;
 576    uint64_t tb;
 577
 578    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 579    trace_ppc_tb_load(tb);
 580
 581    return tb >> 32;
 582}
 583
 584void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
 585{
 586    ppc_tb_t *tb_env = env->tb_env;
 587    uint64_t tb;
 588
 589    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 590    tb &= 0xFFFFFFFF00000000ULL;
 591    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 592                     &tb_env->atb_offset, tb | (uint64_t)value);
 593}
 594
 595void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
 596{
 597    ppc_tb_t *tb_env = env->tb_env;
 598    uint64_t tb;
 599
 600    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
 601    tb &= 0x00000000FFFFFFFFULL;
 602    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 603                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
 604}
 605
 606uint64_t cpu_ppc_load_vtb(CPUPPCState *env)
 607{
 608    ppc_tb_t *tb_env = env->tb_env;
 609
 610    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 611                          tb_env->vtb_offset);
 612}
 613
 614void cpu_ppc_store_vtb(CPUPPCState *env, uint64_t value)
 615{
 616    ppc_tb_t *tb_env = env->tb_env;
 617
 618    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 619                     &tb_env->vtb_offset, value);
 620}
 621
 622void cpu_ppc_store_tbu40(CPUPPCState *env, uint64_t value)
 623{
 624    ppc_tb_t *tb_env = env->tb_env;
 625    uint64_t tb;
 626
 627    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 628                        tb_env->tb_offset);
 629    tb &= 0xFFFFFFUL;
 630    tb |= (value & ~0xFFFFFFUL);
 631    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 632                     &tb_env->tb_offset, tb);
 633}
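/*
 * Worked example (illustrative): TBU40 replaces the upper 40 bits of the
 * timebase and keeps the running low 24 bits. With a current TB of
 * 0x0000012345ABCDEF and value = 0x0000999999000000, the value stored is
 * (0x0000012345ABCDEF & 0xFFFFFF) | (value & ~0xFFFFFF) = 0x0000999999ABCDEF.
 */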
 634
 635static void cpu_ppc_tb_stop (CPUPPCState *env)
 636{
 637    ppc_tb_t *tb_env = env->tb_env;
 638    uint64_t tb, atb, vmclk;
 639
 640    /* If the time base is already frozen, do nothing */
 641    if (tb_env->tb_freq != 0) {
 642        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 643        /* Get the time base */
 644        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
 645        /* Get the alternate time base */
 646        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
 647        /* Store the time base value (ie compute the current offset) */
 648        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
 649        /* Store the alternate time base value (compute the current offset) */
 650        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
 651        /* Set the time base frequency to zero */
 652        tb_env->tb_freq = 0;
 653        /* Now, the time bases are frozen to tb_offset / atb_offset value */
 654    }
 655}
 656
 657static void cpu_ppc_tb_start (CPUPPCState *env)
 658{
 659    ppc_tb_t *tb_env = env->tb_env;
 660    uint64_t tb, atb, vmclk;
 661
 662    /* If the time base is not frozen, do nothing */
 663    if (tb_env->tb_freq == 0) {
 664        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 665        /* Get the time base from tb_offset */
 666        tb = tb_env->tb_offset;
 667        /* Get the alternate time base from atb_offset */
 668        atb = tb_env->atb_offset;
 669        /* Restore the tb frequency from the decrementer frequency */
 670        tb_env->tb_freq = tb_env->decr_freq;
 671        /* Store the time base value */
 672        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
 673        /* Store the alternate time base value */
 674        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
 675    }
 676}
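/*
 * Note (illustrative): while the time base is frozen tb_freq == 0, so
 * cpu_ppc_get_tb() degenerates to muldiv64(vmclk, 0, ...) + tb_offset, i.e.
 * it keeps returning the value captured in cpu_ppc_tb_stop(). On restart the
 * captured value is re-based against the current QEMU_CLOCK_VIRTUAL, so the
 * guest never sees the time base jump forward across the frozen period.
 */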
 677
 678bool ppc_decr_clear_on_delivery(CPUPPCState *env)
 679{
 680    ppc_tb_t *tb_env = env->tb_env;
 681    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
 682    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
 683}
 684
 685static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
 686{
 687    ppc_tb_t *tb_env = env->tb_env;
 688    int64_t decr, diff;
 689
 690    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 691    if (diff >= 0) {
 692        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
 693    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
 694        decr = 0;
 695    }  else {
 696        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
 697    }
 698    trace_ppc_decr_load(decr);
 699
 700    return decr;
 701}
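/*
 * Worked example (illustrative): if decr_next is 1,000,000 ns in the future
 * and decr_freq is a hypothetical 512,000,000 Hz, the guest reads back
 * 1e6 * 512e6 / 1e9 = 512,000. Once decr_next has passed, BookE parts clamp
 * the value at 0 while the others return the (negative) underflowed count.
 */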
 702
 703target_ulong cpu_ppc_load_decr(CPUPPCState *env)
 704{
 705    ppc_tb_t *tb_env = env->tb_env;
 706    uint64_t decr;
 707
 708    if (kvm_enabled()) {
 709        return env->spr[SPR_DECR];
 710    }
 711
 712    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);
 713
 714    /*
 715     * If the large decrementer is enabled then the decrementer is sign
 716     * extended to 64 bits, otherwise it is a 32-bit value.
 717     */
 718    if (env->spr[SPR_LPCR] & LPCR_LD) {
 719        return decr;
 720    }
 721    return (uint32_t) decr;
 722}
 723
 724target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
 725{
 726    PowerPCCPU *cpu = env_archcpu(env);
 727    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 728    ppc_tb_t *tb_env = env->tb_env;
 729    uint64_t hdecr;
 730
 731    hdecr =  _cpu_ppc_load_decr(env, tb_env->hdecr_next);
 732
 733    /*
 734     * If we have a large decrementer (POWER9 or later) then hdecr is sign
 735     * extended to 64 bits, otherwise it is 32 bits.
 736     */
 737    if (pcc->lrg_decr_bits > 32) {
 738        return hdecr;
 739    }
 740    return (uint32_t) hdecr;
 741}
 742
 743uint64_t cpu_ppc_load_purr (CPUPPCState *env)
 744{
 745    ppc_tb_t *tb_env = env->tb_env;
 746
 747    return cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 748                          tb_env->purr_offset);
 749}
 750
 751/* When the decrementer expires,
 752 * all we need to do is generate or queue a CPU exception
 753 */
 754static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
 755{
 756    /* Raise it */
 757    trace_ppc_decr_excp("raise");
 758    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
 759}
 760
 761static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
 762{
 763    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
 764}
 765
 766static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
 767{
 768    CPUPPCState *env = &cpu->env;
 769
 770    /* Raise it */
 771    trace_ppc_decr_excp("raise HV");
 772
 773    /* The architecture specifies that we don't deliver HDEC
 774     * interrupts in a PM state. Not only do they not cause a
 775     * wakeup, they also get effectively discarded.
 776     */
 777    if (!env->resume_as_sreset) {
 778        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
 779    }
 780}
 781
 782static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
 783{
 784    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
 785}
 786
 787static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
 788                                 QEMUTimer *timer,
 789                                 void (*raise_excp)(void *),
 790                                 void (*lower_excp)(PowerPCCPU *),
 791                                 target_ulong decr, target_ulong value,
 792                                 int nr_bits)
 793{
 794    CPUPPCState *env = &cpu->env;
 795    ppc_tb_t *tb_env = env->tb_env;
 796    uint64_t now, next;
 797    int64_t signed_value;
 798    int64_t signed_decr;
 799
 800    /* Truncate value to nr_bits and sign extend for simplicity */
 801    signed_value = sextract64(value, 0, nr_bits);
 802    signed_decr = sextract64(decr, 0, nr_bits);
 803
 804    trace_ppc_decr_store(nr_bits, decr, value);
 805
 806    if (kvm_enabled()) {
 807        /* KVM handles decrementer exceptions, we don't need our own timer */
 808        return;
 809    }
 810
 811    /*
 812     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
 813     * interrupt.
 814     *
 815     * If we get a really small DEC value, we can assume that by the time we
 816     * handled it we should inject an interrupt already.
 817     *
 818     * On MSB level based DEC implementations the MSB always means the interrupt
 819     * is pending, so raise it on those.
 820     *
 821     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
 822     * an edge interrupt, so raise it here too.
 823     */
 824    if ((value < 3) ||
 825        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) ||
 826        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0
 827          && signed_decr >= 0)) {
 828        (*raise_excp)(cpu);
 829        return;
 830    }
 831
 832    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
 833    if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
 834        (*lower_excp)(cpu);
 835    }
 836
 837    /* Calculate the next timer event */
 838    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 839    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
 840    *nextp = next;
 841
 842    /* Adjust timer */
 843    timer_mod(timer, next);
 844}
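/*
 * Illustrative cases for the triggering rules above (hypothetical 32-bit
 * decrementer, i.e. nr_bits == 32):
 *   - store 0xFFFFFFFF: signed_value == -1, so a level-based DEC raises the
 *     interrupt immediately; an edge-based DEC raises it only if the old
 *     value was still non-negative (MSB went 0 -> 1).
 *   - store 2: below the "really small" threshold, raise immediately.
 *   - store 0x40000000: signed_value >= 0, so a level-based DEC is lowered
 *     and the QEMU timer is armed value/decr_freq seconds in the future.
 */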
 845
 846static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
 847                                       target_ulong value, int nr_bits)
 848{
 849    ppc_tb_t *tb_env = cpu->env.tb_env;
 850
 851    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
 852                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
 853                         value, nr_bits);
 854}
 855
 856void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
 857{
 858    PowerPCCPU *cpu = env_archcpu(env);
 859    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 860    int nr_bits = 32;
 861
 862    if (env->spr[SPR_LPCR] & LPCR_LD) {
 863        nr_bits = pcc->lrg_decr_bits;
 864    }
 865
 866    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
 867}
 868
 869static void cpu_ppc_decr_cb(void *opaque)
 870{
 871    PowerPCCPU *cpu = opaque;
 872
 873    cpu_ppc_decr_excp(cpu);
 874}
 875
 876static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
 877                                        target_ulong value, int nr_bits)
 878{
 879    ppc_tb_t *tb_env = cpu->env.tb_env;
 880
 881    if (tb_env->hdecr_timer != NULL) {
 882        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
 883                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
 884                             hdecr, value, nr_bits);
 885    }
 886}
 887
 888void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
 889{
 890    PowerPCCPU *cpu = env_archcpu(env);
 891    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
 892
 893    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
 894                         pcc->lrg_decr_bits);
 895}
 896
 897static void cpu_ppc_hdecr_cb(void *opaque)
 898{
 899    PowerPCCPU *cpu = opaque;
 900
 901    cpu_ppc_hdecr_excp(cpu);
 902}
 903
 904void cpu_ppc_store_purr(CPUPPCState *env, uint64_t value)
 905{
 906    ppc_tb_t *tb_env = env->tb_env;
 907
 908    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
 909                     &tb_env->purr_offset, value);
 910}
 911
 912static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
 913{
 914    CPUPPCState *env = opaque;
 915    PowerPCCPU *cpu = env_archcpu(env);
 916    ppc_tb_t *tb_env = env->tb_env;
 917
 918    tb_env->tb_freq = freq;
 919    tb_env->decr_freq = freq;
 920    /* There is a bug in Linux 2.4 kernels:
 921     * if a decrementer exception is pending when the kernel enables msr_ee
 922     * at startup, it is not ready to handle it...
 923     */
 924    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
 925    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
 926    cpu_ppc_store_purr(env, 0x0000000000000000ULL);
 927}
 928
 929static void timebase_save(PPCTimebase *tb)
 930{
 931    uint64_t ticks = cpu_get_host_ticks();
 932    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
 933
 934    if (!first_ppc_cpu->env.tb_env) {
 935        error_report("No timebase object");
 936        return;
 937    }
 938
 939    /* not used anymore, we keep it for compatibility */
 940    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
 941    /*
 942     * tb_offset is only expected to be changed by QEMU so
 943     * there is no need to update it from KVM here
 944     */
 945    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
 946
 947    tb->runstate_paused =
 948        runstate_check(RUN_STATE_PAUSED) || runstate_check(RUN_STATE_SAVE_VM);
 949}
 950
 951static void timebase_load(PPCTimebase *tb)
 952{
 953    CPUState *cpu;
 954    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
 955    int64_t tb_off_adj, tb_off;
 956    unsigned long freq;
 957
 958    if (!first_ppc_cpu->env.tb_env) {
 959        error_report("No timebase object");
 960        return;
 961    }
 962
 963    freq = first_ppc_cpu->env.tb_env->tb_freq;
 964
 965    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();
 966
 967    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
 968    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
 969                        (tb_off_adj - tb_off) / freq);
 970
 971    /* Set new offset to all CPUs */
 972    CPU_FOREACH(cpu) {
 973        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
 974        pcpu->env.tb_env->tb_offset = tb_off_adj;
 975        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
 976    }
 977}
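/*
 * Worked example (illustrative): if the source host saved guest_timebase =
 * 10,000,000,000 and the destination host's cpu_get_host_ticks() currently
 * reads 9,000,000,000, then tb_off_adj = 1,000,000,000 ticks is installed as
 * tb_offset on every vCPU (and pushed to KVM), so the guest-visible timebase
 * continues from where it stopped rather than following the new host clock.
 */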
 978
 979void cpu_ppc_clock_vm_state_change(void *opaque, bool running,
 980                                   RunState state)
 981{
 982    PPCTimebase *tb = opaque;
 983
 984    if (running) {
 985        timebase_load(tb);
 986    } else {
 987        timebase_save(tb);
 988    }
 989}
 990
 991/*
 992 * When migrating a running guest, read the clock just
 993 * before migration, so that the guest clock counts
 994 * during the events between:
 995 *
 996 *  * vm_stop()
 997 *  * (the final pages of memory are sent)
 998 *  * pre_save()
 999 *
1000 *  This reduces clock difference on migration from 5s
1001 *  to 0.1s (when max_downtime == 5s), because sending the
1002 *  final pages of memory (which happens between vm_stop()
1003 *  and pre_save()) takes max_downtime.
1004 */
1005static int timebase_pre_save(void *opaque)
1006{
1007    PPCTimebase *tb = opaque;
1008
1009    /* guest_timebase won't be overridden in case of paused guest or savevm */
1010    if (!tb->runstate_paused) {
1011        timebase_save(tb);
1012    }
1013
1014    return 0;
1015}
1016
1017const VMStateDescription vmstate_ppc_timebase = {
1018    .name = "timebase",
1019    .version_id = 1,
1020    .minimum_version_id = 1,
1021    .pre_save = timebase_pre_save,
1022    .fields      = (VMStateField []) {
1023        VMSTATE_UINT64(guest_timebase, PPCTimebase),
1024        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
1025        VMSTATE_END_OF_LIST()
1026    },
1027};
1028
1029/* Set up (once) timebase frequency (in Hz) */
1030clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
1031{
1032    PowerPCCPU *cpu = env_archcpu(env);
1033    ppc_tb_t *tb_env;
1034
1035    tb_env = g_new0(ppc_tb_t, 1);
1036    env->tb_env = tb_env;
1037    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1038    if (is_book3s_arch2x(env)) {
1039        /* All Book3S 64bit CPUs implement level based DEC logic */
1040        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
1041    }
1042    /* Create new timer */
1043    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
1044    if (env->has_hv_mode && !cpu->vhyp) {
1045        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
1046                                                cpu);
1047    } else {
1048        tb_env->hdecr_timer = NULL;
1049    }
1050    cpu_ppc_set_tb_clk(env, freq);
1051
1052    return &cpu_ppc_set_tb_clk;
1053}
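/*
 * Usage sketch (illustrative; the frequency value is made up): a machine
 * init function typically does
 *
 *     clk_setup_cb cb = cpu_ppc_tb_init(env, 16670000);
 *
 * and may later call cb(env, new_freq) if the board lets software retune the
 * timebase clock; cpu_ppc_set_tb_clk() also initialises DECR, HDECR and PURR
 * (see the Linux 2.4 note above).
 */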
1054
1055void cpu_ppc_tb_free(CPUPPCState *env)
1056{
1057    timer_free(env->tb_env->decr_timer);
1058    timer_free(env->tb_env->hdecr_timer);
1059    g_free(env->tb_env);
1060}
1061
1062/* cpu_ppc_hdecr_init may be used if the timer is not used by HDEC emulation */
1063void cpu_ppc_hdecr_init(CPUPPCState *env)
1064{
1065    PowerPCCPU *cpu = env_archcpu(env);
1066
1067    assert(env->tb_env->hdecr_timer == NULL);
1068
1069    env->tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1070                                            &cpu_ppc_hdecr_cb, cpu);
1071}
1072
1073void cpu_ppc_hdecr_exit(CPUPPCState *env)
1074{
1075    PowerPCCPU *cpu = env_archcpu(env);
1076
1077    timer_free(env->tb_env->hdecr_timer);
1078    env->tb_env->hdecr_timer = NULL;
1079
1080    cpu_ppc_hdecr_lower(cpu);
1081}
1082
1083/*****************************************************************************/
1084/* PowerPC 40x timers */
1085
1086/* PIT, FIT & WDT */
1087typedef struct ppc40x_timer_t ppc40x_timer_t;
1088struct ppc40x_timer_t {
1089    uint64_t pit_reload;  /* PIT auto-reload value        */
1090    uint64_t fit_next;    /* Tick for next FIT interrupt  */
1091    QEMUTimer *fit_timer;
1092    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
1093    QEMUTimer *wdt_timer;
1094
1095    /* The 405 has the PIT, the 440 has a DECR.  */
1096    unsigned int decr_excp;
1097};
1098
1099/* Fixed interval timer */
1100static void cpu_4xx_fit_cb (void *opaque)
1101{
1102    PowerPCCPU *cpu = opaque;
1103    CPUPPCState *env = &cpu->env;
1104    ppc_tb_t *tb_env;
1105    ppc40x_timer_t *ppc40x_timer;
1106    uint64_t now, next;
1107
1108    tb_env = env->tb_env;
1109    ppc40x_timer = tb_env->opaque;
1110    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1111    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
1112    case 0:
1113        next = 1 << 9;
1114        break;
1115    case 1:
1116        next = 1 << 13;
1117        break;
1118    case 2:
1119        next = 1 << 17;
1120        break;
1121    case 3:
1122        next = 1 << 21;
1123        break;
1124    default:
1125        /* Cannot occur, but makes gcc happy */
1126        return;
1127    }
1128    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
1129    if (next == now)
1130        next++;
1131    timer_mod(ppc40x_timer->fit_timer, next);
1132    env->spr[SPR_40x_TSR] |= 1 << 26;
1133    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
1134        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
1135    }
1136    trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
1137                         env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1138}
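/*
 * Worked example (illustrative): TCR[FP] selects a FIT period of 2^9, 2^13,
 * 2^17 or 2^21 timebase ticks. With a hypothetical tb_freq of 400 MHz,
 * TCR[FP] = 3 gives 2^21 / 400e6 s, roughly 5.2 ms between fixed-interval
 * interrupts; the TSR FIS bit (1 << 26) is set on every expiry regardless of
 * whether the interrupt itself is enabled.
 */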
1139
1140/* Programmable interval timer */
1141static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
1142{
1143    ppc40x_timer_t *ppc40x_timer;
1144    uint64_t now, next;
1145
1146    ppc40x_timer = tb_env->opaque;
1147    if (ppc40x_timer->pit_reload <= 1 ||
1148        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
1149        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
1150        /* Stop PIT */
1151        trace_ppc4xx_pit_stop();
1152        timer_del(tb_env->decr_timer);
1153    } else {
1154        trace_ppc4xx_pit_start(ppc40x_timer->pit_reload);
1155        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1156        next = now + muldiv64(ppc40x_timer->pit_reload,
1157                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1158        if (is_excp)
1159            next += tb_env->decr_next - now;
1160        if (next == now)
1161            next++;
1162        timer_mod(tb_env->decr_timer, next);
1163        tb_env->decr_next = next;
1164    }
1165}
1166
1167static void cpu_4xx_pit_cb (void *opaque)
1168{
1169    PowerPCCPU *cpu = opaque;
1170    CPUPPCState *env = &cpu->env;
1171    ppc_tb_t *tb_env;
1172    ppc40x_timer_t *ppc40x_timer;
1173
1174    tb_env = env->tb_env;
1175    ppc40x_timer = tb_env->opaque;
1176    env->spr[SPR_40x_TSR] |= 1 << 27;
1177    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
1178        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
1179    }
1180    start_stop_pit(env, tb_env, 1);
1181    trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
1182           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
1183           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
1184           ppc40x_timer->pit_reload);
1185}
1186
1187/* Watchdog timer */
1188static void cpu_4xx_wdt_cb (void *opaque)
1189{
1190    PowerPCCPU *cpu = opaque;
1191    CPUPPCState *env = &cpu->env;
1192    ppc_tb_t *tb_env;
1193    ppc40x_timer_t *ppc40x_timer;
1194    uint64_t now, next;
1195
1196    tb_env = env->tb_env;
1197    ppc40x_timer = tb_env->opaque;
1198    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
1199    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
1200    case 0:
1201        next = 1 << 17;
1202        break;
1203    case 1:
1204        next = 1 << 21;
1205        break;
1206    case 2:
1207        next = 1 << 25;
1208        break;
1209    case 3:
1210        next = 1 << 29;
1211        break;
1212    default:
1213        /* Cannot occur, but makes gcc happy */
1214        return;
1215    }
1216    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
1217    if (next == now)
1218        next++;
1219    trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
1220    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
1221    case 0x0:
1222    case 0x1:
1223        timer_mod(ppc40x_timer->wdt_timer, next);
1224        ppc40x_timer->wdt_next = next;
1225        env->spr[SPR_40x_TSR] |= 1U << 31;
1226        break;
1227    case 0x2:
1228        timer_mod(ppc40x_timer->wdt_timer, next);
1229        ppc40x_timer->wdt_next = next;
1230        env->spr[SPR_40x_TSR] |= 1 << 30;
1231        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
1232            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
1233        }
1234        break;
1235    case 0x3:
1236        env->spr[SPR_40x_TSR] &= ~0x30000000;
1237        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
1238        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
1239        case 0x0:
1240            /* No reset */
1241            break;
1242        case 0x1: /* Core reset */
1243            ppc40x_core_reset(cpu);
1244            break;
1245        case 0x2: /* Chip reset */
1246            ppc40x_chip_reset(cpu);
1247            break;
1248        case 0x3: /* System reset */
1249            ppc40x_system_reset(cpu);
1250            break;
1251        }
1252    }
1253}
1254
1255void store_40x_pit (CPUPPCState *env, target_ulong val)
1256{
1257    ppc_tb_t *tb_env;
1258    ppc40x_timer_t *ppc40x_timer;
1259
1260    tb_env = env->tb_env;
1261    ppc40x_timer = tb_env->opaque;
1262    trace_ppc40x_store_pit(val);
1263    ppc40x_timer->pit_reload = val;
1264    start_stop_pit(env, tb_env, 0);
1265}
1266
1267target_ulong load_40x_pit (CPUPPCState *env)
1268{
1269    return cpu_ppc_load_decr(env);
1270}
1271
1272void store_40x_tsr(CPUPPCState *env, target_ulong val)
1273{
1274    PowerPCCPU *cpu = env_archcpu(env);
1275
1276    trace_ppc40x_store_tsr(val);
1277
1278    env->spr[SPR_40x_TSR] &= ~(val & 0xFC000000);
1279    if (val & 0x80000000) {
1280        ppc_set_irq(cpu, PPC_INTERRUPT_PIT, 0);
1281    }
1282}
1283
1284void store_40x_tcr(CPUPPCState *env, target_ulong val)
1285{
1286    PowerPCCPU *cpu = env_archcpu(env);
1287    ppc_tb_t *tb_env;
1288
1289    trace_ppc40x_store_tcr(val);
1290
1291    tb_env = env->tb_env;
1292    env->spr[SPR_40x_TCR] = val & 0xFFC00000;
1293    start_stop_pit(env, tb_env, 1);
1294    cpu_4xx_wdt_cb(cpu);
1295}
1296
1297static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
1298{
1299    CPUPPCState *env = opaque;
1300    ppc_tb_t *tb_env = env->tb_env;
1301
1302    trace_ppc40x_set_tb_clk(freq);
1303    tb_env->tb_freq = freq;
1304    tb_env->decr_freq = freq;
1305    /* XXX: we should also update all timers */
1306}
1307
1308clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
1309                                  unsigned int decr_excp)
1310{
1311    ppc_tb_t *tb_env;
1312    ppc40x_timer_t *ppc40x_timer;
1313    PowerPCCPU *cpu = env_archcpu(env);
1314
1315    trace_ppc40x_timers_init(freq);
1316
1317    tb_env = g_new0(ppc_tb_t, 1);
1318    ppc40x_timer = g_new0(ppc40x_timer_t, 1);
1319
1320    env->tb_env = tb_env;
1321    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
1322    tb_env->tb_freq = freq;
1323    tb_env->decr_freq = freq;
1324    tb_env->opaque = ppc40x_timer;
1325
1326    /* We use decr timer for PIT */
1327    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, cpu);
1328    ppc40x_timer->fit_timer =
1329        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, cpu);
1330    ppc40x_timer->wdt_timer =
1331        timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, cpu);
1332    ppc40x_timer->decr_excp = decr_excp;
1333
1334    return &ppc_40x_set_tb_clk;
1335}
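/*
 * Usage sketch (illustrative; the frequency value is made up): a 405 SoC
 * model would call
 *
 *     ppc_40x_timers_init(env, 133333333, PPC_INTERRUPT_PIT);
 *
 * so that PIT expiry raises PPC_INTERRUPT_PIT, and keep the returned
 * ppc_40x_set_tb_clk callback to propagate later clock changes to
 * tb_freq/decr_freq.
 */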
1336
1337/*****************************************************************************/
1338/* Embedded PowerPC Device Control Registers */
1339typedef struct ppc_dcrn_t ppc_dcrn_t;
1340struct ppc_dcrn_t {
1341    dcr_read_cb dcr_read;
1342    dcr_write_cb dcr_write;
1343    void *opaque;
1344};
1345
1346/* XXX: on 460, DCR addresses are 32 bits wide,
1347 *      using DCRIPR to get the 22 upper bits of the DCR address
1348 */
1349#define DCRN_NB 1024
1350struct ppc_dcr_t {
1351    ppc_dcrn_t dcrn[DCRN_NB];
1352    int (*read_error)(int dcrn);
1353    int (*write_error)(int dcrn);
1354};
1355
1356int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
1357{
1358    ppc_dcrn_t *dcr;
1359
1360    if (dcrn < 0 || dcrn >= DCRN_NB)
1361        goto error;
1362    dcr = &dcr_env->dcrn[dcrn];
1363    if (dcr->dcr_read == NULL)
1364        goto error;
1365    *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);
1366    trace_ppc_dcr_read(dcrn, *valp);
1367
1368    return 0;
1369
1370 error:
1371    if (dcr_env->read_error != NULL)
1372        return (*dcr_env->read_error)(dcrn);
1373
1374    return -1;
1375}
1376
1377int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
1378{
1379    ppc_dcrn_t *dcr;
1380
1381    if (dcrn < 0 || dcrn >= DCRN_NB)
1382        goto error;
1383    dcr = &dcr_env->dcrn[dcrn];
1384    if (dcr->dcr_write == NULL)
1385        goto error;
1386    trace_ppc_dcr_write(dcrn, val);
1387    (*dcr->dcr_write)(dcr->opaque, dcrn, val);
1388
1389    return 0;
1390
1391 error:
1392    if (dcr_env->write_error != NULL)
1393        return (*dcr_env->write_error)(dcrn);
1394
1395    return -1;
1396}
1397
1398int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
1399                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
1400{
1401    ppc_dcr_t *dcr_env;
1402    ppc_dcrn_t *dcr;
1403
1404    dcr_env = env->dcr_env;
1405    if (dcr_env == NULL)
1406        return -1;
1407    if (dcrn < 0 || dcrn >= DCRN_NB)
1408        return -1;
1409    dcr = &dcr_env->dcrn[dcrn];
1410    if (dcr->opaque != NULL ||
1411        dcr->dcr_read != NULL ||
1412        dcr->dcr_write != NULL)
1413        return -1;
1414    dcr->opaque = opaque;
1415    dcr->dcr_read = dcr_read;
1416    dcr->dcr_write = dcr_write;
1417
1418    return 0;
1419}
1420
1421int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
1422                  int (*write_error)(int dcrn))
1423{
1424    ppc_dcr_t *dcr_env;
1425
1426    dcr_env = g_new0(ppc_dcr_t, 1);
1427    dcr_env->read_error = read_error;
1428    dcr_env->write_error = write_error;
1429    env->dcr_env = dcr_env;
1430
1431    return 0;
1432}
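/*
 * Usage sketch (illustrative; the dcrn value and callbacks are hypothetical):
 * the SoC calls ppc_dcr_init(env, NULL, NULL) once, then each on-chip device
 * registers its registers in the 10-bit DCR space:
 *
 *     ppc_dcr_register(env, 0x0B0, dev, my_dcr_read, my_dcr_write);
 *
 * ppc_dcr_read()/ppc_dcr_write() dispatch mfdcr/mtdcr accesses to those
 * callbacks and fall back to read_error/write_error for unmapped DCRs.
 */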
1433
1434/*****************************************************************************/
1435
1436int ppc_cpu_pir(PowerPCCPU *cpu)
1437{
1438    CPUPPCState *env = &cpu->env;
1439    return env->spr_cb[SPR_PIR].default_value;
1440}
1441
1442PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
1443{
1444    CPUState *cs;
1445
1446    CPU_FOREACH(cs) {
1447        PowerPCCPU *cpu = POWERPC_CPU(cs);
1448
1449        if (ppc_cpu_pir(cpu) == pir) {
1450            return cpu;
1451        }
1452    }
1453
1454    return NULL;
1455}
1456
1457void ppc_irq_reset(PowerPCCPU *cpu)
1458{
1459    CPUPPCState *env = &cpu->env;
1460
1461    env->irq_input_state = 0;
1462    kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0);
1463}
1464