qemu/hw/ppc/ppc.c
/*
 * QEMU generic PowerPC hardware System Emulator
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/ppc_e500.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/cpus.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "trace.h"

//#define PPC_DEBUG_IRQ
//#define PPC_DEBUG_TB

#ifdef PPC_DEBUG_IRQ
#  define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__)
#else
#  define LOG_IRQ(...) do { } while (0)
#endif


#ifdef PPC_DEBUG_TB
#  define LOG_TB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_TB(...) do { } while (0)
#endif

static void cpu_ppc_tb_stop (CPUPPCState *env);
static void cpu_ppc_tb_start (CPUPPCState *env);

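/*
 * Raise or clear one of the CPU's internal interrupt sources, updating
 * env->pending_interrupts and CPU_INTERRUPT_HARD, and forward the change
 * to KVM when the pending state actually changes.
 */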
void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    unsigned int old_pending;
    bool locked = false;

    /* We may already have the BQL if coming from the reset path */
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    old_pending = env->pending_interrupts;

    if (level) {
        env->pending_interrupts |= 1 << n_IRQ;
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        env->pending_interrupts &= ~(1 << n_IRQ);
        if (env->pending_interrupts == 0) {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
        }
    }

    if (old_pending != env->pending_interrupts) {
        kvmppc_set_interrupt(cpu, n_IRQ, level);
    }

    LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32
                "req %08x\n", __func__, env, n_IRQ, level,
                env->pending_interrupts, CPU(cpu)->interrupt_request);

    if (locked) {
        qemu_mutex_unlock_iothread();
    }
}

/* PowerPC 6xx / 7xx internal IRQ controller */
static void ppc6xx_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC6xx_INPUT_TBEN:
            /* Level sensitive - active high */
            LOG_IRQ("%s: %s the time base\n",
                        __func__, level ? "start" : "stop");
            if (level) {
                cpu_ppc_tb_start(env);
            } else {
                cpu_ppc_tb_stop(env);
            }
            break;
        case PPC6xx_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC6xx_INPUT_SMI:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the SMI IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level);
            break;
        case PPC6xx_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depend on HID0 status
             *            603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                LOG_IRQ("%s: raise machine check state\n",
                            __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC6xx_INPUT_CKSTP_IN:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            /* XXX: Note that the only way to restart the CPU is to reset it */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            }
            break;
        case PPC6xx_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                LOG_IRQ("%s: reset the CPU\n", __func__);
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC6xx_INPUT_SRESET:
            LOG_IRQ("%s: set the RESET IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

void ppc6xx_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc6xx_set_irq, cpu,
                                                  PPC6xx_INPUT_NB);
}

#if defined(TARGET_PPC64)
/* PowerPC 970 internal IRQ controller */
static void ppc970_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC970_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC970_INPUT_THINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__,
                        level);
            ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level);
            break;
        case PPC970_INPUT_MCP:
            /* Negative edge sensitive */
            /* XXX: TODO: actual reaction may depend on HID0 status
             *            603/604/740/750: check HID0[EMCP]
             */
            if (cur_level == 1 && level == 0) {
                LOG_IRQ("%s: raise machine check state\n",
                            __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1);
            }
            break;
        case PPC970_INPUT_CKSTP:
            /* Level sensitive - active low */
            /* XXX: TODO: relay the signal to CKSTP_OUT pin */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC970_INPUT_HRESET:
            /* Level sensitive - active low */
            if (level) {
                cpu_interrupt(cs, CPU_INTERRUPT_RESET);
            }
            break;
        case PPC970_INPUT_SRESET:
            LOG_IRQ("%s: set the RESET IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level);
            break;
        case PPC970_INPUT_TBEN:
            LOG_IRQ("%s: set the TBEN state to %d\n", __func__,
                        level);
            /* XXX: TODO */
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

void ppc970_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc970_set_irq, cpu,
                                                  PPC970_INPUT_NB);
}

/* POWER7 internal IRQ controller */
static void power7_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);

    switch (pin) {
    case POWER7_INPUT_INT:
        /* Level sensitive - active high */
        LOG_IRQ("%s: set the external IRQ state to %d\n",
                __func__, level);
        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
        break;
    default:
        /* Unknown pin - do nothing */
        LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
        return;
    }
    if (level) {
        env->irq_input_state |= 1 << pin;
    } else {
        env->irq_input_state &= ~(1 << pin);
    }
}

void ppcPOWER7_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&power7_set_irq, cpu,
                                                  POWER7_INPUT_NB);
}

/* POWER9 internal IRQ controller */
static void power9_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);

    switch (pin) {
    case POWER9_INPUT_INT:
        /* Level sensitive - active high */
        LOG_IRQ("%s: set the external IRQ state to %d\n",
                __func__, level);
        ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
        break;
    case POWER9_INPUT_HINT:
        /* Level sensitive - active high */
        LOG_IRQ("%s: set the external IRQ state to %d\n",
                __func__, level);
        ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level);
        break;
    default:
        /* Unknown pin - do nothing */
        LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
        return;
    }
    if (level) {
        env->irq_input_state |= 1 << pin;
    } else {
        env->irq_input_state &= ~(1 << pin);
    }
}

void ppcPOWER9_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&power9_set_irq, cpu,
                                                  POWER9_INPUT_NB);
}
#endif /* defined(TARGET_PPC64) */

void ppc40x_core_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC core\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    dbsr = env->spr[SPR_40x_DBSR];
    dbsr &= ~0x00000300;
    dbsr |= 0x00000100;
    env->spr[SPR_40x_DBSR] = dbsr;
}

void ppc40x_chip_reset(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    target_ulong dbsr;

    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC chip\n");
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_RESET);
    /* XXX: TODO reset all internal peripherals */
    dbsr = env->spr[SPR_40x_DBSR];
    dbsr &= ~0x00000300;
    dbsr |= 0x00000200;
    env->spr[SPR_40x_DBSR] = dbsr;
}

void ppc40x_system_reset(PowerPCCPU *cpu)
{
    qemu_log_mask(CPU_LOG_RESET, "Reset PowerPC system\n");
    qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
}

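/*
 * Writing DBCR0 can trigger a reset: bits 29..28 of the value select the
 * action (0: none, 1: core reset, 2: chip reset, 3: system reset).
 */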
void store_40x_dbcr0(CPUPPCState *env, uint32_t val)
{
    PowerPCCPU *cpu = env_archcpu(env);

    switch ((val >> 28) & 0x3) {
    case 0x0:
        /* No action */
        break;
    case 0x1:
        /* Core reset */
        ppc40x_core_reset(cpu);
        break;
    case 0x2:
        /* Chip reset */
        ppc40x_chip_reset(cpu);
        break;
    case 0x3:
        /* System reset */
        ppc40x_system_reset(cpu);
        break;
    }
}

/* PowerPC 40x internal IRQ controller */
static void ppc40x_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        CPUState *cs = CPU(cpu);

        switch (pin) {
        case PPC40x_INPUT_RESET_SYS:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                            __func__);
                ppc40x_system_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CHIP:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC chip\n", __func__);
                ppc40x_chip_reset(cpu);
            }
            break;
        case PPC40x_INPUT_RESET_CORE:
            /* XXX: TODO: update DBSR[MRR] */
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc40x_core_reset(cpu);
            }
            break;
        case PPC40x_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPC40x_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the external IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPC40x_INPUT_HALT:
            /* Level sensitive - active low */
            if (level) {
                LOG_IRQ("%s: stop the CPU\n", __func__);
                cs->halted = 1;
            } else {
                LOG_IRQ("%s: restart the CPU\n", __func__);
                cs->halted = 0;
                qemu_cpu_kick(cs);
            }
            break;
        case PPC40x_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

void ppc40x_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppc40x_set_irq,
                                                  cpu, PPC40x_INPUT_NB);
}

/* PowerPC E500 internal IRQ controller */
static void ppce500_set_irq(void *opaque, int pin, int level)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int cur_level;

    LOG_IRQ("%s: env %p pin %d level %d\n", __func__,
                env, pin, level);
    cur_level = (env->irq_input_state >> pin) & 1;
    /* Don't generate spurious events */
    if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) {
        switch (pin) {
        case PPCE500_INPUT_MCK:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC system\n",
                            __func__);
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            }
            break;
        case PPCE500_INPUT_RESET_CORE:
            if (level) {
                LOG_IRQ("%s: reset the PowerPC core\n", __func__);
                ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level);
            }
            break;
        case PPCE500_INPUT_CINT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the critical IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level);
            break;
        case PPCE500_INPUT_INT:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the core IRQ state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level);
            break;
        case PPCE500_INPUT_DEBUG:
            /* Level sensitive - active high */
            LOG_IRQ("%s: set the debug pin state to %d\n",
                        __func__, level);
            ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level);
            break;
        default:
            /* Unknown pin - do nothing */
            LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin);
            return;
        }
        if (level) {
            env->irq_input_state |= 1 << pin;
        } else {
            env->irq_input_state &= ~(1 << pin);
        }
    }
}

void ppce500_irq_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    env->irq_inputs = (void **)qemu_allocate_irqs(&ppce500_set_irq,
                                                  cpu, PPCE500_INPUT_NB);
}

/* Enable or disable the E500 EPR capability */
void ppce500_set_mpic_proxy(bool enabled)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);

        cpu->env.mpic_proxy = enabled;
        if (kvm_enabled()) {
            kvmppc_set_mpic_proxy(cpu, enabled);
        }
    }
}

/*****************************************************************************/
/* PowerPC time base and decrementer emulation */

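/*
 * Convert a QEMU_CLOCK_VIRTUAL timestamp (in ns) into timebase ticks at
 * tb_freq and apply the given offset.
 */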
uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset)
{
    /* TB time in tb periods */
    return muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND) + tb_offset;
}

uint64_t cpu_ppc_load_tbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    if (kvm_enabled()) {
        return env->spr[SPR_TBL];
    }

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

uint32_t cpu_ppc_load_tbu (CPUPPCState *env)
{
    if (kvm_enabled()) {
        return env->spr[SPR_TBU];
    }

    return _cpu_ppc_load_tbu(env);
}

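/*
 * Recompute *tb_offsetp so that a timebase read at time @vmclk
 * returns @value.
 */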
static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk,
                                    int64_t *tb_offsetp, uint64_t value)
{
    *tb_offsetp = value -
        muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND);

    LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n",
                __func__, value, *tb_offsetp);
}

void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, tb | (uint64_t)value);
}

static inline void _cpu_ppc_store_tbu(CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->tb_offset, ((uint64_t)value << 32) | tb);
}

void cpu_ppc_store_tbu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

uint64_t cpu_ppc_load_atbl (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb;
}

uint32_t cpu_ppc_load_atbu (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb);

    return tb >> 32;
}

void cpu_ppc_store_atbl (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0xFFFFFFFF00000000ULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, tb | (uint64_t)value);
}

void cpu_ppc_store_atbu (CPUPPCState *env, uint32_t value)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb;

    tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset);
    tb &= 0x00000000FFFFFFFFULL;
    cpu_ppc_store_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                     &tb_env->atb_offset, ((uint64_t)value << 32) | tb);
}

static void cpu_ppc_tb_stop (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is already frozen, do nothing */
    if (tb_env->tb_freq != 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base */
        tb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->tb_offset);
        /* Get the alternate time base */
        atb = cpu_ppc_get_tb(tb_env, vmclk, tb_env->atb_offset);
        /* Store the time base value (ie compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value (compute the current offset) */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
        /* Set the time base frequency to zero */
        tb_env->tb_freq = 0;
        /* Now, the time bases are frozen to tb_offset / atb_offset value */
    }
}

static void cpu_ppc_tb_start (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t tb, atb, vmclk;

    /* If the time base is not frozen, do nothing */
    if (tb_env->tb_freq == 0) {
        vmclk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        /* Get the time base from tb_offset */
        tb = tb_env->tb_offset;
        /* Get the alternate time base from atb_offset */
        atb = tb_env->atb_offset;
        /* Restore the tb frequency from the decrementer frequency */
        tb_env->tb_freq = tb_env->decr_freq;
        /* Store the time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->tb_offset, tb);
        /* Store the alternate time base value */
        cpu_ppc_store_tb(tb_env, vmclk, &tb_env->atb_offset, atb);
    }
}

bool ppc_decr_clear_on_delivery(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    int flags = PPC_DECR_UNDERFLOW_TRIGGERED | PPC_DECR_UNDERFLOW_LEVEL;
    return ((tb_env->flags & flags) == PPC_DECR_UNDERFLOW_TRIGGERED);
}

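/*
 * Compute the current decrementer value from the time remaining until the
 * next timer event.  Book E decrementers stop at zero; the others keep
 * counting into negative values.
 */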
static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next)
{
    ppc_tb_t *tb_env = env->tb_env;
    int64_t decr, diff;

    diff = next - qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    if (diff >= 0) {
        decr = muldiv64(diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    } else if (tb_env->flags & PPC_TIMER_BOOKE) {
        decr = 0;
    } else {
        decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND);
    }
    LOG_TB("%s: %016" PRIx64 "\n", __func__, decr);

    return decr;
}

target_ulong cpu_ppc_load_decr(CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t decr;

    if (kvm_enabled()) {
        return env->spr[SPR_DECR];
    }

    decr = _cpu_ppc_load_decr(env, tb_env->decr_next);

    /*
     * If the large decrementer is enabled then the decrementer is sign
     * extended to 64 bits, otherwise it is a 32 bit value.
     */
    if (env->spr[SPR_LPCR] & LPCR_LD) {
        return decr;
    }
    return (uint32_t) decr;
}

target_ulong cpu_ppc_load_hdecr(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t hdecr;

    hdecr = _cpu_ppc_load_decr(env, tb_env->hdecr_next);

    /*
     * If we have a large decrementer (POWER9 or later) then hdecr is sign
     * extended to 64 bits, otherwise it is 32 bits.
     */
    if (pcc->lrg_decr_bits > 32) {
        return hdecr;
    }
    return (uint32_t) hdecr;
}

uint64_t cpu_ppc_load_purr (CPUPPCState *env)
{
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t diff;

    diff = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - tb_env->purr_start;

    return tb_env->purr_load +
        muldiv64(diff, tb_env->tb_freq, NANOSECONDS_PER_SECOND);
}

/*
 * When the decrementer expires, all we need to do is generate or queue
 * a CPU exception.
 */
static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu)
{
    /* Raise it */
    LOG_TB("raise decrementer exception\n");
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1);
}

static inline void cpu_ppc_decr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 0);
}

static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;

    /* Raise it */
    LOG_TB("raise hv decrementer exception\n");

    /*
     * The architecture specifies that we don't deliver HDEC
     * interrupts in a PM state. Not only do they not cause a
     * wakeup, they also get effectively discarded.
     */
    if (!env->resume_as_sreset) {
        ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 1);
    }
}

static inline void cpu_ppc_hdecr_lower(PowerPCCPU *cpu)
{
    ppc_set_irq(cpu, PPC_INTERRUPT_HDECR, 0);
}

static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp,
                                 QEMUTimer *timer,
                                 void (*raise_excp)(void *),
                                 void (*lower_excp)(PowerPCCPU *),
                                 target_ulong decr, target_ulong value,
                                 int nr_bits)
{
    CPUPPCState *env = &cpu->env;
    ppc_tb_t *tb_env = env->tb_env;
    uint64_t now, next;
    bool negative;

    /* Truncate value to decr_width and sign extend for simplicity */
    value &= ((1ULL << nr_bits) - 1);
    negative = !!(value & (1ULL << (nr_bits - 1)));
    if (negative) {
        value |= (0xFFFFFFFFULL << nr_bits);
    }

    LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__,
                decr, value);

    if (kvm_enabled()) {
        /* KVM handles decrementer exceptions, we don't need our own timer */
        return;
    }

    /*
     * Going from 2 -> 1, 1 -> 0 or 0 -> -1 is the event to generate a DEC
     * interrupt.
     *
     * If we get a really small DEC value, we can assume that by the time we
     * handled it we should inject an interrupt already.
     *
     * On MSB level based DEC implementations the MSB always means the interrupt
     * is pending, so raise it on those.
     *
     * On MSB edge based DEC implementations the MSB going from 0 -> 1 triggers
     * an edge interrupt, so raise it here too.
     */
    if ((value < 3) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) ||
        ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative
          && !(decr & (1ULL << (nr_bits - 1))))) {
        (*raise_excp)(cpu);
        return;
    }

    /* On MSB level based systems a 0 for the MSB stops interrupt delivery */
    if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) {
        (*lower_excp)(cpu);
    }

    /* Calculate the next timer event */
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    next = now + muldiv64(value, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    *nextp = next;

    /* Adjust timer */
    timer_mod(timer, next);
}

static inline void _cpu_ppc_store_decr(PowerPCCPU *cpu, target_ulong decr,
                                       target_ulong value, int nr_bits)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    __cpu_ppc_store_decr(cpu, &tb_env->decr_next, tb_env->decr_timer,
                         tb_env->decr_timer->cb, &cpu_ppc_decr_lower, decr,
                         value, nr_bits);
}

void cpu_ppc_store_decr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    int nr_bits = 32;

    if (env->spr[SPR_LPCR] & LPCR_LD) {
        nr_bits = pcc->lrg_decr_bits;
    }

    _cpu_ppc_store_decr(cpu, cpu_ppc_load_decr(env), value, nr_bits);
}

static void cpu_ppc_decr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_decr_excp(cpu);
}

static inline void _cpu_ppc_store_hdecr(PowerPCCPU *cpu, target_ulong hdecr,
                                        target_ulong value, int nr_bits)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    if (tb_env->hdecr_timer != NULL) {
        __cpu_ppc_store_decr(cpu, &tb_env->hdecr_next, tb_env->hdecr_timer,
                             tb_env->hdecr_timer->cb, &cpu_ppc_hdecr_lower,
                             hdecr, value, nr_bits);
    }
}

void cpu_ppc_store_hdecr(CPUPPCState *env, target_ulong value)
{
    PowerPCCPU *cpu = env_archcpu(env);
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    _cpu_ppc_store_hdecr(cpu, cpu_ppc_load_hdecr(env), value,
                         pcc->lrg_decr_bits);
}

static void cpu_ppc_hdecr_cb(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    cpu_ppc_hdecr_excp(cpu);
}

static void cpu_ppc_store_purr(PowerPCCPU *cpu, uint64_t value)
{
    ppc_tb_t *tb_env = cpu->env.tb_env;

    tb_env->purr_load = value;
    tb_env->purr_start = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}

static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env = env->tb_env;

    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /*
     * There is a bug in Linux 2.4 kernels: if a decrementer exception is
     * pending when the kernel enables msr_ee at startup, it's not ready to
     * handle it...
     */
    _cpu_ppc_store_decr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    _cpu_ppc_store_hdecr(cpu, 0xFFFFFFFF, 0xFFFFFFFF, 32);
    cpu_ppc_store_purr(cpu, 0x0000000000000000ULL);
}

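/*
 * Snapshot the guest timebase against the host tick counter so it can be
 * restored when the VM resumes (used around migration and stop/start).
 */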
static void timebase_save(PPCTimebase *tb)
{
    uint64_t ticks = cpu_get_host_ticks();
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    /* not used anymore, we keep it for compatibility */
    tb->time_of_the_day_ns = qemu_clock_get_ns(QEMU_CLOCK_HOST);
    /*
     * tb_offset is only expected to be changed by QEMU so
     * there is no need to update it from KVM here
     */
    tb->guest_timebase = ticks + first_ppc_cpu->env.tb_env->tb_offset;
}

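/*
 * Recompute the timebase offset from the saved guest timebase and the
 * current host ticks, then propagate it to every vCPU (and to KVM).
 */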
static void timebase_load(PPCTimebase *tb)
{
    CPUState *cpu;
    PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
    int64_t tb_off_adj, tb_off;
    unsigned long freq;

    if (!first_ppc_cpu->env.tb_env) {
        error_report("No timebase object");
        return;
    }

    freq = first_ppc_cpu->env.tb_env->tb_freq;

    tb_off_adj = tb->guest_timebase - cpu_get_host_ticks();

    tb_off = first_ppc_cpu->env.tb_env->tb_offset;
    trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off,
                        (tb_off_adj - tb_off) / freq);

    /* Set new offset to all CPUs */
    CPU_FOREACH(cpu) {
        PowerPCCPU *pcpu = POWERPC_CPU(cpu);
        pcpu->env.tb_env->tb_offset = tb_off_adj;
        kvmppc_set_reg_tb_offset(pcpu, pcpu->env.tb_env->tb_offset);
    }
}

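/*
 * vm_change_state handler: save the timebase when the VM stops and
 * restore it when the VM starts running again.
 */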
void cpu_ppc_clock_vm_state_change(void *opaque, int running,
                                   RunState state)
{
    PPCTimebase *tb = opaque;

    if (running) {
        timebase_load(tb);
    } else {
        timebase_save(tb);
    }
}

/*
 * When migrating, read the clock just before migration,
 * so that the guest clock counts during the events
 * between:
 *
 *  * vm_stop()
 *  * pre_save()
 *
 *  This reduces clock difference on migration from 5s
 *  to 0.1s (when max_downtime == 5s), because sending the
 *  final pages of memory (which happens between vm_stop()
 *  and pre_save()) takes max_downtime.
 */
static int timebase_pre_save(void *opaque)
{
    PPCTimebase *tb = opaque;

    timebase_save(tb);

    return 0;
}

const VMStateDescription vmstate_ppc_timebase = {
    .name = "timebase",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = timebase_pre_save,
    .fields      = (VMStateField []) {
        VMSTATE_UINT64(guest_timebase, PPCTimebase),
        VMSTATE_INT64(time_of_the_day_ns, PPCTimebase),
        VMSTATE_END_OF_LIST()
    },
};

/* Set up (once) timebase frequency (in Hz) */
clk_setup_cb cpu_ppc_tb_init (CPUPPCState *env, uint32_t freq)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_tb_t *tb_env;

    tb_env = g_malloc0(sizeof(ppc_tb_t));
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    if (is_book3s_arch2x(env)) {
        /* All Book3S 64bit CPUs implement level based DEC logic */
        tb_env->flags |= PPC_DECR_UNDERFLOW_LEVEL;
    }
    /* Create new timer */
    tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_decr_cb, cpu);
    if (env->has_hv_mode) {
        tb_env->hdecr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_ppc_hdecr_cb,
                                                cpu);
    } else {
        tb_env->hdecr_timer = NULL;
    }
    cpu_ppc_set_tb_clk(env, freq);

    return &cpu_ppc_set_tb_clk;
}

/* Specific helpers for POWER & PowerPC 601 RTC */
void cpu_ppc601_store_rtcu (CPUPPCState *env, uint32_t value)
{
    _cpu_ppc_store_tbu(env, value);
}

uint32_t cpu_ppc601_load_rtcu (CPUPPCState *env)
{
    return _cpu_ppc_load_tbu(env);
}

void cpu_ppc601_store_rtcl (CPUPPCState *env, uint32_t value)
{
    cpu_ppc_store_tbl(env, value & 0x3FFFFF80);
}

uint32_t cpu_ppc601_load_rtcl (CPUPPCState *env)
{
    return cpu_ppc_load_tbl(env) & 0x3FFFFF80;
}

/*****************************************************************************/
/* PowerPC 40x timers */

/* PIT, FIT & WDT */
typedef struct ppc40x_timer_t ppc40x_timer_t;
struct ppc40x_timer_t {
    uint64_t pit_reload;  /* PIT auto-reload value        */
    uint64_t fit_next;    /* Tick for next FIT interrupt  */
    QEMUTimer *fit_timer;
    uint64_t wdt_next;    /* Tick for next WDT interrupt  */
    QEMUTimer *wdt_timer;

    /* The 405 has a PIT, the 440 has a DECR.  */
    unsigned int decr_excp;
};

/* Fixed interval timer */
static void cpu_4xx_fit_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    env = opaque;
    cpu = env_archcpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    switch ((env->spr[SPR_40x_TCR] >> 24) & 0x3) {
    case 0:
        next = 1 << 9;
        break;
    case 1:
        next = 1 << 13;
        break;
    case 2:
        next = 1 << 17;
        break;
    case 3:
        next = 1 << 21;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->tb_freq);
    if (next == now) {
        next++;
    }
    timer_mod(ppc40x_timer->fit_timer, next);
    env->spr[SPR_40x_TSR] |= 1 << 26;
    if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) {
        ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1);
    }
    LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
           (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
}

/* Programmable interval timer */
static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp)
{
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    ppc40x_timer = tb_env->opaque;
    if (ppc40x_timer->pit_reload <= 1 ||
        !((env->spr[SPR_40x_TCR] >> 26) & 0x1) ||
        (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) {
        /* Stop PIT */
        LOG_TB("%s: stop PIT\n", __func__);
        timer_del(tb_env->decr_timer);
    } else {
        LOG_TB("%s: start PIT %016" PRIx64 "\n",
                    __func__, ppc40x_timer->pit_reload);
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        next = now + muldiv64(ppc40x_timer->pit_reload,
                              NANOSECONDS_PER_SECOND, tb_env->decr_freq);
        if (is_excp) {
            next += tb_env->decr_next - now;
        }
        if (next == now) {
            next++;
        }
        timer_mod(tb_env->decr_timer, next);
        tb_env->decr_next = next;
    }
}

static void cpu_4xx_pit_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    env = opaque;
    cpu = env_archcpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    env->spr[SPR_40x_TSR] |= 1 << 27;
    if ((env->spr[SPR_40x_TCR] >> 26) & 0x1) {
        ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1);
    }
    start_stop_pit(env, tb_env, 1);
    LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " "
           "%016" PRIx64 "\n", __func__,
           (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1),
           (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1),
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR],
           ppc40x_timer->pit_reload);
}

/* Watchdog timer */
static void cpu_4xx_wdt_cb (void *opaque)
{
    PowerPCCPU *cpu;
    CPUPPCState *env;
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;
    uint64_t now, next;

    env = opaque;
    cpu = env_archcpu(env);
    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    switch ((env->spr[SPR_40x_TCR] >> 30) & 0x3) {
    case 0:
        next = 1 << 17;
        break;
    case 1:
        next = 1 << 21;
        break;
    case 2:
        next = 1 << 25;
        break;
    case 3:
        next = 1 << 29;
        break;
    default:
        /* Cannot occur, but makes gcc happy */
        return;
    }
    next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq);
    if (next == now) {
        next++;
    }
    LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__,
           env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]);
    switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) {
    case 0x0:
    case 0x1:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1U << 31;
        break;
    case 0x2:
        timer_mod(ppc40x_timer->wdt_timer, next);
        ppc40x_timer->wdt_next = next;
        env->spr[SPR_40x_TSR] |= 1 << 30;
        if ((env->spr[SPR_40x_TCR] >> 27) & 0x1) {
            ppc_set_irq(cpu, PPC_INTERRUPT_WDT, 1);
        }
        break;
    case 0x3:
        env->spr[SPR_40x_TSR] &= ~0x30000000;
        env->spr[SPR_40x_TSR] |= env->spr[SPR_40x_TCR] & 0x30000000;
        switch ((env->spr[SPR_40x_TCR] >> 28) & 0x3) {
        case 0x0:
            /* No reset */
            break;
        case 0x1: /* Core reset */
            ppc40x_core_reset(cpu);
            break;
        case 0x2: /* Chip reset */
            ppc40x_chip_reset(cpu);
            break;
        case 0x3: /* System reset */
            ppc40x_system_reset(cpu);
            break;
        }
    }
}

void store_40x_pit (CPUPPCState *env, target_ulong val)
{
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    tb_env = env->tb_env;
    ppc40x_timer = tb_env->opaque;
    LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val);
    ppc40x_timer->pit_reload = val;
    start_stop_pit(env, tb_env, 0);
}

target_ulong load_40x_pit (CPUPPCState *env)
{
    return cpu_ppc_load_decr(env);
}

static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq)
{
    CPUPPCState *env = opaque;
    ppc_tb_t *tb_env = env->tb_env;

    LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__,
                freq);
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    /* XXX: we should also update all timers */
}

clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq,
                                  unsigned int decr_excp)
{
    ppc_tb_t *tb_env;
    ppc40x_timer_t *ppc40x_timer;

    tb_env = g_malloc0(sizeof(ppc_tb_t));
    env->tb_env = tb_env;
    tb_env->flags = PPC_DECR_UNDERFLOW_TRIGGERED;
    ppc40x_timer = g_malloc0(sizeof(ppc40x_timer_t));
    tb_env->tb_freq = freq;
    tb_env->decr_freq = freq;
    tb_env->opaque = ppc40x_timer;
    LOG_TB("%s freq %" PRIu32 "\n", __func__, freq);
    if (ppc40x_timer != NULL) {
        /* We use decr timer for PIT */
        tb_env->decr_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env);
        ppc40x_timer->fit_timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_fit_cb, env);
        ppc40x_timer->wdt_timer =
            timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_wdt_cb, env);
        ppc40x_timer->decr_excp = decr_excp;
    }

    return &ppc_40x_set_tb_clk;
}

/*****************************************************************************/
/* Embedded PowerPC Device Control Registers */
typedef struct ppc_dcrn_t ppc_dcrn_t;
struct ppc_dcrn_t {
    dcr_read_cb dcr_read;
    dcr_write_cb dcr_write;
    void *opaque;
};

/* XXX: on 460, DCR addresses are 32 bits wide,
 *      using DCRIPR to get the 22 upper bits of the DCR address
 */
#define DCRN_NB 1024
struct ppc_dcr_t {
    ppc_dcrn_t dcrn[DCRN_NB];
    int (*read_error)(int dcrn);
    int (*write_error)(int dcrn);
};

int ppc_dcr_read (ppc_dcr_t *dcr_env, int dcrn, uint32_t *valp)
{
    ppc_dcrn_t *dcr;

    if (dcrn < 0 || dcrn >= DCRN_NB) {
        goto error;
    }
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->dcr_read == NULL) {
        goto error;
    }
    *valp = (*dcr->dcr_read)(dcr->opaque, dcrn);

    return 0;

 error:
    if (dcr_env->read_error != NULL) {
        return (*dcr_env->read_error)(dcrn);
    }

    return -1;
}

int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val)
{
    ppc_dcrn_t *dcr;

    if (dcrn < 0 || dcrn >= DCRN_NB) {
        goto error;
    }
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->dcr_write == NULL) {
        goto error;
    }
    (*dcr->dcr_write)(dcr->opaque, dcrn, val);

    return 0;

 error:
    if (dcr_env->write_error != NULL) {
        return (*dcr_env->write_error)(dcrn);
    }

    return -1;
}

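/*
 * Register read/write callbacks for one DCR.  Fails (-1) if the DCR number
 * is out of range or already has a handler attached.
 */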
int ppc_dcr_register (CPUPPCState *env, int dcrn, void *opaque,
                      dcr_read_cb dcr_read, dcr_write_cb dcr_write)
{
    ppc_dcr_t *dcr_env;
    ppc_dcrn_t *dcr;

    dcr_env = env->dcr_env;
    if (dcr_env == NULL) {
        return -1;
    }
    if (dcrn < 0 || dcrn >= DCRN_NB) {
        return -1;
    }
    dcr = &dcr_env->dcrn[dcrn];
    if (dcr->opaque != NULL ||
        dcr->dcr_read != NULL ||
        dcr->dcr_write != NULL) {
        return -1;
    }
    dcr->opaque = opaque;
    dcr->dcr_read = dcr_read;
    dcr->dcr_write = dcr_write;

    return 0;
}

int ppc_dcr_init (CPUPPCState *env, int (*read_error)(int dcrn),
                  int (*write_error)(int dcrn))
{
    ppc_dcr_t *dcr_env;

    dcr_env = g_malloc0(sizeof(ppc_dcr_t));
    dcr_env->read_error = read_error;
    dcr_env->write_error = write_error;
    env->dcr_env = dcr_env;

    return 0;
}

/*****************************************************************************/
/* Debug port */
void PPC_debug_write (void *opaque, uint32_t addr, uint32_t val)
{
    addr &= 0xF;
    switch (addr) {
    case 0:
        printf("%c", val);
        break;
    case 1:
        printf("\n");
        fflush(stdout);
        break;
    case 2:
        printf("Set loglevel to %04" PRIx32 "\n", val);
        qemu_set_log(val | 0x100);
        break;
    }
}

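/*
 * Return the vCPU whose PIR (Processor Identification Register) default
 * value matches @pir, or NULL if there is none.
 */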
PowerPCCPU *ppc_get_vcpu_by_pir(int pir)
{
    CPUState *cs;

    CPU_FOREACH(cs) {
        PowerPCCPU *cpu = POWERPC_CPU(cs);
        CPUPPCState *env = &cpu->env;

        if (env->spr_cb[SPR_PIR].default_value == pir) {
            return cpu;
        }
    }

    return NULL;
}