qemu/cputlb.c
/*
 *  Common CPU TLB handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("(%d)\n", flush_global);

    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

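/* Helper shared by the tlb_flush_by_mmuidx functions: flush the TLB and
 * victim TLB for every MMU index in the -1 terminated va_list, then clear
 * the CPU's TB jump cache.
 */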
static inline void v_tlb_flush_by_mmuidx(CPUState *cpu, va_list argp)
{
    CPUArchState *env = cpu->env_ptr;

    tlb_debug("start\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("%d\n", mmu_idx);

        memset(env->tlb_table[mmu_idx], -1, sizeof(env->tlb_table[0]));
        memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0]));
    }

    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
}

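/* Flush TLB entries only for the given MMU modes.  The variable argument
 * list is a sequence of int MMU indexes terminated by a negative value;
 * e.g. (hypothetical usage) tlb_flush_by_mmuidx(cpu, 0, 1, -1) would flush
 * MMU modes 0 and 1 only.
 */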
void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
    va_list argp;
    va_start(argp, cpu);
    v_tlb_flush_by_mmuidx(cpu, argp);
    va_end(argp);
}

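/* Invalidate a single TLB entry if its read, write or code address matches
 * the given page-aligned virtual address.
 */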
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

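/* Flush, in every MMU mode, the main TLB and victim TLB entries that map the
 * virtual page containing addr.  Falls back to a full flush when the page
 * lies within the recorded large-page region, since a large page may be
 * backed by many individual TLB entries.
 */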
void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    tlb_debug("page :" TARGET_FMT_lx "\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forcing full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

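/* As tlb_flush_page, but restricted to the MMU modes given in the trailing
 * -1 terminated argument list.
 */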
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...)
{
    CPUArchState *env = cpu->env_ptr;
    int i, k;
    va_list argp;

    va_start(argp, addr);

    tlb_debug("addr "TARGET_FMT_lx"\n", addr);

    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
        tlb_debug("forced full flush ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  env->tlb_flush_addr, env->tlb_flush_mask);

        v_tlb_flush_by_mmuidx(cpu, argp);
        va_end(argp);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

    for (;;) {
        int mmu_idx = va_arg(argp, int);

        if (mmu_idx < 0) {
            break;
        }

        tlb_debug("idx %d\n", mmu_idx);

        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

        /* check whether there are vtlb entries that need to be flushed */
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }
    va_end(argp);

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the RAM page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

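/* Return true if this entry maps ordinary dirty RAM, i.e. writes through it
 * can take the fast path (none of TLB_INVALID, TLB_MMIO or TLB_NOTDIRTY is
 * set on the write address).
 */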
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

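/* If this writable RAM entry points into the host address range
 * [start, start + length), set TLB_NOTDIRTY so that the next write through
 * it takes the slow path and can update the dirty bitmap.
 */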
void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

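/* Walk every TLB and victim TLB entry of the CPU and mark as not-dirty those
 * entries whose backing host memory lies in [start1, start1 + length), so
 * that subsequent guest writes go through the slow path again.
 */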
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;

        for (i = 0; i < CPU_TLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                  start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                  start1, length);
        }
    }
}

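/* Clear TLB_NOTDIRTY from a single entry that maps vaddr, re-enabling the
 * fast write path for that page.
 */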
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, asidx, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(
                        memory_region_get_ram_addr(section->mr) + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);
    CPUIOTLBEntry *iotlbentry;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1, true);
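    /* If the TLB entry for this page does not hold a valid code address,
     * force a refill by performing a code byte load: cpu_ldub_code() takes
     * the softmmu slow path and fills the entry via tlb_fill().
     */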
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    iotlbentry = &env1->iotlb[mmu_idx][page_index];
    pd = iotlbentry->addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd, iotlbentry->attrs);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

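/* Instantiate the out-of-line softmmu load/store helpers for data accesses.
 * softmmu_template.h is included once per access size; SHIFT is the log2 of
 * the access size in bytes (0 = 1, 1 = 2, 2 = 4, 3 = 8 bytes) and MMUSUFFIX
 * selects the _mmu naming used for these data-access helpers.
 */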
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

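/* Second instantiation for code accesses: SOFTMMU_CODE_ACCESS makes
 * softmmu_template.h generate the _cmmu helpers used by the cpu_ld*_code()
 * accessors.  GETRA and GETPC_ADJ are zeroed because there is no meaningful
 * host return address to attribute a code fetch to.
 */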
#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"