qemu/accel/tcg/tb-maint.c
/*
 * Translation Block Maintenance
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/cputlb.h"
#include "exec/log.h"
#include "exec/exec-all.h"
#include "exec/translate-all.h"
#include "sysemu/tcg.h"
#include "tcg/tcg.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"


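/*
 * Compare two TBs for hash-table equality: pc (unless TARGET_TB_PCREL),
 * cs_base, flags, cflags (ignoring CF_INVALID), trace_vcpu_dstate and
 * both physical page addresses must match.
 */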
static bool tb_cmp(const void *ap, const void *bp)
{
    const TranslationBlock *a = ap;
    const TranslationBlock *b = bp;

    return ((TARGET_TB_PCREL || tb_pc(a) == tb_pc(b)) &&
            a->cs_base == b->cs_base &&
            a->flags == b->flags &&
            (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
            a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
            tb_page_addr0(a) == tb_page_addr0(b) &&
            tb_page_addr1(a) == tb_page_addr1(b));
}

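/* Initialize the global TB hash table, using tb_cmp() to compare entries. */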
void tb_htable_init(void)
{
    unsigned int mode = QHT_MODE_AUTO_RESIZE;

    qht_init(&tb_ctx.htable, tb_cmp, CODE_GEN_HTABLE_SIZE, mode);
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_lock(&pd[i]);
            pd[i].first_tb = (uintptr_t)NULL;
            page_unlock(&pd[i]);
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < V_L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

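/* Walk every level of the l1_map and clear the TB list of each PageDesc. */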
static void page_flush_tb(void)
{
    int i, l1_sz = v_l1_size;

    for (i = 0; i < l1_sz; i++) {
        page_flush_tb_1(v_l2_levels, l1_map + i);
    }
}

/* flush all the translation blocks */
static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
{
    bool did_flush = false;

    mmap_lock();
    /* If it has already been done on request of another CPU, just retry. */
    if (tb_ctx.tb_flush_count != tb_flush_count.host_int) {
        goto done;
    }
    did_flush = true;

    CPU_FOREACH(cpu) {
        tcg_flush_jmp_cache(cpu);
    }

    qht_reset_size(&tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
    page_flush_tb();

    tcg_region_reset_all();
    /* XXX: flush processor icache at this point if cache flush is expensive */
    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);

done:
    mmap_unlock();
    if (did_flush) {
        qemu_plugin_flush_cb();
    }
}

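/*
 * Flush all translation blocks.  If the calling vCPU already runs in an
 * exclusive context the flush is done synchronously; otherwise it is
 * scheduled as async safe work.  The current tb_flush_count is passed along
 * so that a flush already performed for another request is not repeated.
 */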
void tb_flush(CPUState *cpu)
{
    if (tcg_enabled()) {
        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);

        if (cpu_in_exclusive_context(cpu)) {
            do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
        } else {
            async_safe_run_on_cpu(cpu, do_tb_flush,
                                  RUN_ON_CPU_HOST_INT(tb_flush_count));
        }
    }
}

/*
 * user-mode: call with mmap_lock held
 * !user-mode: call with @pd->lock held
 */
static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    uintptr_t *pprev;
    unsigned int n1;

    assert_page_locked(pd);
    pprev = &pd->first_tb;
    PAGE_FOR_EACH_TB(pd, tb1, n1) {
        if (tb1 == tb) {
            *pprev = tb1->page_next[n1];
            return;
        }
        pprev = &tb1->page_next[n1];
    }
    g_assert_not_reached();
}

/* remove @orig from its @n_orig-th jump list */
static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig)
{
    uintptr_t ptr, ptr_locked;
    TranslationBlock *dest;
    TranslationBlock *tb;
    uintptr_t *pprev;
    int n;

    /* mark the LSB of jmp_dest[] so that no further jumps can be inserted */
    ptr = qatomic_or_fetch(&orig->jmp_dest[n_orig], 1);
    dest = (TranslationBlock *)(ptr & ~1);
    if (dest == NULL) {
        return;
    }

    qemu_spin_lock(&dest->jmp_lock);
    /*
     * While acquiring the lock, the jump might have been removed if the
     * destination TB was invalidated; check again.
     */
    ptr_locked = qatomic_read(&orig->jmp_dest[n_orig]);
    if (ptr_locked != ptr) {
        qemu_spin_unlock(&dest->jmp_lock);
        /*
         * The only possibility is that the jump was unlinked via
         * tb_jmp_unlink(dest).  Seeing a different destination here would
         * be a bug, because we set the LSB above.
         */
        g_assert(ptr_locked == 1 && dest->cflags & CF_INVALID);
        return;
    }
    /*
     * We first acquired the lock, and since the destination pointer matches,
     * we know for sure that @orig is in the jmp list.
     */
    pprev = &dest->jmp_list_head;
    TB_FOR_EACH_JMP(dest, tb, n) {
        if (tb == orig && n == n_orig) {
            *pprev = tb->jmp_list_next[n];
            /* no need to set orig->jmp_dest[n]; setting the LSB was enough */
            qemu_spin_unlock(&dest->jmp_lock);
            return;
        }
        pprev = &tb->jmp_list_next[n];
    }
    g_assert_not_reached();
}

/*
 * Reset the jump entry 'n' of a TB so that it is not chained to another TB.
 */
void tb_reset_jump(TranslationBlock *tb, int n)
{
    uintptr_t addr = (uintptr_t)(tb->tc.ptr + tb->jmp_reset_offset[n]);
    tb_set_jmp_target(tb, n, addr);
}

/* remove any jumps to the TB */
static inline void tb_jmp_unlink(TranslationBlock *dest)
{
    TranslationBlock *tb;
    int n;

    qemu_spin_lock(&dest->jmp_lock);

    TB_FOR_EACH_JMP(dest, tb, n) {
        tb_reset_jump(tb, n);
        qatomic_and(&tb->jmp_dest[n], (uintptr_t)NULL | 1);
        /* No need to clear the list entry; setting the dest ptr is enough */
    }
    dest->jmp_list_head = (uintptr_t)NULL;

    qemu_spin_unlock(&dest->jmp_lock);
}

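/*
 * Remove @tb from the per-vCPU jump caches.  With TARGET_TB_PCREL the TB
 * may be cached under any virtual address, so the whole cache is flushed;
 * otherwise only the entry hashed from tb_pc(tb) needs to be cleared.
 */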
static void tb_jmp_cache_inval_tb(TranslationBlock *tb)
{
    CPUState *cpu;

    if (TARGET_TB_PCREL) {
        /* A TB may be at any virtual address */
        CPU_FOREACH(cpu) {
            tcg_flush_jmp_cache(cpu);
        }
    } else {
        uint32_t h = tb_jmp_cache_hash_func(tb_pc(tb));

        CPU_FOREACH(cpu) {
            CPUJumpCache *jc = cpu->tb_jmp_cache;

            if (qatomic_read(&jc->array[h].tb) == tb) {
                qatomic_set(&jc->array[h].tb, NULL);
            }
        }
    }
}

/*
 * In user-mode, call with mmap_lock held.
 * In !user-mode, if @rm_from_page_list is set, call with the TB's pages'
 * locks held.
 */
static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
{
    PageDesc *p;
    uint32_t h;
    tb_page_addr_t phys_pc;
    uint32_t orig_cflags = tb_cflags(tb);

    assert_memory_lock();

    /* make sure no further incoming jumps will be chained to this TB */
    qemu_spin_lock(&tb->jmp_lock);
    qatomic_set(&tb->cflags, tb->cflags | CF_INVALID);
    qemu_spin_unlock(&tb->jmp_lock);

    /* remove the TB from the hash list */
    phys_pc = tb_page_addr0(tb);
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
                     tb->flags, orig_cflags, tb->trace_vcpu_dstate);
    if (!qht_remove(&tb_ctx.htable, tb, h)) {
        return;
    }

    /* remove the TB from the page list */
    if (rm_from_page_list) {
        p = page_find(phys_pc >> TARGET_PAGE_BITS);
        tb_page_remove(p, tb);
        phys_pc = tb_page_addr1(tb);
        if (phys_pc != -1) {
            p = page_find(phys_pc >> TARGET_PAGE_BITS);
            tb_page_remove(p, tb);
        }
    }

    /* remove the TB from the per-CPU jump caches */
    tb_jmp_cache_inval_tb(tb);

    /* suppress this TB from the two jump lists */
    tb_remove_from_jmp_list(tb, 0);
    tb_remove_from_jmp_list(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb_jmp_unlink(tb);

    qatomic_set(&tb_ctx.tb_phys_invalidate_count,
                tb_ctx.tb_phys_invalidate_count + 1);
}

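/*
 * Invalidate @tb with its page lock(s) already held, switching the JIT
 * region to write mode around the update for hosts that require it.
 */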
static void tb_phys_invalidate__locked(TranslationBlock *tb)
{
    qemu_thread_jit_write();
    do_tb_phys_invalidate(tb, true);
    qemu_thread_jit_execute();
}

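/*
 * Look up (and optionally allocate) the PageDesc(s) for @phys1 and @phys2,
 * then lock them in ascending page order.  @phys2 may be -1 if there is no
 * second page.
 */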
static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
{
    PageDesc *p1, *p2;
    tb_page_addr_t page1;
    tb_page_addr_t page2;

    assert_memory_lock();
    g_assert(phys1 != -1);

    page1 = phys1 >> TARGET_PAGE_BITS;
    page2 = phys2 >> TARGET_PAGE_BITS;

    p1 = page_find_alloc(page1, alloc);
    if (ret_p1) {
        *ret_p1 = p1;
    }
    if (likely(phys2 == -1)) {
        page_lock(p1);
        return;
    } else if (page1 == page2) {
        page_lock(p1);
        if (ret_p2) {
            *ret_p2 = p1;
        }
        return;
    }
    p2 = page_find_alloc(page2, alloc);
    if (ret_p2) {
        *ret_p2 = p2;
    }
    if (page1 < page2) {
        page_lock(p1);
        page_lock(p2);
    } else {
        page_lock(p2);
        page_lock(p1);
    }
}

#ifdef CONFIG_USER_ONLY
static inline void page_lock_tb(const TranslationBlock *tb) { }
static inline void page_unlock_tb(const TranslationBlock *tb) { }
#else
/* lock the page(s) of a TB in the correct acquisition order */
static void page_lock_tb(const TranslationBlock *tb)
{
    page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
}

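/* unlock the page(s) of a TB; the second page is released only if distinct */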
static void page_unlock_tb(const TranslationBlock *tb)
{
    PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);

    page_unlock(p1);
    if (unlikely(tb_page_addr1(tb) != -1)) {
        PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);

        if (p2 != p1) {
            page_unlock(p2);
        }
    }
}
#endif

/*
 * Invalidate one TB.
 * Called with mmap_lock held in user-mode.
 */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    if (page_addr == -1 && tb_page_addr0(tb) != -1) {
        page_lock_tb(tb);
        do_tb_phys_invalidate(tb, true);
        page_unlock_tb(tb);
    } else {
        do_tb_phys_invalidate(tb, false);
    }
}

/*
 * Add the TB to the target page and protect it if necessary.
 * Called with mmap_lock held for user-mode emulation.
 * Called with @p->lock held in !user-mode.
 */
static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
                               unsigned int n, tb_page_addr_t page_addr)
{
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    assert_page_locked(p);

    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != (uintptr_t)NULL;
#endif
    p->first_tb = (uintptr_t)tb | n;

#if defined(CONFIG_USER_ONLY)
    /* translator_loop() must have made all TB pages non-writable */
    assert(!(p->flags & PAGE_WRITE));
#else
    /*
     * If some code is already present, then the pages are already
     * protected. So we handle the case where only the first TB is
     * allocated in a physical page.
     */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif
}

/*
 * Add a new TB and link it to the physical page tables. phys_page2 is
 * (-1) to indicate that only one page contains the TB.
 *
 * Called with mmap_lock held for user-mode emulation.
 *
 * Returns a pointer to @tb, or a pointer to an existing TB that matches @tb.
 * Note that in !user-mode, another thread might have already added a TB
 * for the same block of guest code that @tb corresponds to. In that case,
 * the caller should discard the original @tb, and instead use the returned TB.
 */
TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                               tb_page_addr_t phys_page2)
{
    PageDesc *p;
    PageDesc *p2 = NULL;
    void *existing_tb = NULL;
    uint32_t h;

    assert_memory_lock();
    tcg_debug_assert(!(tb->cflags & CF_INVALID));

    /*
     * Add the TB to the page list, first acquiring the pages' locks.
     * We keep the locks held until after inserting the TB in the hash table,
     * so that if the insertion fails we know for sure that the TBs are still
     * in the page descriptors.
     * Note that inserting into the hash table first isn't an option, since
     * we can only insert TBs that are fully initialized.
     */
    page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
    tb_page_add(p, tb, 0, phys_pc);
    if (p2) {
        tb_page_add(p2, tb, 1, phys_page2);
    }

    /* add in the hash table */
    h = tb_hash_func(phys_pc, (TARGET_TB_PCREL ? 0 : tb_pc(tb)),
                     tb->flags, tb->cflags, tb->trace_vcpu_dstate);
    qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

    /* remove TB from the page(s) if we couldn't insert it */
    if (unlikely(existing_tb)) {
        tb_page_remove(p, tb);
        if (p2) {
            tb_page_remove(p2, tb);
        }
        tb = existing_tb;
    }

    if (p2 && p2 != p) {
        page_unlock(p2);
    }
    page_unlock(p);
    return tb;
}

/*
 * @p must be non-NULL.
 * user-mode: call with mmap_lock held.
 * !user-mode: call with all @pages locked.
 */
static void
tb_invalidate_phys_page_range__locked(struct page_collection *pages,
                                      PageDesc *p, tb_page_addr_t start,
                                      tb_page_addr_t end,
                                      uintptr_t retaddr)
{
    TranslationBlock *tb;
    tb_page_addr_t tb_start, tb_end;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *cpu = current_cpu;
    bool current_tb_not_found = retaddr != 0;
    bool current_tb_modified = false;
    TranslationBlock *current_tb = NULL;
#endif /* TARGET_HAS_PRECISE_SMC */

    assert_page_locked(p);

    /*
     * We remove all the TBs in the range [start, end[.
     * XXX: see if in some cases it could be faster to invalidate all the code
     */
    PAGE_FOR_EACH_TB(p, tb, n) {
        assert_page_locked(p);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /*
             * NOTE: tb_end may be after the end of the page, but
             * it is not a problem.
             */
            tb_start = tb_page_addr0(tb);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb_page_addr1(tb);
            tb_end = tb_start + ((tb_page_addr0(tb) + tb->size)
                                  & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = false;
                /* now we have a real cpu fault */
                current_tb = tcg_tb_lookup(retaddr);
            }
            if (current_tb == tb &&
                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
                /*
                 * If we are modifying the current TB, we must stop
                 * its execution. We could be more precise by checking
                 * that the modification is after the current PC, but it
                 * would require a specialized function to partially
                 * restore the CPU state.
                 */
                current_tb_modified = true;
                cpu_restore_state_from_tb(cpu, current_tb, retaddr);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate__locked(tb);
        }
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        tlb_unprotect_code(start);
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        page_collection_unlock(pages);
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        mmap_unlock();
        cpu_loop_exit_noexc(cpu);
    }
#endif
}

/*
 * Invalidate all TBs which intersect with the target physical
 * address page @addr.
 *
 * Called with mmap_lock held for user-mode emulation
 */
void tb_invalidate_phys_page(tb_page_addr_t addr)
{
    struct page_collection *pages;
    tb_page_addr_t start, end;
    PageDesc *p;

    assert_memory_lock();

    p = page_find(addr >> TARGET_PAGE_BITS);
    if (p == NULL) {
        return;
    }

    start = addr & TARGET_PAGE_MASK;
    end = start + TARGET_PAGE_SIZE;
    pages = page_collection_lock(start, end);
    tb_invalidate_phys_page_range__locked(pages, p, start, end, 0);
    page_collection_unlock(pages);
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 *
 * Called with mmap_lock held for user-mode emulation.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
{
    struct page_collection *pages;
    tb_page_addr_t next;

    assert_memory_lock();

    pages = page_collection_lock(start, end);
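    /* Walk the range one guest page at a time. */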
    for (next = (start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
         start < end;
         start = next, next += TARGET_PAGE_SIZE) {
        PageDesc *pd = page_find(start >> TARGET_PAGE_BITS);
        tb_page_addr_t bound = MIN(next, end);

        if (pd == NULL) {
            continue;
        }
        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
    }
    page_collection_unlock(pages);
}

#ifdef CONFIG_SOFTMMU
/*
 * len must be <= 8 and start must be a multiple of len.
 * Called via softmmu_template.h when code areas are written to with
 * iothread mutex not held.
 *
 * Call with all @pages in the range [@start, @start + len[ locked.
 */
void tb_invalidate_phys_page_fast(struct page_collection *pages,
                                  tb_page_addr_t start, int len,
                                  uintptr_t retaddr)
{
    PageDesc *p;

    assert_memory_lock();

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p) {
        return;
    }

    assert_page_locked(p);
    tb_invalidate_phys_page_range__locked(pages, p, start, start + len,
                                          retaddr);
}
#else
/*
 * Called with mmap_lock held. If pc is not 0 then it indicates the
 * host PC of the faulting store instruction that caused this invalidate.
 * Returns true if the caller needs to abort execution of the current
 * TB (because it was modified by this store and the guest CPU has
 * precise-SMC semantics).
 */
bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *cpu = current_cpu;
    bool current_tb_modified = false;
#endif

    assert_memory_lock();

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        return false;
    }

#ifdef TARGET_HAS_PRECISE_SMC
    if (p->first_tb && pc != 0) {
        current_tb = tcg_tb_lookup(pc);
    }
#endif
    assert_page_locked(p);
    PAGE_FOR_EACH_TB(p, tb, n) {
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
            /*
             * If we are modifying the current TB, we must stop its execution.
             * We could be more precise by checking that the modification is
             * after the current PC, but it would require a specialized
             * function to partially restore the CPU state.
             */
            current_tb_modified = true;
            cpu_restore_state_from_tb(cpu, current_tb, pc);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
    }
    p->first_tb = (uintptr_t)NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* Force execution of one insn next time.  */
        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
        return true;
    }
#endif

    return false;
}
#endif