qemu/accel/tcg/user-exec.c
/*
 *  User emulator execution
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/core/tcg-cpu-ops.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/bitops.h"
#include "qemu/rcu.h"
#include "exec/cpu_ldst.h"
#include "exec/translate-all.h"
#include "exec/helper-proto.h"
#include "qemu/atomic128.h"
#include "trace/trace-root.h"
#include "tcg/tcg-ldst.h"
#include "internal.h"

__thread uintptr_t helper_retaddr;

//#define DEBUG_SIGNAL

/*
 * Adjust the pc to pass to cpu_restore_state; return the MMU access type.
 */
MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write)
{
    switch (helper_retaddr) {
    default:
        /*
         * Fault during host memory operation within a helper function.
         * The helper's host return address, saved here, gives us a
         * pointer into the generated code that will unwind to the
         * correct guest pc.
         */
        *pc = helper_retaddr;
        break;

    case 0:
        /*
         * Fault during host memory operation within generated code.
         * (Or, an unrelated bug within qemu, but we can't tell from here).
         *
         * We take the host pc from the signal frame.  However, we cannot
         * use that value directly.  Within cpu_restore_state_from_tb, we
         * assume PC comes from GETPC(), as used by the helper functions,
         * so we adjust the address by -GETPC_ADJ to form an address that
         * is within the call insn, so that the address does not accidentally
         * match the beginning of the next guest insn.  However, when the
         * pc comes from the signal frame it points to the actual faulting
         * host memory insn and not the return from a call insn.
         *
         * Therefore, adjust to compensate for what will be done later
         * by cpu_restore_state_from_tb.
         */
        *pc += GETPC_ADJ;
        break;

    case 1:
        /*
         * Fault during host read for translation, or loosely, "execution".
         *
         * The guest pc is already pointing to the start of the TB for which
         * code is being generated.  If the guest translator manages the
         * page crossings correctly, this is exactly the correct address
         * (and if the translator doesn't handle page boundaries correctly
         * there's little we can do about that here).  Therefore, do not
         * trigger the unwinder.
         */
        *pc = 0;
        return MMU_INST_FETCH;
    }

    return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
}
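
/*
 * Illustrative sketch of how a host SIGSEGV handler is expected to
 * consume adjust_signal_pc().  The host_signal_pc() and
 * host_signal_write() accessors are assumed to come from the host
 * signal headers; the surrounding handler here is hypothetical:
 *
 *     uintptr_t pc = host_signal_pc(uc);
 *     bool is_write = host_signal_write(info, uc);
 *     MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
 *
 *     if (access_type == MMU_INST_FETCH) {
 *         // pc was zeroed above: report the fault without unwinding.
 *     } else {
 *         // pc now unwinds correctly via cpu_restore_state.
 *     }
 */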

/**
 * handle_sigsegv_accerr_write:
 * @cpu: the cpu context
 * @old_set: the sigset_t from the signal ucontext_t
 * @host_pc: the host pc, adjusted for the signal
 * @guest_addr: the guest address of the fault
 *
 * Return true if the write fault has been handled, and should be re-tried.
 *
 * Note that it is important that we don't call page_unprotect() unless
 * this is really a "write to nonwritable page" fault, because
 * page_unprotect() assumes that if it is called for an access to
 * a page that's writable this means we had two threads racing and
 * another thread got there first and already made the page writable;
 * so we will retry the access. If we were to call page_unprotect()
 * for some other kind of fault that should really be passed to the
 * guest, we'd end up in an infinite loop of retrying the faulting access.
 */
bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set,
                                 uintptr_t host_pc, abi_ptr guest_addr)
{
    switch (page_unprotect(guest_addr, host_pc)) {
    case 0:
        /*
         * Fault not caused by a page marked unwritable to protect
         * cached translations, must be the guest binary's problem.
         */
        return false;
    case 1:
        /*
         * Fault caused by protection of cached translation; TBs
         * invalidated, so resume execution.
         */
        return true;
    case 2:
        /*
         * Fault caused by protection of cached translation, and the
         * currently executing TB was modified and must be exited immediately.
         */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit_noexc(cpu);
        /* NORETURN */
    default:
        g_assert_not_reached();
    }
}

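/*
 * Per-range page flags, keyed by the inclusive guest address interval
 * [itree.start, itree.last].  Nodes are reclaimed via RCU so that the
 * lockless lookups in page_get_flags() below never dereference freed
 * memory.
 */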
typedef struct PageFlagsNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    int flags;
} PageFlagsNode;

static IntervalTreeRoot pageflags_root;

static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_first(&pageflags_root, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
                                     target_ulong last)
{
    IntervalTreeNode *n;

    n = interval_tree_iter_next(&p->itree, start, last);
    return n ? container_of(n, PageFlagsNode, itree) : NULL;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    IntervalTreeNode *n;
    int rc = 0;

    mmap_lock();
    for (n = interval_tree_iter_first(&pageflags_root, 0, -1);
         n != NULL;
         n = interval_tree_iter_next(n, 0, -1)) {
        PageFlagsNode *p = container_of(n, PageFlagsNode, itree);

        rc = fn(priv, n->start, n->last + 1, p->flags);
        if (rc != 0) {
            break;
        }
    }
    mmap_unlock();

    return rc;
}

static int dump_region(void *priv, target_ulong start,
                       target_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    fprintf(f, TARGET_FMT_lx"-"TARGET_FMT_lx" "TARGET_FMT_lx" %c%c%c\n",
            start, end, end - start,
            ((prot & PAGE_READ) ? 'r' : '-'),
            ((prot & PAGE_WRITE) ? 'w' : '-'),
            ((prot & PAGE_EXEC) ? 'x' : '-'));
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(target_ulong) * 2;

    fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageFlagsNode *p = pageflags_find(address, address);

    /*
     * See util/interval-tree.c re lockless lookups: no false positives but
     * there are false negatives.  If we find nothing, retry with the mmap
     * lock acquired.
     */
    if (p) {
        return p->flags;
    }
    if (have_mmap_lock()) {
        return 0;
    }

    mmap_lock();
    p = pageflags_find(address, address);
    mmap_unlock();
    return p ? p->flags : 0;
}

/* A subroutine of page_set_flags: insert a new node for [start,last]. */
static void pageflags_create(target_ulong start, target_ulong last, int flags)
{
    PageFlagsNode *p = g_new(PageFlagsNode, 1);

    p->itree.start = start;
    p->itree.last = last;
    p->flags = flags;
    interval_tree_insert(&p->itree, &pageflags_root);
}

/* A subroutine of page_set_flags: remove everything in [start,last]. */
static bool pageflags_unset(target_ulong start, target_ulong last)
{
    bool inval_tb = false;

    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        target_ulong p_last;

        if (!p) {
            break;
        }

        if (p->flags & PAGE_EXEC) {
            inval_tb = true;
        }

        interval_tree_remove(&p->itree, &pageflags_root);
        p_last = p->itree.last;

        if (p->itree.start < start) {
            /* Truncate the node from the end, or split out the middle. */
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            if (last < p_last) {
                pageflags_create(last + 1, p_last, p->flags);
                break;
            }
        } else if (p_last <= last) {
            /* Range completely covers node -- remove it. */
            g_free_rcu(p, rcu);
        } else {
            /* Truncate the node from the start. */
            p->itree.start = last + 1;
            interval_tree_insert(&p->itree, &pageflags_root);
            break;
        }
    }

    return inval_tb;
}

/*
 * A subroutine of page_set_flags: nothing overlaps [start,last],
 * but check adjacent mappings and maybe merge into a single range.
 */
static void pageflags_create_merge(target_ulong start, target_ulong last,
                                   int flags)
{
    PageFlagsNode *next = NULL, *prev = NULL;

    if (start > 0) {
        prev = pageflags_find(start - 1, start - 1);
        if (prev) {
            if (prev->flags == flags) {
                interval_tree_remove(&prev->itree, &pageflags_root);
            } else {
                prev = NULL;
            }
        }
    }
    if (last + 1 != 0) {
        next = pageflags_find(last + 1, last + 1);
        if (next) {
            if (next->flags == flags) {
                interval_tree_remove(&next->itree, &pageflags_root);
            } else {
                next = NULL;
            }
        }
    }

    if (prev) {
        if (next) {
            prev->itree.last = next->itree.last;
            g_free_rcu(next, rcu);
        } else {
            prev->itree.last = last;
        }
        interval_tree_insert(&prev->itree, &pageflags_root);
    } else if (next) {
        next->itree.start = start;
        interval_tree_insert(&next->itree, &pageflags_root);
    } else {
        pageflags_create(start, last, flags);
    }
}
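
/*
 * Worked example (illustrative): given existing nodes [0x1000,0x1fff]
 * and [0x3000,0x3fff], both with flags PAGE_VALID|PAGE_READ|PAGE_WRITE,
 * creating [0x2000,0x2fff] with the same flags collapses all three into
 * a single node [0x1000,0x3fff]; with any other flags the new range is
 * inserted as its own node.
 */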

/*
 * Allow the target to decide if PAGE_TARGET_[12] may be reset.
 * By default, they are not kept.
 */
#ifndef PAGE_TARGET_STICKY
#define PAGE_TARGET_STICKY  0
#endif
#define PAGE_STICKY  (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY)

/* A subroutine of page_set_flags: set and clear flags in [start,last]. */
static bool pageflags_set_clear(target_ulong start, target_ulong last,
                                int set_flags, int clear_flags)
{
    PageFlagsNode *p;
    target_ulong p_start, p_last;
    int p_flags, merge_flags;
    bool inval_tb = false;

 restart:
    p = pageflags_find(start, last);
    if (!p) {
        if (set_flags) {
            pageflags_create_merge(start, last, set_flags);
        }
        goto done;
    }

    p_start = p->itree.start;
    p_last = p->itree.last;
    p_flags = p->flags;
    /* Using mprotect on a page does not change sticky bits. */
    merge_flags = (p_flags & ~clear_flags) | set_flags;

    /*
     * Need to flush if an overlapping executable region
     * removes exec, or adds write.
     */
    if ((p_flags & PAGE_EXEC)
        && (!(merge_flags & PAGE_EXEC)
            || (merge_flags & ~p_flags & PAGE_WRITE))) {
        inval_tb = true;
    }

    /*
     * If there is an exact range match, update and return without
     * attempting to merge with adjacent regions.
     */
    if (start == p_start && last == p_last) {
        if (merge_flags) {
            p->flags = merge_flags;
        } else {
            interval_tree_remove(&p->itree, &pageflags_root);
            g_free_rcu(p, rcu);
        }
        goto done;
    }

    /*
     * If sticky bits affect the original mapping, then we must be more
     * careful about the existing intervals and the separate flags.
     */
    if (set_flags != merge_flags) {
        if (p_start < start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.last = start - 1;
            interval_tree_insert(&p->itree, &pageflags_root);

            if (last < p_last) {
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
                pageflags_create(last + 1, p_last, p_flags);
            } else {
                if (merge_flags) {
                    pageflags_create(start, p_last, merge_flags);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        } else {
            if (start < p_start && set_flags) {
                pageflags_create(start, p_start - 1, set_flags);
            }
            if (last < p_last) {
                interval_tree_remove(&p->itree, &pageflags_root);
                p->itree.start = last + 1;
                interval_tree_insert(&p->itree, &pageflags_root);
                if (merge_flags) {
                    pageflags_create(start, last, merge_flags);
                }
            } else {
                if (merge_flags) {
                    p->flags = merge_flags;
                } else {
                    interval_tree_remove(&p->itree, &pageflags_root);
                    g_free_rcu(p, rcu);
                }
                if (p_last < last) {
                    start = p_last + 1;
                    goto restart;
                }
            }
        }
        goto done;
    }

    /* If flags are not changing for this range, incorporate it. */
    if (set_flags == p_flags) {
        if (start < p_start) {
            interval_tree_remove(&p->itree, &pageflags_root);
            p->itree.start = start;
            interval_tree_insert(&p->itree, &pageflags_root);
        }
        if (p_last < last) {
            start = p_last + 1;
            goto restart;
        }
        goto done;
    }

    /* Maybe split out head and/or tail ranges with the original flags. */
    interval_tree_remove(&p->itree, &pageflags_root);
    if (p_start < start) {
        p->itree.last = start - 1;
        interval_tree_insert(&p->itree, &pageflags_root);

        if (p_last < last) {
            goto restart;
        }
        if (last < p_last) {
            pageflags_create(last + 1, p_last, p_flags);
        }
    } else if (last < p_last) {
        p->itree.start = last + 1;
        interval_tree_insert(&p->itree, &pageflags_root);
    } else {
        g_free_rcu(p, rcu);
        goto restart;
    }
    if (set_flags) {
        pageflags_create(start, last, set_flags);
    }

 done:
    return inval_tb;
}
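
/*
 * Worked example (illustrative): an mprotect(PROT_READ) over part of an
 * anonymous read-write mapping reaches this function as
 * set_flags = PAGE_VALID | PAGE_READ and clear_flags = ~PAGE_STICKY,
 * so merge_flags keeps the sticky PAGE_ANON bit while dropping
 * PAGE_WRITE; because set_flags != merge_flags, the sticky-aware
 * splitting path above is taken.
 */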

/*
 * Modify the flags of a page and invalidate the code if necessary.
 * The flag PAGE_WRITE_ORG is positioned automatically depending
 * on PAGE_WRITE.  The mmap_lock should already be held.
 */
void page_set_flags(target_ulong start, target_ulong last, int flags)
{
    bool reset = false;
    bool inval_tb = false;

    /*
     * This function should never be called with addresses outside the
     * guest address space.  If this assert fires, it probably indicates
     * a missing call to h2g_valid.
     */
    assert(start <= last);
    assert(last <= GUEST_ADDR_MAX);
    /* Only set PAGE_ANON with new mappings. */
    assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET));
    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    if (!(flags & PAGE_VALID)) {
        flags = 0;
    } else {
        reset = flags & PAGE_RESET;
        flags &= ~PAGE_RESET;
        if (flags & PAGE_WRITE) {
            flags |= PAGE_WRITE_ORG;
        }
    }

    if (!flags || reset) {
        page_reset_target_data(start, last);
        inval_tb |= pageflags_unset(start, last);
    }
    if (flags) {
        inval_tb |= pageflags_set_clear(start, last, flags,
                                        ~(reset ? 0 : PAGE_STICKY));
    }
    if (inval_tb) {
        tb_invalidate_phys_range(start, last);
    }
}

bool page_check_range(target_ulong start, target_ulong len, int flags)
{
    target_ulong last;
    int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
    bool ret;

    if (len == 0) {
        return true;  /* trivial length */
    }

    last = start + len - 1;
    if (last < start) {
        return false; /* wrap around */
    }

    locked = have_mmap_lock();
    while (true) {
        PageFlagsNode *p = pageflags_find(start, last);
        int missing;

        if (!p) {
            if (!locked) {
                /*
                 * Lockless lookups have false negatives.
                 * Retry with the lock held.
                 */
                mmap_lock();
                locked = -1;
                p = pageflags_find(start, last);
            }
            if (!p) {
                ret = false; /* entire region invalid */
                break;
            }
        }
        if (start < p->itree.start) {
            ret = false; /* initial bytes invalid */
            break;
        }

        missing = flags & ~p->flags;
        if (missing & ~PAGE_WRITE) {
            ret = false; /* page doesn't match */
            break;
        }
        if (missing & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                ret = false; /* page not writable */
                break;
            }
            /* Asking about writable, but has been protected: undo. */
            if (!page_unprotect(start, 0)) {
                ret = false;
                break;
            }
            /* TODO: page_unprotect should take a range, not a single page. */
            if (last - start < TARGET_PAGE_SIZE) {
                ret = true; /* ok */
                break;
            }
            start += TARGET_PAGE_SIZE;
            continue;
        }

        if (last <= p->itree.last) {
            ret = true; /* ok */
            break;
        }
        start = p->itree.last + 1;
    }

    /* Release the lock if acquired locally. */
    if (locked < 0) {
        mmap_unlock();
    }
    return ret;
}
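
/*
 * Illustrative use (the caller shown is an assumption): the syscall
 * layer validates guest buffers in exactly these terms, e.g.
 *
 *     if (!page_check_range(arg_ptr, arg_len, PAGE_READ)) {
 *         return -TARGET_EFAULT;
 *     }
 */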

bool page_check_range_empty(target_ulong start, target_ulong last)
{
    assert(last >= start);
    assert_memory_lock();
    return pageflags_find(start, last) == NULL;
}

target_ulong page_find_range_empty(target_ulong min, target_ulong max,
                                   target_ulong len, target_ulong align)
{
    target_ulong len_m1, align_m1;

    assert(min <= max);
    assert(max <= GUEST_ADDR_MAX);
    assert(len != 0);
    assert(is_power_of_2(align));
    assert_memory_lock();

    len_m1 = len - 1;
    align_m1 = align - 1;

    /* Iteratively narrow the search region. */
    while (1) {
        PageFlagsNode *p;

        /* Align min and double-check there's enough space remaining. */
        min = (min + align_m1) & ~align_m1;
        if (min > max) {
            return -1;
        }
        if (len_m1 > max - min) {
            return -1;
        }

        p = pageflags_find(min, min + len_m1);
        if (p == NULL) {
            /* Found! */
            return min;
        }
        if (max <= p->itree.last) {
            /* Existing allocation fills the remainder of the search region. */
            return -1;
        }
        /* Skip across existing allocation. */
        min = p->itree.last + 1;
    }
}
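
/*
 * Illustrative use (mmap_min_addr stands in for whatever lower bound
 * the caller enforces): the mmap path can ask for an aligned hole with
 *
 *     addr = page_find_range_empty(mmap_min_addr, GUEST_ADDR_MAX,
 *                                  len, TARGET_PAGE_SIZE);
 *     if (addr == (target_ulong)-1) {
 *         return -ENOMEM;   // no suitably aligned gap left
 *     }
 */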

void page_protect(tb_page_addr_t address)
{
    PageFlagsNode *p;
    target_ulong start, last;
    int prot;

    assert_memory_lock();

    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
        start = address & TARGET_PAGE_MASK;
        last = start + TARGET_PAGE_SIZE - 1;
    } else {
        start = address & qemu_host_page_mask;
        last = start + qemu_host_page_size - 1;
    }

    p = pageflags_find(start, last);
    if (!p) {
        return;
    }
    prot = p->flags;

    if (unlikely(p->itree.last < last)) {
        /* More than one protection region covers the one host page. */
        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
        while ((p = pageflags_next(p, start, last)) != NULL) {
            prot |= p->flags;
        }
    }

    if (prot & PAGE_WRITE) {
        pageflags_set_clear(start, last, 0, PAGE_WRITE);
        mprotect(g2h_untagged(start), qemu_host_page_size,
                 prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
    }
}

/*
 * Called from signal handler: invalidate the code and unprotect the
 * page. Return 0 if the fault was not handled, 1 if it was handled,
 * and 2 if it was handled but the caller must cause the TB to be
 * immediately exited. (We can only return 2 if the 'pc' argument is
 * non-zero.)
 */
int page_unprotect(target_ulong address, uintptr_t pc)
{
    PageFlagsNode *p;
    bool current_tb_invalidated;

    /*
     * Technically this isn't safe inside a signal handler.  However we
     * know this only ever happens in a synchronous SEGV handler, so in
     * practice it seems to be ok.
     */
    mmap_lock();

    p = pageflags_find(address, address);

    /* If this address was not really writable, nothing to do. */
    if (!p || !(p->flags & PAGE_WRITE_ORG)) {
        mmap_unlock();
        return 0;
    }

    current_tb_invalidated = false;
    if (p->flags & PAGE_WRITE) {
        /*
         * If the page is actually marked WRITE then assume this is because
         * this thread raced with another one which got here first and
         * set the page to PAGE_WRITE and did the TB invalidate for us.
         */
#ifdef TARGET_HAS_PRECISE_SMC
        TranslationBlock *current_tb = tcg_tb_lookup(pc);
        if (current_tb) {
            current_tb_invalidated = tb_cflags(current_tb) & CF_INVALID;
        }
#endif
    } else {
        target_ulong start, len, i;
        int prot;

        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
            start = address & TARGET_PAGE_MASK;
            len = TARGET_PAGE_SIZE;
            prot = p->flags | PAGE_WRITE;
            pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
            current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
        } else {
            start = address & qemu_host_page_mask;
            len = qemu_host_page_size;
            prot = 0;

            for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
                target_ulong addr = start + i;

                p = pageflags_find(addr, addr);
                if (p) {
                    prot |= p->flags;
                    if (p->flags & PAGE_WRITE_ORG) {
                        prot |= PAGE_WRITE;
                        pageflags_set_clear(addr, addr + TARGET_PAGE_SIZE - 1,
                                            PAGE_WRITE, 0);
                    }
                }
                /*
                 * Since the content will be modified, we must invalidate
                 * the corresponding translated code.
                 */
                current_tb_invalidated |=
                    tb_invalidate_phys_page_unwind(addr, pc);
            }
        }
        if (prot & PAGE_EXEC) {
            prot = (prot & ~PAGE_EXEC) | PAGE_READ;
        }
        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
    }
    mmap_unlock();

    /* If the current TB was invalidated, return to the main loop. */
    return current_tb_invalidated ? 2 : 1;
}

static int probe_access_internal(CPUArchState *env, vaddr addr,
                                 int fault_size, MMUAccessType access_type,
                                 bool nonfault, uintptr_t ra)
{
    int acc_flag;
    bool maperr;

    switch (access_type) {
    case MMU_DATA_STORE:
        acc_flag = PAGE_WRITE_ORG;
        break;
    case MMU_DATA_LOAD:
        acc_flag = PAGE_READ;
        break;
    case MMU_INST_FETCH:
        acc_flag = PAGE_EXEC;
        break;
    default:
        g_assert_not_reached();
    }

    if (guest_addr_valid_untagged(addr)) {
        int page_flags = page_get_flags(addr);
        if (page_flags & acc_flag) {
            if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
                return TLB_MMIO;
            }
            return 0; /* success */
        }
        maperr = !(page_flags & PAGE_VALID);
    } else {
        maperr = true;
    }

    if (nonfault) {
        return TLB_INVALID_MASK;
    }

    cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
}

int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool nonfault, void **phost, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
    *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
    return flags;
}

void *probe_access(CPUArchState *env, vaddr addr, int size,
                   MMUAccessType access_type, int mmu_idx, uintptr_t ra)
{
    int flags;

    g_assert(-(addr | TARGET_PAGE_MASK) >= size);
    flags = probe_access_internal(env, addr, size, access_type, false, ra);
    g_assert((flags & ~TLB_MMIO) == 0);

    return size ? g2h(env_cpu(env), addr) : NULL;
}

tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                        void **hostp)
{
    int flags;

    flags = probe_access_internal(env, addr, 1, MMU_INST_FETCH, false, 0);
    g_assert(flags == 0);

    if (hostp) {
        *hostp = g2h_untagged(addr);
    }
    return addr;
}

#ifdef TARGET_PAGE_DATA_SIZE
/*
 * Allocate chunks of target data together.  For the only current user,
 * if we allocate one hunk per page, we have overhead of 40/128 or ~31%.
 * Therefore, allocate memory for 64 pages at a time for overhead < 1%.
 */
#define TPD_PAGES  64
#define TBD_MASK   (TARGET_PAGE_MASK * TPD_PAGES)
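/*
 * Note: TARGET_PAGE_MASK is sign-extended, so the multiply above acts as
 * a left shift: TBD_MASK covers an aligned region of TPD_PAGES pages.
 */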

typedef struct TargetPageDataNode {
    struct rcu_head rcu;
    IntervalTreeNode itree;
    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
} TargetPageDataNode;

static IntervalTreeRoot targetdata_root;

void page_reset_target_data(target_ulong start, target_ulong last)
{
    IntervalTreeNode *n, *next;

    assert_memory_lock();

    start &= TARGET_PAGE_MASK;
    last |= ~TARGET_PAGE_MASK;

    for (n = interval_tree_iter_first(&targetdata_root, start, last),
         next = n ? interval_tree_iter_next(n, start, last) : NULL;
         n != NULL;
         n = next,
         next = next ? interval_tree_iter_next(n, start, last) : NULL) {
        target_ulong n_start, n_last, p_ofs, p_len;
        TargetPageDataNode *t = container_of(n, TargetPageDataNode, itree);

        if (n->start >= start && n->last <= last) {
            interval_tree_remove(n, &targetdata_root);
            g_free_rcu(t, rcu);
            continue;
        }

        if (n->start < start) {
            n_start = start;
            p_ofs = (start - n->start) >> TARGET_PAGE_BITS;
        } else {
            n_start = n->start;
            p_ofs = 0;
        }
        n_last = MIN(last, n->last);
        p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
    }
}

void *page_get_target_data(target_ulong address)
{
    IntervalTreeNode *n;
    TargetPageDataNode *t;
    target_ulong page, region;

    page = address & TARGET_PAGE_MASK;
    region = address & TBD_MASK;

    n = interval_tree_iter_first(&targetdata_root, page, page);
    if (!n) {
        /*
         * See util/interval-tree.c re lockless lookups: no false positives
         * but there are false negatives.  If we find nothing, retry with
         * the mmap lock acquired.  We also need the lock for the
         * allocation + insert.
         */
        mmap_lock();
        n = interval_tree_iter_first(&targetdata_root, page, page);
        if (!n) {
            t = g_new0(TargetPageDataNode, 1);
            n = &t->itree;
            n->start = region;
            n->last = region | ~TBD_MASK;
            interval_tree_insert(n, &targetdata_root);
        }
        mmap_unlock();
    }

    t = container_of(n, TargetPageDataNode, itree);
    return t->data[(page - region) >> TARGET_PAGE_BITS];
}
#else
void page_reset_target_data(target_ulong start, target_ulong last) { }
#endif /* TARGET_PAGE_DATA_SIZE */

/* The softmmu versions of these helpers are in cputlb.c.  */

static void *cpu_mmu_lookup(CPUArchState *env, vaddr addr,
                            MemOp mop, uintptr_t ra, MMUAccessType type)
{
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(ra);
    return ret;
}
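
/*
 * Note the pairing protocol: cpu_mmu_lookup() publishes the helper
 * return address via set_helper_retaddr() before the host access is
 * performed, and every caller below must invoke clear_helper_retaddr()
 * once the access is done, so that a SIGSEGV raised in between unwinds
 * through adjust_signal_pc() above.
 */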

#include "ldst_atomicity.c.inc"

static uint8_t do_ld1_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld1_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int8_t)do_ld1_mmu(env, addr, get_memop(oi), ra);
}

uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    uint8_t ret = do_ld1_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint16_t do_ld2_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_2(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld2_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int16_t)do_ld2_mmu(env, addr, get_memop(oi), ra);
}

uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint16_t ret = do_ld2_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint32_t do_ld4_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_4(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return do_ld4_mmu(env, addr, get_memop(oi), ra);
}

tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
                                 MemOpIdx oi, uintptr_t ra)
{
    return (int32_t)do_ld4_mmu(env, addr, get_memop(oi), ra);
}

uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint32_t ret = do_ld4_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static uint64_t do_ld8_mmu(CPUArchState *env, abi_ptr addr,
                           MemOp mop, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_8(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
                        MemOpIdx oi, uintptr_t ra)
{
    return do_ld8_mmu(env, addr, get_memop(oi), ra);
}

uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
                     MemOpIdx oi, uintptr_t ra)
{
    uint64_t ret = do_ld8_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static Int128 do_ld16_mmu(CPUArchState *env, abi_ptr addr,
                          MemOp mop, uintptr_t ra)
{
    void *haddr;
    Int128 ret;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_LOAD);
    ret = load_atom_16(env, ra, haddr, mop);
    clear_helper_retaddr();

    if (mop & MO_BSWAP) {
        ret = bswap128(ret);
    }
    return ret;
}

Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
                       MemOpIdx oi, uintptr_t ra)
{
    return do_ld16_mmu(env, addr, get_memop(oi), ra);
}

Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, MemOpIdx oi)
{
    return helper_ld16_mmu(env, addr, oi, GETPC());
}

Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
                    MemOpIdx oi, uintptr_t ra)
{
    Int128 ret = do_ld16_mmu(env, addr, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
    return ret;
}

static void do_st1_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_8);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);
    stb_p(haddr, val);
    clear_helper_retaddr();
}

void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st1_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st2_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_16);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap16(val);
    }
    store_atom_2(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st2_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st4_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_32);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap32(val);
    }
    store_atom_4(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st4_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st8_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                       MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_64);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap64(val);
    }
    store_atom_8(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
                    MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
}

void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
                 MemOpIdx oi, uintptr_t ra)
{
    do_st8_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

static void do_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
                        MemOp mop, uintptr_t ra)
{
    void *haddr;

    tcg_debug_assert((mop & MO_SIZE) == MO_128);
    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
    haddr = cpu_mmu_lookup(env, addr, mop, ra, MMU_DATA_STORE);

    if (mop & MO_BSWAP) {
        val = bswap128(val);
    }
    store_atom_16(env, ra, haddr, mop, val);
    clear_helper_retaddr();
}

void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
                     MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
}

void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
{
    helper_st16_mmu(env, addr, val, oi, GETPC());
}

void cpu_st16_mmu(CPUArchState *env, abi_ptr addr,
                  Int128 val, MemOpIdx oi, uintptr_t ra)
{
    do_st16_mmu(env, addr, val, get_memop(oi), ra);
    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}

uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldub_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = lduw_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr ptr)
{
    uint32_t ret;

    set_helper_retaddr(1);
    ret = ldl_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
{
    uint64_t ret;

    set_helper_retaddr(1);
    ret = ldq_p(g2h_untagged(ptr));
    clear_helper_retaddr();
    return ret;
}

uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
                         MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint8_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldub_p(haddr);
    clear_helper_retaddr();
    return ret;
}

uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint16_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = lduw_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap16(ret);
    }
    return ret;
}

uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint32_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldl_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap32(ret);
    }
    return ret;
}

uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
                          MemOpIdx oi, uintptr_t ra)
{
    void *haddr;
    uint64_t ret;

    haddr = cpu_mmu_lookup(env, addr, get_memop(oi), ra, MMU_INST_FETCH);
    ret = ldq_p(haddr);
    clear_helper_retaddr();
    if (get_memop(oi) & MO_BSWAP) {
        ret = bswap64(ret);
    }
    return ret;
}

#include "ldst_common.c.inc"

/*
 * Do not allow unaligned operations to proceed.  Return the host address.
 */
static void *atomic_mmu_lookup(CPUArchState *env, vaddr addr, MemOpIdx oi,
                               int size, uintptr_t retaddr)
{
    MemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    void *ret;

    /* Enforce guest required alignment.  */
    if (unlikely(addr & ((1 << a_bits) - 1))) {
        cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & (size - 1))) {
        cpu_loop_exit_atomic(env_cpu(env), retaddr);
    }

    ret = g2h(env_cpu(env), addr);
    set_helper_retaddr(retaddr);
    return ret;
}
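
/*
 * Example (illustrative): a 4-byte atomic at an address with bit 1 set
 * raises SIGBUS if the guest memop required 4-byte alignment; otherwise
 * it falls through to cpu_loop_exit_atomic(), since qemu itself cannot
 * perform a misaligned host atomic and must take the exclusive slow path.
 */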

#include "atomic_common.c.inc"

/*
 * First set of functions passes in OI and RETADDR.
 * This makes them callable from other helpers.
 */

#define ATOMIC_NAME(X) \
    glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
#define ATOMIC_MMU_CLEANUP do { clear_helper_retaddr(); } while (0)
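
/*
 * Example (illustrative): with SUFFIX "l" and END "_le", as set up by
 * atomic_template.h for DATA_SIZE 4, ATOMIC_NAME(cmpxchg) expands to
 * cpu_atomic_cmpxchgl_le_mmu, one entry point per inclusion below.
 */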

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif
