uboot/arch/arm/cpu/armv8/cache_v8.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 *  With 4k page granule, a virtual address is split into 4 lookup parts
 *  spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */
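/*
 * Editor's note: a short worked example of the split above (illustration
 * only, not part of the original file). Take VA 0x40201000, i.e.
 * 1 GiB + 2 MiB + 4 KiB:
 *
 *    Lv0 index = (0x40201000 >> 39) & 0x1FF = 0
 *    Lv1 index = (0x40201000 >> 30) & 0x1FF = 1
 *    Lv2 index = (0x40201000 >> 21) & 0x1FF = 1
 *    Lv3 index = (0x40201000 >> 12) & 0x1FF = 1
 *    offset    =  0x40201000 & 0xFFF        = 0
 *
 * find_pte() below performs exactly this per-level extraction while walking
 * the tables.
 */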

u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
        u64 max_addr = 0;
        u64 ips, va_bits;
        u64 tcr;
        int i;

        /* Find the largest address we need to support */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

        /* Calculate the maximum physical (and thus virtual) address */
        if (max_addr > (1ULL << 44)) {
                ips = 5;
                va_bits = 48;
        } else if (max_addr > (1ULL << 42)) {
                ips = 4;
                va_bits = 44;
        } else if (max_addr > (1ULL << 40)) {
                ips = 3;
                va_bits = 42;
        } else if (max_addr > (1ULL << 36)) {
                ips = 2;
                va_bits = 40;
        } else if (max_addr > (1ULL << 32)) {
                ips = 1;
                va_bits = 36;
        } else {
                ips = 0;
                va_bits = 32;
        }

        if (el == 1) {
                tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
        } else if (el == 2) {
                tcr = TCR_EL2_RSVD | (ips << 16);
        } else {
                tcr = TCR_EL3_RSVD | (ips << 16);
        }

        /* PTWs cacheable, inner/outer WBWA and inner shareable */
        tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
        tcr |= TCR_T0SZ(va_bits);

        if (pips)
                *pips = ips;
        if (pva_bits)
                *pva_bits = va_bits;

        return tcr;
}
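/*
 * Editor's note (illustration only): for a mem_map whose highest mapped
 * address is 0x180000000 (6 GiB), the loop above yields
 * max_addr = 0x180000000, which lands in the "> (1ULL << 32)" bucket:
 * ips = 1 (36-bit PA range) and va_bits = 36, so the T0SZ field ends up as
 * 64 - 36 = 28. At EL2/EL3 the ips value is shifted to bit 16, at EL1 to
 * bit 32, matching the PS/IPS field positions of the respective TCR.
 */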

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
        return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
        /* Page offset is 12 bits wide, every level translates 9 bits */
        return (12 + 9 * (3 - level));
}
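/*
 * Editor's note (illustration only): level2shift(3) = 12 (4 KiB pages),
 * level2shift(2) = 21 (2 MiB blocks), level2shift(1) = 30 (1 GiB blocks)
 * and level2shift(0) = 39 (512 GiB covered per Lv0 entry), matching the
 * address layout diagram at the top of this file.
 */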

static u64 *find_pte(u64 addr, int level)
{
        int start_level = 0;
        u64 *pte;
        u64 idx;
        u64 va_bits;
        int i;

        debug("addr=%llx level=%d\n", addr, level);

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        if (level < start_level)
                return NULL;

        /* Walk through all page table levels to find our PTE */
        pte = (u64*)gd->arch.tlb_addr;
        for (i = start_level; i < 4; i++) {
                idx = (addr >> level2shift(i)) & 0x1FF;
                pte += idx;
                debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

                /* Found it */
                if (i == level)
                        return pte;
                /* PTE is not a table (either invalid or block), can't traverse */
                if (pte_type(pte) != PTE_TYPE_TABLE)
                        return NULL;
                /* Off to the next level */
                pte = (u64*)(*pte & 0x0000fffffffff000ULL);
        }

        /* Should never reach here */
        return NULL;
}
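/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * "va" is a hypothetical, already mapped address; only helpers defined above
 * are used.
 *
 *      u64 *pte = find_pte(va, 2);
 *
 *      if (pte && pte_type(pte) == PTE_TYPE_BLOCK)
 *              debug("%llx is mapped by a 2 MiB block PTE\n", va);
 */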

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
        u64 *new_table = (u64*)gd->arch.tlb_fillptr;
        u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

        /* Allocate MAX_PTE_ENTRIES pte entries */
        gd->arch.tlb_fillptr += pt_len;

        if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
                panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
                      "Please increase the size in get_page_table_size()",
                        gd->arch.tlb_fillptr - gd->arch.tlb_addr,
                        gd->arch.tlb_size);

        /* Mark all entries as invalid */
        memset(new_table, 0, pt_len);

        return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
        /* Point *pte to the new table */
        debug("Setting %p to addr=%p\n", pte, table);
        *pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
        u64 old_pte = *pte;
        u64 *new_table;
        u64 i = 0;
        /* level describes the parent level, we need the child ones */
        int levelshift = level2shift(level + 1);

        if (pte_type(pte) != PTE_TYPE_BLOCK)
                panic("PTE %p (%llx) is not a block. Some driver code wants to "
                      "modify dcache settings for a range not covered in "
                      "mem_map.", pte, old_pte);

        new_table = create_table();
        debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

        for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                new_table[i] = old_pte | (i << levelshift);

                /* Level 3 block PTEs have the table type */
                if ((level + 1) == 3)
                        new_table[i] |= PTE_TYPE_TABLE;

                debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
        }

        /* Set the new table into effect */
        set_pte_table(pte, new_table);
}
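/*
 * Editor's note (illustration only): splitting a 1 GiB block PTE at level 1
 * creates one new table of 512 level-2 entries; entry i inherits the old
 * attributes and covers old_base + i * 2 MiB, so the mapping is unchanged
 * until the caller modifies individual sub-entries afterwards.
 */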

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
        u64 *pte;
        u64 virt = map->virt;
        u64 phys = map->phys;
        u64 size = map->size;
        u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
        u64 blocksize;
        int level;
        u64 *new_table;

        while (size) {
                pte = find_pte(virt, 0);
                if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
                        debug("Creating table for virt 0x%llx\n", virt);
                        new_table = create_table();
                        set_pte_table(pte, new_table);
                }

                for (level = 1; level < 4; level++) {
                        pte = find_pte(virt, level);
                        if (!pte)
                                panic("pte not found\n");

                        blocksize = 1ULL << level2shift(level);
                        debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
                              virt, size, blocksize);
                        if (size >= blocksize && !(virt & (blocksize - 1))) {
                                /* Page fits, create block PTE */
                                debug("Setting PTE %p to block virt=%llx\n",
                                      pte, virt);
                                if (level == 3)
                                        *pte = phys | attrs | PTE_TYPE_PAGE;
                                else
                                        *pte = phys | attrs;
                                virt += blocksize;
                                phys += blocksize;
                                size -= blocksize;
                                break;
                        } else if (pte_type(pte) == PTE_TYPE_FAULT) {
                                /* Page doesn't fit, create subpages */
                                debug("Creating subtable for virt 0x%llx blksize=%llx\n",
                                      virt, blocksize);
                                new_table = create_table();
                                set_pte_table(pte, new_table);
                        } else if (pte_type(pte) == PTE_TYPE_BLOCK) {
                                debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
                                      virt, blocksize);
                                split_block(pte, level);
                        }
                }
        }
}
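/*
 * Editor's note (illustration only): for a hypothetical mem_map entry with
 * virt = phys = 0x80000000, size = 0x40200000 and normal-memory attrs,
 * add_map() first emits one 1 GiB level-1 block for [0x80000000, 0xC0000000)
 * and then one 2 MiB level-2 block for [0xC0000000, 0xC0200000): each
 * iteration uses the largest block size that is both size- and
 * alignment-compatible with the remaining range.
 */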

enum pte_type {
        PTE_INVAL,
        PTE_BLOCK,
        PTE_LEVEL,
};

/*
 * Recursively counts the number of page tables needed to cover a
 * particular PTE range. Calling this with level = -1 yields the full
 * 48-bit coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 levelmask = levelsize - 1;
        u64 levelend = addr + levelsize;
        int r = 0;
        int i;
        enum pte_type pte_type = PTE_INVAL;

        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
                struct mm_region *map = &mem_map[i];
                u64 start = map->virt;
                u64 end = start + map->size;

                /* Check if the PTE would overlap with the map */
                if (max(addr, start) <= min(levelend, end)) {
                        start = max(addr, start);
                        end = min(levelend, end);

                        /* We need a sub-pt for this level */
                        if ((start & levelmask) || (end & levelmask)) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* Lv0 cannot do block PTEs, so do levels here too */
                        if (level <= 0) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* PTE is active, but fits into a block */
                        pte_type = PTE_BLOCK;
                }
        }

        /*
         * Block PTEs at this level are already covered by the parent page
         * table, so we only need to count sub page tables.
         */
        if (pte_type == PTE_LEVEL) {
                int sublevel = level + 1;
                u64 sublevelsize = 1ULL << level2shift(sublevel);

                /* Account for the new sub page table ... */
                r = 1;

                /* ... and for all child page tables that one might have */
                for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                        r += count_required_pts(addr, sublevel, maxaddr);
                        addr += sublevelsize;

                        if (addr >= maxaddr) {
                                /*
                                 * We reached the end of address space, no need
                                 * to look any further.
                                 */
                                break;
                        }
                }
        }

        return r;
}
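/*
 * Editor's note (worked example, illustration only): with a single mem_map
 * region covering [0, 4 GiB) and va_bits = 40 (start_level = 0), the outer
 * call at level -1 sees an overlap that is not aligned to its level size and
 * counts the Lv0 table (1); its first level-0 child again needs a sub-table
 * and counts the Lv1 table (1), whose four 1 GiB entries are plain blocks
 * and add nothing. The result is 2 page tables.
 */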

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
        u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
        u64 size = 0;
        u64 va_bits;
        int start_level = 0;

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        /* Account for all page tables we would need to cover our memory map */
        size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

        /*
         * We need to duplicate our page table once to have an emergency pt to
         * resort to when splitting page tables later on
         */
        size *= 2;

        /*
         * We may need to split page tables later on if dcache settings change,
         * so reserve up to 4 (an arbitrary pick) page tables for that.
         */
        size += one_pt * 4;

        return size;
}
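/*
 * Editor's note (illustration only): continuing the worked example above,
 * one_pt = 512 * 8 bytes = 4 KiB and count_required_pts() returned 2, so the
 * estimate is 2 * 4 KiB, doubled for the emergency tables and padded with
 * 4 spare tables: 16 KiB + 16 KiB = 32 KiB. Boards with more fragmented
 * memory maps can override this __weak function with their own figure.
 */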

void setup_pgtables(void)
{
        int i;

        if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
                panic("Page table pointer not set up.");

        /*
         * Allocate the first level table we start on, filled with invalid
         * entries. If the starting level is 0 (va_bits >= 39), then this is
         * our Lv0 page table, otherwise it's the entry Lv1 page table.
         */
        create_table();

        /* Now add all MMU table entries one after another to the table */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
        u64 tlb_addr = gd->arch.tlb_addr;
        u64 tlb_size = gd->arch.tlb_size;

        /* Reset the fill ptr */
        gd->arch.tlb_fillptr = tlb_addr;

        /* Create normal system page tables */
        setup_pgtables();

        /* Create emergency page tables */
        gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
                             (uintptr_t)gd->arch.tlb_addr;
        gd->arch.tlb_addr = gd->arch.tlb_fillptr;
        setup_pgtables();
        gd->arch.tlb_emerg = gd->arch.tlb_addr;
        gd->arch.tlb_addr = tlb_addr;
        gd->arch.tlb_size = tlb_size;
}
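/*
 * Editor's note (illustration only): after setup_all_pgtables() the TLB
 * region reserved by the board looks roughly like this:
 *
 *    tlb_addr                        tlb_emerg
 *    |-- primary page tables ... ----|-- emergency page tables ... --|
 *
 * The emergency copy is built from the same mem_map as the primary one and
 * is only made the active TTBR while mmu_set_region_dcache_behaviour() edits
 * the primary set.
 */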

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
        int el;

        /* Set up page tables only once */
        if (!gd->arch.tlb_fillptr)
                setup_all_pgtables();

        el = current_el();
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                          MEMORY_ATTRIBUTES);

        /* Enable the MMU */
        set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
        __asm_invalidate_dcache_all();
        __asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
        int ret;

        __asm_flush_dcache_all();
        ret = __asm_flush_l3_dcache();
        if (ret)
                debug("flushing dcache returns 0x%x\n", ret);
        else
                debug("flushed dcache successfully\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
        /* The data cache is not active unless the mmu is enabled */
        if (!(get_sctlr() & CR_M)) {
                invalidate_dcache_all();
                __asm_invalidate_tlb_all();
                mmu_setup();
        }

        set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
        uint32_t sctlr;

        sctlr = get_sctlr();

        /* if cache isn't enabled no need to disable */
        if (!(sctlr & CR_C))
                return;

        set_sctlr(sctlr & ~(CR_C|CR_M));

        flush_dcache_all();
        __asm_invalidate_tlb_all();
}

int dcache_status(void)
{
        return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
        puts("No page table offset defined\n");

        return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
        return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 *pte = find_pte(start, level);

        /* Can we just modify the current level block PTE? */
        if (is_aligned(start, size, levelsize)) {
                if (flag) {
                        *pte &= ~PMD_ATTRMASK;
                        *pte |= attrs & PMD_ATTRMASK;
                } else {
                        *pte &= ~PMD_ATTRINDX_MASK;
                        *pte |= attrs & PMD_ATTRINDX_MASK;
                }
                debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

                return levelsize;
        }

        /* Unaligned or doesn't fit, maybe split block into table */
        debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

        /* Maybe we need to split the block into a table */
        if (pte_type(pte) == PTE_TYPE_BLOCK)
                split_block(pte, level);

        /* And then double-check it became a table or already is one */
        if (pte_type(pte) != PTE_TYPE_TABLE)
                panic("PTE %p (%llx) for addr=%llx should be a table",
                      pte, *pte, start);

        /* Roll on to the next page table level */
        return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
        u64 attrs = PMD_ATTRINDX(option >> 2);
        u64 real_start = start;
        u64 real_size = size;

        debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

        if (!gd->arch.tlb_emerg)
                panic("Emergency page table not set up.");

        /*
         * We cannot modify page tables that we're currently running on,
         * so we first need to switch to the "emergency" page tables where
         * we can safely modify our primary page tables and then switch back.
         */
        __asm_switch_ttbr(gd->arch.tlb_emerg);

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes.
         */
        while (size > 0) {
                int level;
                u64 r;

                for (level = 1; level < 4; level++) {
                        /* Set d-cache attributes only */
                        r = set_one_region(start, size, attrs, false, level);
                        if (r) {
                                /* PTE successfully replaced */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        /* We're done modifying page tables, switch back to our primary ones */
        __asm_switch_ttbr(gd->arch.tlb_addr);

        /*
         * Make sure there's nothing stale in dcache for a region that might
         * have caches off now.
         */
        flush_dcache_range(real_start, real_start + real_size);
}
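/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * A driver that wants a (hypothetical) frame buffer at fb_base/fb_size to be
 * uncached could do:
 *
 *      mmu_set_region_dcache_behaviour(fb_base, fb_size, DCACHE_OFF);
 *
 * DCACHE_OFF comes from enum dcache_option in <asm/system.h>. Note that
 * start and size should be multiples of the 4 KiB granule; otherwise no
 * level can ever satisfy set_one_region()'s alignment check and the loop
 * above cannot make progress.
 */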

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make. The target region will be marked as
 * invalid during the process of changing.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
        int level;
        u64 r, size, start;

        start = addr;
        size = siz;
        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to "invalid".
         */
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to fault */
                        r = set_one_region(start, size, PTE_TYPE_FAULT, true,
                                           level);
                        if (r) {
                                /* PTE successfully invalidated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes.
         */
        start = addr;
        size = siz;
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to new attributes */
                        r = set_one_region(start, size, attrs, true, level);
                        if (r) {
                                /* PTE successfully updated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();
}
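/*
 * Editor's note: illustrative usage sketch, not part of the original file.
 * A caller that wants to (re)mark a DDR region as normal, cacheable,
 * inner-shareable memory might pass something like the following, using the
 * PTE_BLOCK_* / PTE_TYPE_* macros from <asm/armv8/mmu.h> (included above);
 * "base" and "len" are hypothetical:
 *
 *      mmu_change_region_attr(base, len,
 *                             PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *                             PTE_BLOCK_INNER_SHARE |
 *                             PTE_TYPE_VALID);
 */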

#else   /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * run, however, really wants to have the dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
        return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
}

#endif  /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
        invalidate_icache_all();
        set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
        set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
        return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
        return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
        __asm_invalidate_icache_all();
        __asm_invalidate_l3_icache();
}

#else   /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
        return 0;
}

int mmu_status(void)
{
        return 0;
}

void invalidate_icache_all(void)
{
}

#endif  /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dcache & icache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
        icache_enable();
        dcache_enable();
}