uboot/arch/arm/cpu/armv8/cache_v8.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 *  With 4k page granule, a virtual address is split into 4 lookup parts
 *  spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */

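/*
 * Worked example: for the (hypothetical) virtual address 0x40201000 the
 * lookup indices are Lv0 = 0, Lv1 = 1, Lv2 = 1, Lv3 = 1 and the page
 * offset is 0.
 */

/*
 * Compute the TCR value for the given exception level from mem_map: the
 * smallest supported PA/VA size that still covers the highest mapped
 * address is chosen, e.g. a map topping out at or below 4 GiB results in
 * ips = 0 and va_bits = 32.
 */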
u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
        u64 max_addr = 0;
        u64 ips, va_bits;
        u64 tcr;
        int i;

        /* Find the largest address we need to support */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

        /* Calculate the maximum physical (and thus virtual) address */
        if (max_addr > (1ULL << 44)) {
                ips = 5;
                va_bits = 48;
        } else if (max_addr > (1ULL << 42)) {
                ips = 4;
                va_bits = 44;
        } else if (max_addr > (1ULL << 40)) {
                ips = 3;
                va_bits = 42;
        } else if (max_addr > (1ULL << 36)) {
                ips = 2;
                va_bits = 40;
        } else if (max_addr > (1ULL << 32)) {
                ips = 1;
                va_bits = 36;
        } else {
                ips = 0;
                va_bits = 32;
        }

        if (el == 1) {
                tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
        } else if (el == 2) {
                tcr = TCR_EL2_RSVD | (ips << 16);
        } else {
                tcr = TCR_EL3_RSVD | (ips << 16);
        }

        /* PTWs cacheable, inner/outer WBWA and inner shareable */
        tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
        tcr |= TCR_T0SZ(va_bits);

        if (pips)
                *pips = ips;
        if (pva_bits)
                *pva_bits = va_bits;

        return tcr;
}

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
        return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
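/* e.g. level 0 -> bit 39, level 1 -> 30, level 2 -> 21, level 3 -> 12 */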
static int level2shift(int level)
{
        /* Page offset is 12 bits wide, every level translates 9 bits */
        return (12 + 9 * (3 - level));
}

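/*
 * Walk the page tables at gd->arch.tlb_addr and return a pointer to the
 * PTE that describes <addr> at exactly <level>, or NULL if the walk ends
 * early (invalid/block entry, or <level> is above the starting level).
 */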
static u64 *find_pte(u64 addr, int level)
{
        int start_level = 0;
        u64 *pte;
        u64 idx;
        u64 va_bits;
        int i;

        debug("addr=%llx level=%d\n", addr, level);

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        if (level < start_level)
                return NULL;

        /* Walk through all page table levels to find our PTE */
        pte = (u64*)gd->arch.tlb_addr;
        for (i = start_level; i < 4; i++) {
                idx = (addr >> level2shift(i)) & 0x1FF;
                pte += idx;
                debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

                /* Found it */
                if (i == level)
                        return pte;
                /* PTE is not a table (either invalid or block), can't traverse */
                if (pte_type(pte) != PTE_TYPE_TABLE)
                        return NULL;
                /* Off to the next level */
                pte = (u64*)(*pte & 0x0000fffffffff000ULL);
        }

        /* Should never reach here */
        return NULL;
}

/* Creates and returns a new full table (512 entries) */
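/* Tables are bump-allocated from gd->arch.tlb_fillptr and are never freed */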
static u64 *create_table(void)
{
        u64 *new_table = (u64*)gd->arch.tlb_fillptr;
        u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

        /* Allocate MAX_PTE_ENTRIES pte entries */
        gd->arch.tlb_fillptr += pt_len;

        if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
                panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
                      "Please increase the size in get_page_table_size()",
                        gd->arch.tlb_fillptr - gd->arch.tlb_addr,
                        gd->arch.tlb_size);

        /* Mark all entries as invalid */
        memset(new_table, 0, pt_len);

        return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
        /* Point *pte to the new table */
        debug("Setting %p to addr=%p\n", pte, table);
        *pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
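/*
 * The new entries inherit the old block's attributes; only the output
 * address advances by one child block size per entry.
 */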
static void split_block(u64 *pte, int level)
{
        u64 old_pte = *pte;
        u64 *new_table;
        u64 i = 0;
        /* level describes the parent level, we need the child ones */
        int levelshift = level2shift(level + 1);

        if (pte_type(pte) != PTE_TYPE_BLOCK)
                panic("PTE %p (%llx) is not a block. Some driver code wants to "
                      "modify dcache settings for a range not covered in "
                      "mem_map.", pte, old_pte);

        new_table = create_table();
        debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

        for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                new_table[i] = old_pte | (i << levelshift);

                /* Level 3 block PTEs have the table type */
                if ((level + 1) == 3)
                        new_table[i] |= PTE_TYPE_TABLE;

                debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
        }

        /* Set the new table into effect */
        set_pte_table(pte, new_table);
}

/* Add one mm_region map entry to the page tables */
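/*
 * The loop below greedily uses the largest block size whose alignment and
 * remaining size allow it, creating or splitting page tables on the way down.
 */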
static void add_map(struct mm_region *map)
{
        u64 *pte;
        u64 virt = map->virt;
        u64 phys = map->phys;
        u64 size = map->size;
        u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
        u64 blocksize;
        int level;
        u64 *new_table;

        while (size) {
                pte = find_pte(virt, 0);
                if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
                        debug("Creating table for virt 0x%llx\n", virt);
                        new_table = create_table();
                        set_pte_table(pte, new_table);
                }

                for (level = 1; level < 4; level++) {
                        pte = find_pte(virt, level);
                        if (!pte)
                                panic("pte not found\n");

                        blocksize = 1ULL << level2shift(level);
                        debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
                              virt, size, blocksize);
                        if (size >= blocksize && !(virt & (blocksize - 1))) {
                                /* Page fits, create block PTE */
                                debug("Setting PTE %p to block virt=%llx\n",
                                      pte, virt);
                                if (level == 3)
                                        *pte = phys | attrs | PTE_TYPE_PAGE;
                                else
                                        *pte = phys | attrs;
                                virt += blocksize;
                                phys += blocksize;
                                size -= blocksize;
                                break;
                        } else if (pte_type(pte) == PTE_TYPE_FAULT) {
                                /* Page doesn't fit, create subpages */
                                debug("Creating subtable for virt 0x%llx blksize=%llx\n",
                                      virt, blocksize);
                                new_table = create_table();
                                set_pte_table(pte, new_table);
                        } else if (pte_type(pte) == PTE_TYPE_BLOCK) {
                                debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
                                      virt, blocksize);
                                split_block(pte, level);
                        }
                }
        }
}

enum pte_type {
        PTE_INVAL,
        PTE_BLOCK,
        PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
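/* Note: level2shift(-1) == 48, so level -1 covers the whole 48 bit VA space */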
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 levelmask = levelsize - 1;
        u64 levelend = addr + levelsize;
        int r = 0;
        int i;
        enum pte_type pte_type = PTE_INVAL;

        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
                struct mm_region *map = &mem_map[i];
                u64 start = map->virt;
                u64 end = start + map->size;

                /* Check if the PTE would overlap with the map */
                if (max(addr, start) <= min(levelend, end)) {
                        start = max(addr, start);
                        end = min(levelend, end);

                        /* We need a sub-pt for this level */
                        if ((start & levelmask) || (end & levelmask)) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* Lv0 cannot do block PTEs, so do levels here too */
                        if (level <= 0) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* PTE is active, but fits into a block */
                        pte_type = PTE_BLOCK;
                }
        }

        /*
         * Block PTEs at this level are already covered by the parent page
         * table, so we only need to count sub page tables.
         */
        if (pte_type == PTE_LEVEL) {
                int sublevel = level + 1;
                u64 sublevelsize = 1ULL << level2shift(sublevel);

                /* Account for the new sub page table ... */
                r = 1;

                /* ... and for all child page tables that one might have */
                for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                        r += count_required_pts(addr, sublevel, maxaddr);
                        addr += sublevelsize;

                        if (addr >= maxaddr) {
                                /*
                                 * We reached the end of address space, no need
                                 * to look any further.
                                 */
                                break;
                        }
                }
        }

        return r;
}

/* Returns the estimated required size of all page tables */
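/*
 * Boards can override this weak function with a fixed size if the estimate
 * below turns out to be too small (see the panic in create_table()).
 */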
__weak u64 get_page_table_size(void)
{
        u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
        u64 size = 0;
        u64 va_bits;
        int start_level = 0;

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        /* Account for all page tables we would need to cover our memory map */
        size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

        /*
         * We need to duplicate our page table once to have an emergency pt to
         * resort to when splitting page tables later on
         */
        size *= 2;

        /*
         * We may need to split page tables later on if dcache settings change,
         * so reserve up to 4 (random pick) page tables for that.
         */
        size += one_pt * 4;

        return size;
}

void setup_pgtables(void)
{
        int i;

        if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
                panic("Page table pointer not set up.");

        /*
         * Allocate the first level we're on with invalid entries.
         * If the starting level is 0 (va_bits >= 39), then this is our
         * Lv0 page table, otherwise it's the entry Lv1 page table.
         */
        create_table();

        /* Now add all MMU table entries one after another to the table */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                add_map(&mem_map[i]);
}

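/*
 * Build both the primary and the emergency page tables inside the region at
 * gd->arch.tlb_addr; the emergency copy is what we run on while the primary
 * tables are modified later (see mmu_set_region_dcache_behaviour()).
 */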
static void setup_all_pgtables(void)
{
        u64 tlb_addr = gd->arch.tlb_addr;
        u64 tlb_size = gd->arch.tlb_size;

        /* Reset the fill ptr */
        gd->arch.tlb_fillptr = tlb_addr;

        /* Create normal system page tables */
        setup_pgtables();

        /* Create emergency page tables */
        gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
                             (uintptr_t)gd->arch.tlb_addr;
        gd->arch.tlb_addr = gd->arch.tlb_fillptr;
        setup_pgtables();
        gd->arch.tlb_emerg = gd->arch.tlb_addr;
        gd->arch.tlb_addr = tlb_addr;
        gd->arch.tlb_size = tlb_size;
}

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
        int el;

        /* Set up page tables only once */
        if (!gd->arch.tlb_fillptr)
                setup_all_pgtables();

        el = current_el();
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                          MEMORY_ATTRIBUTES);

        /* enable the mmu */
        set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
        __asm_invalidate_dcache_all();
        __asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
        int ret;

        __asm_flush_dcache_all();
        ret = __asm_flush_l3_dcache();
        if (ret)
                debug("flushing dcache returns 0x%x\n", ret);
        else
                debug("flushing dcache successfully.\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
        /* The data cache is not active unless the mmu is enabled */
        if (!(get_sctlr() & CR_M)) {
                invalidate_dcache_all();
                __asm_invalidate_tlb_all();
                mmu_setup();
        }

        set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
        uint32_t sctlr;

        sctlr = get_sctlr();

        /* if cache isn't enabled no need to disable */
        if (!(sctlr & CR_C))
                return;

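        /* Clear C and M first so the flush below sees no new allocations */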
        set_sctlr(sctlr & ~(CR_C|CR_M));

        flush_dcache_all();
        __asm_invalidate_tlb_all();
}

int dcache_status(void)
{
        return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
        puts("No page table offset defined\n");

        return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
        return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
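/*
 * Returns the size covered when the PTE at this level could be updated in
 * place, or 0 when the caller has to descend one level (after any block
 * split done here).
 */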
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 *pte = find_pte(start, level);

        /* Can we just modify the current level block PTE? */
        if (is_aligned(start, size, levelsize)) {
                if (flag) {
                        *pte &= ~PMD_ATTRMASK;
                        *pte |= attrs & PMD_ATTRMASK;
                } else {
                        *pte &= ~PMD_ATTRINDX_MASK;
                        *pte |= attrs & PMD_ATTRINDX_MASK;
                }
                debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

                return levelsize;
        }

        /* Unaligned or doesn't fit, maybe split block into table */
        debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

        /* Maybe we need to split the block into a table */
        if (pte_type(pte) == PTE_TYPE_BLOCK)
                split_block(pte, level);

        /* And then double-check it became a table or already is one */
        if (pte_type(pte) != PTE_TYPE_TABLE)
                panic("PTE %p (%llx) for addr=%llx should be a table",
                      pte, *pte, start);

        /* Roll on to the next page table level */
        return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
        u64 attrs = PMD_ATTRINDX(option);
        u64 real_start = start;
        u64 real_size = size;

        debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

        if (!gd->arch.tlb_emerg)
                panic("Emergency page table not set up.");

        /*
         * We cannot modify page tables that we're currently running on,
         * so we first need to switch to the "emergency" page tables where
         * we can safely modify our primary page tables and then switch back
         */
        __asm_switch_ttbr(gd->arch.tlb_emerg);

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes
         */
        while (size > 0) {
                int level;
                u64 r;

                for (level = 1; level < 4; level++) {
                        /* Set d-cache attributes only */
                        r = set_one_region(start, size, attrs, false, level);
                        if (r) {
                                /* PTE successfully replaced */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        /* We're done modifying page tables, switch back to our primary ones */
        __asm_switch_ttbr(gd->arch.tlb_addr);

        /*
         * Make sure there's nothing stale in dcache for a region that might
         * have caches off now
         */
        flush_dcache_range(real_start, real_start + real_size);
}

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The procedure is break-before-make: the target region is marked invalid
 * while the change is in progress.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
        int level;
        u64 r, size, start;

        start = addr;
        size = siz;
        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to "invalid".
         */
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to fault */
                        r = set_one_region(start, size, PTE_TYPE_FAULT, true,
                                           level);
                        if (r) {
                                /* PTE successfully invalidated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes
         */
        start = addr;
        size = siz;
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to new attributes */
                        r = set_one_region(start, size, attrs, true, level);
                        if (r) {
                                /* PTE successfully updated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();
}

#else   /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. U-Boot proper,
 * however, really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
        return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
}

#endif  /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
        invalidate_icache_all();
        set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
        set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
        return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
        __asm_invalidate_icache_all();
        __asm_invalidate_l3_icache();
}

#else   /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
        return 0;
}

void invalidate_icache_all(void)
{
}

#endif  /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches actually get enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
        icache_enable();
        dcache_enable();
}