uboot/arch/arm/cpu/armv8/cache_v8.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <hang.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 *  With 4k page granule, a virtual address is split into 4 lookup parts
 *  spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */
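
/*
 * Worked example (illustrative): the virtual address 0x40200000 sets only
 * bits 30 and 21, so it splits into Lv0 index 0, Lv1 index 1, Lv2 index 1,
 * Lv3 index 0 and page offset 0. A full walk therefore resolves entry 0 at
 * Lv0, entry 1 at Lv1, and then either a 2M block at Lv2 or, if that entry
 * is a table, entry 0 of a Lv3 page table.
 */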

u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
        u64 max_addr = 0;
        u64 ips, va_bits;
        u64 tcr;
        int i;

        /* Find the largest address we need to support */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

        /* Calculate the maximum physical (and thus virtual) address */
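        /*
         * The IPS values below follow the architectural encoding of the
         * physical address size in TCR_ELx: 0 = 32 bits (4GB),
         * 1 = 36 bits (64GB), 2 = 40 bits (1TB), 3 = 42 bits (4TB),
         * 4 = 44 bits (16TB), 5 = 48 bits (256TB).
         */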
        if (max_addr > (1ULL << 44)) {
                ips = 5;
                va_bits = 48;
        } else if (max_addr > (1ULL << 42)) {
                ips = 4;
                va_bits = 44;
        } else if (max_addr > (1ULL << 40)) {
                ips = 3;
                va_bits = 42;
        } else if (max_addr > (1ULL << 36)) {
                ips = 2;
                va_bits = 40;
        } else if (max_addr > (1ULL << 32)) {
                ips = 1;
                va_bits = 36;
        } else {
                ips = 0;
                va_bits = 32;
        }

        if (el == 1) {
                tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
        } else if (el == 2) {
                tcr = TCR_EL2_RSVD | (ips << 16);
        } else {
                tcr = TCR_EL3_RSVD | (ips << 16);
        }

        /* PTWs cacheable, inner/outer WBWA and inner shareable */
        tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
        tcr |= TCR_T0SZ(va_bits);

        if (pips)
                *pips = ips;
        if (pva_bits)
                *pva_bits = va_bits;

        return tcr;
}

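/*
 * With a 4K granule each translation table occupies exactly one page:
 * 4096 bytes / 8 bytes per descriptor = 512 entries.
 */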
#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
        return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
        /* The page offset is 12 bits wide; every level translates 9 bits */
        return (12 + 9 * (3 - level));
}

static u64 *find_pte(u64 addr, int level)
{
        int start_level = 0;
        u64 *pte;
        u64 idx;
        u64 va_bits;
        int i;

        debug("addr=%llx level=%d\n", addr, level);

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        if (level < start_level)
                return NULL;

        /* Walk through all page table levels to find our PTE */
        pte = (u64 *)gd->arch.tlb_addr;
        for (i = start_level; i < 4; i++) {
                idx = (addr >> level2shift(i)) & 0x1FF;
                pte += idx;
                debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

                /* Found it */
                if (i == level)
                        return pte;
                /* PTE is not a table (either invalid or block), can't traverse */
                if (pte_type(pte) != PTE_TYPE_TABLE)
                        return NULL;
                /* Off to the next level */
                pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
        }

        /* Should never reach here */
        return NULL;
}

/* Creates and returns a new full table (512 entries) */
static u64 *create_table(void)
{
        u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
        u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

        /* Allocate MAX_PTE_ENTRIES pte entries */
        gd->arch.tlb_fillptr += pt_len;

        if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
                panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
                      "Please increase the size in get_page_table_size()",
                        gd->arch.tlb_fillptr - gd->arch.tlb_addr,
                        gd->arch.tlb_size);

        /* Mark all entries as invalid */
        memset(new_table, 0, pt_len);

        return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
        /* Point *pte to the new table */
        debug("Setting %p to addr=%p\n", pte, table);
        *pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
        u64 old_pte = *pte;
        u64 *new_table;
        u64 i = 0;
        /* level describes the parent level, we need the child ones */
        int levelshift = level2shift(level + 1);

        if (pte_type(pte) != PTE_TYPE_BLOCK)
                panic("PTE %p (%llx) is not a block. Some driver code wants to "
                      "modify dcache settings for a range not covered in "
                      "mem_map.", pte, old_pte);

        new_table = create_table();
        debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

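        /*
         * Each child entry inherits the parent block's attributes verbatim;
         * only the output address advances by one child block size per
         * index, so the new table maps exactly the same physical range.
         */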
        for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                new_table[i] = old_pte | (i << levelshift);

                /* Level 3 block PTEs have the table type */
                if ((level + 1) == 3)
                        new_table[i] |= PTE_TYPE_TABLE;

                debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
        }

        /* Set the new table into effect */
        set_pte_table(pte, new_table);
}

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
        u64 *pte;
        u64 virt = map->virt;
        u64 phys = map->phys;
        u64 size = map->size;
        u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
        u64 blocksize;
        int level;
        u64 *new_table;

        while (size) {
                pte = find_pte(virt, 0);
                if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
                        debug("Creating table for virt 0x%llx\n", virt);
                        new_table = create_table();
                        set_pte_table(pte, new_table);
                }

                for (level = 1; level < 4; level++) {
                        pte = find_pte(virt, level);
                        if (!pte)
                                panic("pte not found\n");

                        blocksize = 1ULL << level2shift(level);
                        debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
                              virt, size, blocksize);
                        if (size >= blocksize && !(virt & (blocksize - 1))) {
                                /* Page fits, create block PTE */
                                debug("Setting PTE %p to block virt=%llx\n",
                                      pte, virt);
                                if (level == 3)
                                        *pte = phys | attrs | PTE_TYPE_PAGE;
                                else
                                        *pte = phys | attrs;
                                virt += blocksize;
                                phys += blocksize;
                                size -= blocksize;
                                break;
                        } else if (pte_type(pte) == PTE_TYPE_FAULT) {
                                /* Page doesn't fit, create subpages */
                                debug("Creating subtable for virt 0x%llx blksize=%llx\n",
                                      virt, blocksize);
                                new_table = create_table();
                                set_pte_table(pte, new_table);
                        } else if (pte_type(pte) == PTE_TYPE_BLOCK) {
                                debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
                                      virt, blocksize);
                                split_block(pte, level);
                        }
                }
        }
}
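
/*
 * Illustrative example: a map entry with virt = phys = 0 and
 * size = 0x60000000 (1.5GB) is placed as one 1GB block at level 1
 * followed by 256 2MB blocks at level 2, since after the first
 * gigabyte the remaining 512MB no longer fills a level 1 block.
 */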

enum pte_type {
        PTE_INVAL,
        PTE_BLOCK,
        PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. Calling it
 * with level = -1 yields the full 48 bit coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 levelmask = levelsize - 1;
        u64 levelend = addr + levelsize;
        int r = 0;
        int i;
        enum pte_type pte_type = PTE_INVAL;

        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
                struct mm_region *map = &mem_map[i];
                u64 start = map->virt;
                u64 end = start + map->size;

                /* Check if the PTE would overlap with the map */
                if (max(addr, start) <= min(levelend, end)) {
                        start = max(addr, start);
                        end = min(levelend, end);

                        /* We need a sub-pt for this level */
                        if ((start & levelmask) || (end & levelmask)) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* Lv0 cannot do block PTEs, so do levels here too */
                        if (level <= 0) {
                                pte_type = PTE_LEVEL;
                                break;
                        }

                        /* PTE is active, but fits into a block */
                        pte_type = PTE_BLOCK;
                }
        }

        /*
         * Block PTEs at this level are already covered by the parent page
         * table, so we only need to count sub page tables.
         */
        if (pte_type == PTE_LEVEL) {
                int sublevel = level + 1;
                u64 sublevelsize = 1ULL << level2shift(sublevel);

                /* Account for the new sub page table ... */
                r = 1;

                /* ... and for all child page tables that one might have */
                for (i = 0; i < MAX_PTE_ENTRIES; i++) {
                        r += count_required_pts(addr, sublevel, maxaddr);
                        addr += sublevelsize;

                        if (addr >= maxaddr) {
                                /*
                                 * We reached the end of address space, no need
                                 * to look any further.
                                 */
                                break;
                        }
                }
        }

        return r;
}

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
        u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
        u64 size = 0;
        u64 va_bits;
        int start_level = 0;

        get_tcr(0, NULL, &va_bits);
        if (va_bits < 39)
                start_level = 1;

        /* Account for all page tables we would need to cover our memory map */
        size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

        /*
         * We need to duplicate our page table once to have an emergency pt to
         * resort to when splitting page tables later on
         */
        size *= 2;

        /*
         * We may need to split page tables later on if dcache settings change,
         * so reserve up to 4 (an arbitrary pick) page tables for that.
         */
        size += one_pt * 4;

        return size;
}

void setup_pgtables(void)
{
        int i;

        if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
                panic("Page table pointer not set up.");

        /*
         * Allocate the first level we're on with invalid entries.
         * If the starting level is 0 (va_bits >= 39), then this is our
         * Lv0 page table, otherwise it's the entry Lv1 page table.
         */
        create_table();

        /* Now add all MMU table entries one after another to the table */
        for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
                add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
        u64 tlb_addr = gd->arch.tlb_addr;
        u64 tlb_size = gd->arch.tlb_size;

        /* Reset the fill ptr */
        gd->arch.tlb_fillptr = tlb_addr;

        /* Create normal system page tables */
        setup_pgtables();

        /* Create emergency page tables */
        gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
                             (uintptr_t)gd->arch.tlb_addr;
        gd->arch.tlb_addr = gd->arch.tlb_fillptr;
        setup_pgtables();
        gd->arch.tlb_emerg = gd->arch.tlb_addr;
        gd->arch.tlb_addr = tlb_addr;
        gd->arch.tlb_size = tlb_size;
}

/* To activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
        int el;

        /* Set up page tables only once */
        if (!gd->arch.tlb_fillptr)
                setup_all_pgtables();

        el = current_el();
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                          MEMORY_ATTRIBUTES);

        /* Enable the MMU */
        set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
        __asm_invalidate_dcache_all();
        __asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
        int ret;

        __asm_flush_dcache_all();
        ret = __asm_flush_l3_dcache();
        if (ret)
                debug("flushing dcache returns 0x%x\n", ret);
        else
                debug("flushed dcache successfully.\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
        __asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
        /* The data cache is not active unless the MMU is enabled */
        if (!(get_sctlr() & CR_M)) {
                invalidate_dcache_all();
                __asm_invalidate_tlb_all();
                mmu_setup();
        }

        set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
        uint32_t sctlr;

        sctlr = get_sctlr();

        /* If the cache isn't enabled, there is no need to disable it */
        if (!(sctlr & CR_C))
                return;

        set_sctlr(sctlr & ~(CR_C|CR_M));

        flush_dcache_all();
        __asm_invalidate_tlb_all();
}

int dcache_status(void)
{
        return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
        puts("No page table offset defined\n");

        return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
        return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
        int levelshift = level2shift(level);
        u64 levelsize = 1ULL << levelshift;
        u64 *pte = find_pte(start, level);

        /* Can we just modify the current level block PTE? */
        if (is_aligned(start, size, levelsize)) {
                if (flag) {
                        *pte &= ~PMD_ATTRMASK;
                        *pte |= attrs & PMD_ATTRMASK;
                } else {
                        *pte &= ~PMD_ATTRINDX_MASK;
                        *pte |= attrs & PMD_ATTRINDX_MASK;
                }
                debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

                return levelsize;
        }

        /* Unaligned or doesn't fit, maybe split block into table */
        debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

        /* Maybe we need to split the block into a table */
        if (pte_type(pte) == PTE_TYPE_BLOCK)
                split_block(pte, level);

        /* And then double-check it became a table or already is one */
        if (pte_type(pte) != PTE_TYPE_TABLE)
                panic("PTE %p (%llx) for addr=%llx should be a table",
                      pte, *pte, start);

        /* Roll on to the next page table level */
        return 0;
}
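
/*
 * Note on the return value of set_one_region(): a 2MB region starting on
 * a 2MB boundary is patched in place at level 2 and the call returns
 * 0x200000, while a 4KB region returns 0 at levels 1 and 2 (possibly
 * splitting blocks on the way) until the caller descends to level 3.
 */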

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
        u64 attrs = PMD_ATTRINDX(option >> 2);
        u64 real_start = start;
        u64 real_size = size;

        debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

        if (!gd->arch.tlb_emerg)
                panic("Emergency page table not set up.");

        /*
         * We cannot modify page tables that we're currently running on,
         * so we first need to switch to the "emergency" page tables where
         * we can safely modify our primary page tables and then switch back.
         */
        __asm_switch_ttbr(gd->arch.tlb_emerg);

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes
         */
        while (size > 0) {
                int level;
                u64 r;

                for (level = 1; level < 4; level++) {
                        /* Set d-cache attributes only */
                        r = set_one_region(start, size, attrs, false, level);
                        if (r) {
                                /* PTE successfully replaced */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        /* We're done modifying page tables, switch back to our primary ones */
        __asm_switch_ttbr(gd->arch.tlb_addr);

        /*
         * Make sure there's nothing stale in dcache for a region that might
         * have caches off now
         */
        flush_dcache_range(real_start, real_start + real_size);
}
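
/*
 * Typical (illustrative) driver usage, assuming the DCACHE_OFF option from
 * asm/system.h, to map a 2MB buffer uncached:
 *
 *     mmu_set_region_dcache_behaviour(buf_addr, 0x200000, DCACHE_OFF);
 *
 * start and size need to be at least 4KB aligned, since level 3 is the
 * finest granule the loop above can ever match.
 */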

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is marked invalid
 * while its attributes are being changed.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
        int level;
        u64 r, size, start;

        start = addr;
        size = siz;
        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to "invalid".
         */
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to fault */
                        r = set_one_region(start, size, PTE_TYPE_FAULT, true,
                                           level);
                        if (r) {
                                /* PTE successfully invalidated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }

        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();

        /*
         * Loop through the address range until we find a page granule that fits
         * our alignment constraints, then set it to the new cache attributes
         */
        start = addr;
        size = siz;
        while (size > 0) {
                for (level = 1; level < 4; level++) {
                        /* Set PTE to new attributes */
                        r = set_one_region(start, size, attrs, true, level);
                        if (r) {
                                /* PTE successfully updated */
                                size -= r;
                                start += r;
                                break;
                        }
                }
        }
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);
        __asm_invalidate_tlb_all();
}
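
/*
 * Illustrative call (macro names as defined in asm/armv8/mmu.h), e.g. to
 * remap a block-aligned region as normal memory that is not executable:
 *
 *     mmu_change_region_attr(addr, size,
 *                            PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *                            PTE_BLOCK_INNER_SHARE | PTE_BLOCK_PXN |
 *                            PTE_BLOCK_UXN | PTE_TYPE_VALID);
 *
 * PTE_TYPE_VALID must be part of attrs, since the second pass rewrites the
 * descriptor's low bits as well.
 */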

#else   /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
        return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
                                     enum dcache_option option)
{
}

#endif  /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
        invalidate_icache_all();
        set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
        set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
        return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
        __asm_invalidate_icache_all();
        __asm_invalidate_l3_icache();
}

#else   /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
        return 0;
}

void invalidate_icache_all(void)
{
}

#endif  /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dcache and icache; whether the caches actually get enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
        icache_enable();
        dcache_enable();
}