uboot/arch/arc/lib/cache.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * An entire-invalidate of the data cache (L1 D$ or SL$) or a data cache
 * disable operation may result in unexpected behavior and data loss, even if
 * we flush the data cache right before the invalidation. That may happen if
 * we store any context on the stack (as we do when we store the BLINK
 * register on the stack before a function call). BLINK is the register where
 * the return address is automatically saved when we make a function call
 * with instructions like 'bl'.
 *
 * Here is a real example:
 * We may hang in the code below, as we store the BLINK register on the stack
 * in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)               ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                 ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all
 *         // function: we saved it to the stack and then invalidated the
 *         // L1 D$!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 dcache, as we invalidated it at
 *     // [point 3], so we fetch it from the next memory level (for example,
 *     // DDR memory). But in memory we have the value saved at [point 1],
 *     // which is the return address of the flush_dcache_all function
 *     // (instead of the address of the current invalidate_dcache_all
 *     // function, which we saved at [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we can fix that by doing the flush & invalidation of the D$
 * with one single instruction (instead of a flush/invalidate instruction
 * pair) and by forcing function inlining with the
 * '__attribute__((always_inline))' gcc attribute, to avoid any function call
 * (and BLINK store) between the cache flush and disable.
 *
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW (for example, HS version 3.0 and
 * later supports disabling the SL$ (L2 system level cache)), but we don't
 * support them in SW.
 * Configuration 1:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 2:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   L2 (SL$)           |
 *       |______________________|
 *          always must be on
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 3:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off        must be on
 *        ___|______________|____      _______
 *       |                      |     |       |
 *       |   L2 (SL$)           |-----|  IOC  |
 *       |______________________|     |_______|
 *          always must be on          on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 */
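
/*
 * A minimal sketch of the safe pattern described in [ NOTE 1 ], using the
 * names defined later in this file (the caller 'foo' is hypothetical):
 *
 * void foo(void) {
 *     // Flush and invalidate happen as one entire-cache operation
 *     // (OP_FLUSH_N_INV), so no BLINK push/pop can land between them.
 *     flush_n_invalidate_dcache_all();
 * }
 */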

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)

/* Bit values in SLC_CONTROL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))

/*
 * We don't want to use the '__always_inline' macro here, as it can be
 * redefined to plain 'inline' in some cases, which breaks stuff. See
 * [ NOTE 1 ] for details on why we need always-inlined functions.
 */
#define inlined_cachefunc	inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);

static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}

static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If the L1 data cache is disabled, the SL$ is bypassed and all
	 * load/store requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}

static inline bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inline bool ioc_enabled(void)
{
	/*
	 * We only check the CONFIG option instead of the IOC HW state, as
	 * the IOC must be disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_exists())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

	unsigned int ctrl;
	unsigned long end;

	if (!slc_exists())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]:
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
	end = paddr + sz + gd->arch.slc_line_sz - 1;
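	/*
	 * For example, paddr = 0x8000_0000, sz = 1 and a 64-byte SLC line
	 * give end = 0x8000_0040, keeping END strictly above START even for
	 * a sub-line region.
	 */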

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

#endif /* CONFIG_ISA_ARCV2 */
}

static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Trying to enable IOC but SLC is not present");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Trying to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC aperture size must be a power of 2 and no smaller than 4 KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);
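	/*
	 * For example, ap_size = 512 MiB: ap_size / 1024 = 524288 = 2^19,
	 * so order_base_2() returns 19 and we program 19 - 2 = 17 (0x11),
	 * matching the "0x11 implies 512M" rule above.
	 */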

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support a configuration where the L1 I$ or L1 D$
		 * is absent but the SL$ exists. See [ NOTE 2 ] for more
		 * details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		gd->arch.l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}

	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
		panic("Instruction and data cache line lengths differ\n");
}

void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}

int icache_status(void)
{
	return icache_enabled();
}

void icache_enable(void)
{
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);
	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();
	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}

void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * so we need to invalidate it too.
	 * TODO: HS 3.0 supports SLC disable so we need to check slc
	 * enable/disable status here.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}

int dcache_status(void)
{
	return dcache_enabled();
}

void dcache_enable(void)
{
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC is bypassed for data once the L1 D$ is disabled, we
	 * need to flush it before disabling the L1 D$. We also invalidate
	 * the SLC to avoid inconsistent data problems after enabling the
	 * L1 D$ again with the dcache_enable function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

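	/*
	 * Round paddr down to a cache line boundary and grow sz by the amount
	 * we rounded down. For example, paddr = 0x1234_5604 with 64-byte
	 * lines becomes paddr = 0x1234_5600 and sz grows by 4, so the same
	 * bytes are still covered.
	 */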
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}
}

static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
}
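
/*
 * A usage sketch (caller side, not part of this file): before a device
 * DMA-reads from a buffer, flush the range so the device sees the CPU's
 * writes; after a device DMA-writes a buffer, invalidate the range before
 * the CPU reads it. 'buf' and 'len' are hypothetical:
 *
 *     flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *     // ... device reads buf via DMA ...
 *     invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *     // ... CPU reads the data the device wrote ...
 */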

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

/*
 * invalidate_dcache_all() is not used in generic U-Boot code, and we don't
 * need a bare invalidate (without flush) in arch/arc code either, so instead
 * we implement flush_n_invalidate_dcache_all (flush and invalidate in one
 * operation), which is much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and therefore syncs the I/D caches).
 * It can be used for cleanup before launching Linux or to sync caches
 * during relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If the SL$ is bypassed for data it is used only for instructions,
	 * and we shouldn't flush it. So invalidate it instead of
	 * flush-n-inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}
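
/*
 * A usage sketch (boot path, not part of this file): bring caches to a
 * clean, synced state before jumping to a freshly loaded image. The entry
 * address below is hypothetical:
 *
 *     void (*entry)(void) = (void (*)(void))0x80000000;
 *     sync_n_cleanup_cache_all();  // writeback D$/SL$, invalidate I$
 *     entry();
 */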