uboot/arch/arc/lib/cache.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * An entire-invalidate operation on the data cache (L1 D$ or SL$), or a data
 * cache disable operation, may result in unexpected behavior and data loss
 * even if we flush the data cache right before invalidation. That may happen
 * if we store any context on the stack (as we store the BLINK register on
 * the stack before a function call). BLINK is the register where the return
 * address is automatically saved when we make a function call with
 * instructions like 'bl'.
 *
 * Here is a real example:
 * We may hang in the code below, as we store the BLINK register on the stack
 * in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)               ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                 ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all
 *         // function: we saved it to the stack and then invalidated L1 D$!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 dcache, as we invalidated it in
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory). But in memory we have the value saved in [point 1],
 *     // which is the return address of the flush_dcache_all function
 *     // (instead of the address of the current invalidate_dcache_all
 *     // function, which we saved in [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we may fix that by performing flush & invalidation of D$ with
 * a single instruction (instead of a flush/invalidate instruction pair) and
 * by forcing function inlining with the '__attribute__((always_inline))' gcc
 * attribute, to avoid any function call (and BLINK store) between the cache
 * flush and disable.
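 *
 * As a minimal sketch of that fix (the real code is the 'inlined_cachefunc'
 * helpers and flush_n_invalidate_dcache_all() below in this file):
 *
 * void flush_n_invalidate_dcache_all(void) {
 *     __dc_entire_op(OP_FLUSH_N_INV);  // single flush-n-inv op, callee inlined
 * }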
 *
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW but we don't support them in SW.
 * Configuration 1:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 2:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                      |
 *       |   L2 (SL$)           |
 *       |______________________|
 *          always on (ARCv2, HS <  3.0)
 *          on/off    (ARCv2, HS >= 3.0)
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 *
 * Configuration 3:
 *        ______________________
 *       |                      |
 *       |   ARC CPU            |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off        must be on
 *        ___|______________|____      _______
 *       |                      |     |       |
 *       |   L2 (SL$)           |-----|  IOC  |
 *       |______________________|     |_______|
 *          must always be on          on/off
 *        ___|______________|____
 *       |                      |
 *       |   main memory        |
 *       |______________________|
 */

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE   BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE   BIT(0)
#define DC_CTRL_INV_MODE_FLUSH  BIT(6)
#define DC_CTRL_FLUSH_STATUS    BIT(8)

#define OP_INV                  BIT(0)
#define OP_FLUSH                BIT(1)
#define OP_FLUSH_N_INV          (OP_FLUSH | OP_INV)

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS            0x001
#define SLC_CTRL_IM             0x040
#define SLC_CTRL_BUSY           0x100
#define SLC_CTRL_RGN_OP_INV     0x200

#define CACHE_LINE_MASK         (~(gd->arch.l1_line_sz - 1))
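
/*
 * A worked example of the mask (illustrative numbers): with a 64-byte L1
 * line, CACHE_LINE_MASK is ~0x3F, so an address like 0x80000013 rounds down
 * to its line base 0x80000000 when ANDed with the mask.
 */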

/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to plain 'inline' in some cases, which breaks stuff. See
 * [ NOTE 1 ] for more details about the reasons we need always-inlined
 * functions.
 */
#define inlined_cachefunc        inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);
static inlined_cachefunc void __slc_entire_op(const int op);
static inlined_cachefunc bool ioc_enabled(void);

static inline bool pae_exists(void)
{
        /* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
        union bcr_mmu_4 mmu4;

        mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

        if (mmu4.fields.pae)
                return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

        return false;
}

static inlined_cachefunc bool icache_exists(void)
{
        union bcr_di_cache ibcr;

        ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
        return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
        if (!icache_exists())
                return false;

        return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
        union bcr_di_cache dbcr;

        dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
        return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
        if (!dcache_exists())
                return false;

        return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
        if (is_isa_arcv2()) {
                union bcr_generic sbcr;

                sbcr.word = read_aux_reg(ARC_BCR_SLC);
                return !!sbcr.fields.ver;
        }

        return false;
}

enum slc_dis_status {
        ST_SLC_MISSING = 0,
        ST_SLC_NO_DISABLE_CTRL,
        ST_SLC_DISABLE_CTRL
};

/*
 * ARCv1                                     -> ST_SLC_MISSING
 * ARCv2 && SLC absent                       -> ST_SLC_MISSING
 * ARCv2 && SLC exists && SLC version <= 2   -> ST_SLC_NO_DISABLE_CTRL
 * ARCv2 && SLC exists && SLC version > 2    -> ST_SLC_DISABLE_CTRL
 */
static inlined_cachefunc enum slc_dis_status slc_disable_supported(void)
{
        if (is_isa_arcv2()) {
                union bcr_generic sbcr;

                sbcr.word = read_aux_reg(ARC_BCR_SLC);
                if (sbcr.fields.ver == 0)
                        return ST_SLC_MISSING;
                else if (sbcr.fields.ver <= 2)
                        return ST_SLC_NO_DISABLE_CTRL;
                else
                        return ST_SLC_DISABLE_CTRL;
        }

        return ST_SLC_MISSING;
}
static inlined_cachefunc bool __slc_enabled(void)
{
        return !(read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_DIS);
}

static inlined_cachefunc void __slc_enable(void)
{
        unsigned int ctrl;

        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
        ctrl &= ~SLC_CTRL_DIS;
        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}

static inlined_cachefunc void __slc_disable(void)
{
        unsigned int ctrl;

        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);
        ctrl |= SLC_CTRL_DIS;
        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);
}

static inlined_cachefunc bool slc_enabled(void)
{
        enum slc_dis_status slc_status = slc_disable_supported();

        if (slc_status == ST_SLC_MISSING)
                return false;
        else if (slc_status == ST_SLC_NO_DISABLE_CTRL)
                return true;
        else
                return __slc_enabled();
}

static inlined_cachefunc bool slc_data_bypass(void)
{
        /*
         * If the L1 data cache is disabled, SL$ is bypassed and all
         * load/store requests are sent directly to main memory.
         */
        return !dcache_enabled();
}

void slc_enable(void)
{
        if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
                return;

        if (__slc_enabled())
                return;

        __slc_enable();
}

/* TODO: warn if we are not able to disable SLC */
void slc_disable(void)
{
        if (slc_disable_supported() != ST_SLC_DISABLE_CTRL)
                return;

        /* We don't support disabling the SLC when IOC is in use */
        if (ioc_enabled())
                return;

        if (!__slc_enabled())
                return;

        /*
         * We need to flush the L1 D$ to guarantee that we won't have any
         * writeback operations while disabling the SLC.
         */
        __dc_entire_op(OP_FLUSH);
        __slc_entire_op(OP_FLUSH_N_INV);
        __slc_disable();
}

static inlined_cachefunc bool ioc_exists(void)
{
        if (is_isa_arcv2()) {
                union bcr_clust_cfg cbcr;

                cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
                return cbcr.fields.c;
        }

        return false;
}

static inlined_cachefunc bool ioc_enabled(void)
{
        /*
         * We only check the CONFIG option instead of the IOC HW state, as
         * IOC must be disabled by default.
         */
        if (is_ioc_enabled())
                return ioc_exists();

        return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
        unsigned int ctrl;

        if (!slc_enabled())
                return;

        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

        if (!(op & OP_FLUSH))           /* i.e. OP_INV */
                ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
        else
                ctrl |= SLC_CTRL_IM;

        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

        if (op & OP_INV)        /* Inv or flush-n-inv use same cmd reg */
                write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
        else
                write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

        /* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
        read_aux_reg(ARC_AUX_SLC_CTRL);

        /* Important to wait for flush to complete */
        while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
        /*
         * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers
         * exist only if PAE exists in the current HW, so we have to check
         * pae_exists() before using them.
         */
        if (!pae_exists())
                return;

        /*
         * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
         * as we don't use PAE40.
         */
        write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
        write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

        unsigned int ctrl;
        unsigned long end;

        if (!slc_enabled())
                return;

        /*
         * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
         *  - b'000 (default) is Flush,
         *  - b'001 is Invalidate if CTRL.IM == 0
         *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
         */
        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

        /* Don't rely on the default value of the IM bit */
        if (!(op & OP_FLUSH))           /* i.e. OP_INV */
                ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
        else
                ctrl |= SLC_CTRL_IM;

        if (op & OP_INV)
                ctrl |= SLC_CTRL_RGN_OP_INV;    /* Inv or flush-n-inv */
        else
                ctrl &= ~SLC_CTRL_RGN_OP_INV;

        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

        /*
         * Lower bits are ignored, no need to clip.
         * END needs to be set up before START (the latter triggers the
         * operation). END can't be the same as START, so add
         * (slc_line_sz - 1) to sz.
         */
        end = paddr + sz + gd->arch.slc_line_sz - 1;

        /*
         * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
         * are always == 0 as we don't use PAE40, so we only set up the lower
         * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
         */
        write_aux_reg(ARC_AUX_SLC_RGN_END, end);
        write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

        /* Make sure the "busy" bit reports correct status, see STAR 9001165532 */
        read_aux_reg(ARC_AUX_SLC_CTRL);

        while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

#endif /* CONFIG_ISA_ARCV2 */
}
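
/*
 * A worked example of the END/START math above (illustrative numbers): with
 * a 128-byte SLC line, paddr = 0x80000040 and sz = 0x20 give
 * end = 0x80000040 + 0x20 + 0x7F = 0x800000DF, so END lands in the line
 * after START even for a region smaller than one line.
 */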

static void arc_ioc_setup(void)
{
        /* IOC Aperture start is equal to DDR start */
        unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
        /* IOC Aperture size is equal to DDR size */
        long ap_size = CONFIG_SYS_SDRAM_SIZE;

        /* Unsupported configuration. See [ NOTE 2 ] for more details. */
        if (!slc_exists())
                panic("Trying to enable IOC but SLC is not present");

        if (!slc_enabled())
                panic("Trying to enable IOC but SLC is disabled");

        /* Unsupported configuration. See [ NOTE 2 ] for more details. */
        if (!dcache_enabled())
                panic("Trying to enable IOC but L1 D$ is disabled");

        if (!is_power_of_2(ap_size) || ap_size < 4096)
                panic("IOC Aperture size must be a power of 2 and at least 4 KiB");

        /* IOC Aperture start must be aligned to the size of the aperture */
        if (ap_base % ap_size != 0)
                panic("IOC Aperture start must be aligned to the size of the aperture");

        flush_n_invalidate_dcache_all();

        /*
         * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
         * so setting 0x11 implies 512M, 0x12 implies 1G...
         */
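        /*
         * A quick sanity check of that encoding (illustrative numbers): a
         * 512 MiB aperture gives ap_size / 1024 = 2 ^ 19 KiB, order_base_2()
         * of that is 19, and 19 - 2 = 17 = 0x11 as in the example above.
         */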
        write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
                      order_base_2(ap_size / 1024) - 2);

        write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
        write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
        write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

        union bcr_slc_cfg slc_cfg;

        if (slc_exists()) {
                slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
                gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

                /*
                 * We don't support configurations where L1 I$ or L1 D$ is
                 * absent but SL$ exists. See [ NOTE 2 ] for more details.
                 */
                if (!icache_exists() || !dcache_exists())
                        panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
        }

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
        int dc_line_sz = 0, ic_line_sz = 0;
        union bcr_di_cache ibcr, dbcr;

        /*
         * We don't care much about the I$ line length really, as there are
         * no per-line ops on I$; instead we only do a full invalidation of
         * it on relocation and right before jumping to the OS. Still we
         * check for the insane config with a zero-encoded line length in
         * presence of the version field in the I$ BCR. Just in case.
         */
        ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
        if (ibcr.fields.ver) {
                ic_line_sz = 8 << ibcr.fields.line_len;
                if (!ic_line_sz)
                        panic("Instruction cache exists but line length is 0\n");
        }

        dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
        if (dbcr.fields.ver) {
                gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
                if (!dc_line_sz)
                        panic("Data cache exists but line length is 0\n");
        }
}
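
/*
 * An illustrative decode of the BCR line-length fields above (values
 * assumed): ibcr.fields.line_len == 2 encodes an I$ line of 8 << 2 = 32
 * bytes, while dbcr.fields.line_len == 2 encodes a D$ line of
 * 16 << 2 = 64 bytes.
 */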

void cache_init(void)
{
        read_decode_cache_bcr();

        if (is_isa_arcv2())
                read_decode_cache_bcr_arcv2();

        if (is_isa_arcv2() && ioc_enabled())
                arc_ioc_setup();

        if (is_isa_arcv2() && slc_exists())
                slc_upper_region_init();
}

int icache_status(void)
{
        return icache_enabled();
}

void icache_enable(void)
{
        if (icache_exists())
                write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
                              ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
        if (!icache_exists())
                return;

        __ic_entire_invalidate();

        write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
                      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
        if (!icache_enabled())
                return;

        /* Any write to the IC_IVIC register invalidates the entire I$ */
        write_aux_reg(ARC_AUX_IC_IVIC, 1);
        /*
         * As per the ARC HS databook (see chapter 5.3.3.2)
         * it is required to add 3 NOPs after each write to IC_IVIC.
         */
        __builtin_arc_nop();
        __builtin_arc_nop();
        __builtin_arc_nop();
        read_aux_reg(ARC_AUX_IC_CTRL);  /* blocks */
}

void invalidate_icache_all(void)
{
        __ic_entire_invalidate();

        /*
         * If SL$ is bypassed for data it is used only for instructions,
         * so we need to invalidate it too.
         */
        if (is_isa_arcv2() && slc_data_bypass())
                __slc_entire_op(OP_INV);
}

int dcache_status(void)
{
        return dcache_enabled();
}

void dcache_enable(void)
{
        if (!dcache_exists())
                return;

        write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
                      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
        if (!dcache_exists())
                return;

        __dc_entire_op(OP_FLUSH_N_INV);

        /*
         * As the SLC will be bypassed for data after the L1 D$ is disabled,
         * we need to flush it before disabling the L1 D$. We also invalidate
         * the SLC to avoid inconsistent-data problems after enabling the
         * L1 D$ again with the dcache_enable function.
         */
        if (is_isa_arcv2())
                __slc_entire_op(OP_FLUSH_N_INV);

        write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
                      DC_CTRL_CACHE_DISABLE);
}

/* Common helper for line operations on the D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
                                      const int cacheop)
{
        unsigned int aux_cmd;
        int num_lines;

        /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
        aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

        sz += paddr & ~CACHE_LINE_MASK;
        paddr &= CACHE_LINE_MASK;

        num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);

        while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
                write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
                write_aux_reg(aux_cmd, paddr);
                paddr += gd->arch.l1_line_sz;
        }
}
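
/*
 * A worked example of the rounding above (illustrative numbers, 64-byte
 * lines): paddr = 0x1038 and sz = 0x10 cover bytes 0x1038..0x1047, which
 * straddle two lines. The loop computes sz = 0x10 + 0x38 = 0x48,
 * paddr = 0x1000 and num_lines = DIV_ROUND_UP(0x48, 0x40) = 2, so the
 * per-line op hits lines 0x1000 and 0x1040 as required.
 */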

static inlined_cachefunc void __before_dc_op(const int op)
{
        unsigned int ctrl;

        ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

        /* IM bit implies flush-n-inv, instead of vanilla inv */
        if (op == OP_INV)
                ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
        else
                ctrl |= DC_CTRL_INV_MODE_FLUSH;

        write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
        if (op & OP_FLUSH)      /* flush / flush-n-inv both wait */
                while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
        int aux;

        if (!dcache_enabled())
                return;

        __before_dc_op(cacheop);

        if (cacheop & OP_INV)   /* Inv or flush-n-inv use same cmd reg */
                aux = ARC_AUX_DC_IVDC;
        else
                aux = ARC_AUX_DC_FLSH;

        write_aux_reg(aux, 0x1);

        __after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
                                const int cacheop)
{
        if (!dcache_enabled())
                return;

        __before_dc_op(cacheop);
        __dcache_line_loop(paddr, sz, cacheop);
        __after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        /*
         * ARCv1                                 -> call __dc_line_op
         * ARCv2 && L1 D$ disabled               -> nothing
         * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
         * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
         */
        if (!is_isa_arcv2() || !ioc_enabled())
                __dc_line_op(start, end - start, OP_INV);

        if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
                __slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        /*
         * ARCv1                                 -> call __dc_line_op
         * ARCv2 && L1 D$ disabled               -> nothing
         * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
         * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
         */
        if (!is_isa_arcv2() || !ioc_enabled())
                __dc_line_op(start, end - start, OP_FLUSH);

        if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
                __slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
        flush_dcache_range(start, start + size);
}
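
/*
 * A typical (hypothetical) use of the range ops above around a DMA
 * transfer -- 'buf' and 'len' are assumed names, shown for illustration
 * only:
 *
 * flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 * ... let the device DMA from 'buf' ...
 * invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 * ... now the CPU reads the data the device DMAed into 'buf' ...
 */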

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and we
 * don't need invalidation without flush in arch/arc code alone, we implement
 * flush_n_invalidate_dcache_all() (flush and invalidate in one operation)
 * instead, because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
        __dc_entire_op(OP_FLUSH_N_INV);

        if (is_isa_arcv2() && !slc_data_bypass())
                __slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
        __dc_entire_op(OP_FLUSH);

        if (is_isa_arcv2() && !slc_data_bypass())
                __slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and therefore syncs the I/D caches).
 * It can be used for cleanup before launching Linux or to sync caches
 * during relocation.
 */
void sync_n_cleanup_cache_all(void)
{
        __dc_entire_op(OP_FLUSH_N_INV);

        /*
         * If SL$ is bypassed for data it is used only for instructions,
         * and we shouldn't flush it. So invalidate it instead of
         * flush-n-inv.
         */
        if (is_isa_arcv2()) {
                if (slc_data_bypass())
                        __slc_entire_op(OP_INV);
                else
                        __slc_entire_op(OP_FLUSH_N_INV);
        }

        __ic_entire_invalidate();
}