linux/arch/arm/mm/cache-l2x0.c
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-aurora-l2.h"

#define CACHE_LINE_SIZE         32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;       /* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
        void (*setup)(const struct device_node *, u32 *, u32 *);
        void (*save)(void);
        struct outer_cache_fns outer_cache;
};

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
        /* wait for cache operation by line or way to complete */
        while (readl_relaxed(reg) & mask)
                cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
        /* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait      cache_wait_way
#endif

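/*
 * Drain the controller by writing to the Cache Sync register.
 * sync_reg_offset normally points at L2X0_CACHE_SYNC, but is redirected
 * for parts that need it (PL310 errata 753970, Aurora); see l2x0_init().
 */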
static inline void cache_sync(void)
{
        void __iomem *base = l2x0_base;

        writel_relaxed(0, base + sync_reg_offset);
        cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
        if (outer_cache.set_debug)
                outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
        writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug NULL
#endif

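/*
 * PL310 errata 588369: on affected revisions, the Clean & Invalidate by
 * PA operation does not invalidate clean lines. The workaround issues a
 * separate Clean then Invalidate; callers additionally bracket the
 * sequence with debug_writel(0x03)/debug_writel(0x00) to disable cache
 * linefills and write-back while the operations run.
 */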
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;

        /* Clean by PA followed by Invalidate by PA */
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
        void __iomem *base = l2x0_base;
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
        debug_writel(0x03);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
        cache_sync();
        debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
        unsigned long flags;

        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
        unsigned long flags;

        /* clean all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
        cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
        unsigned long flags;

        /* invalidate all ways */
        raw_spin_lock_irqsave(&l2x0_lock, flags);
        /* Invalidating while the L2 is enabled is a no-no */
        BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
        writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
        cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

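/*
 * Lines at either end of the range that are only partially covered must
 * be cleaned as well as invalidated, otherwise dirty data sharing a
 * cache line with [start, end) would be lost.
 */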
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        if (start & (CACHE_LINE_SIZE - 1)) {
                start &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(start);
                debug_writel(0x00);
                start += CACHE_LINE_SIZE;
        }

        if (end & (CACHE_LINE_SIZE - 1)) {
                end &= ~(CACHE_LINE_SIZE - 1);
                debug_writel(0x03);
                l2x0_flush_line(end);
                debug_writel(0x00);
        }

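        /*
         * Operate on the range in blocks of at most 4K, dropping and
         * re-acquiring the lock between blocks so that interrupts are
         * not held off for the duration of the whole range.
         */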
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_inv_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_INV_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

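/*
 * For ranges at least as large as the cache itself, a clean (or flush)
 * by way of the whole cache is cheaper than iterating over every line.
 */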
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_clean_all();
                return;
        }

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                while (start < blk_end) {
                        l2x0_clean_line(start);
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
        void __iomem *base = l2x0_base;
        unsigned long flags;

        if ((end - start) >= l2x0_size) {
                l2x0_flush_all();
                return;
        }

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        start &= ~(CACHE_LINE_SIZE - 1);
        while (start < end) {
                unsigned long blk_end = start + min(end - start, 4096UL);

                debug_writel(0x03);
                while (start < blk_end) {
                        l2x0_flush_line(start);
                        start += CACHE_LINE_SIZE;
                }
                debug_writel(0x00);

                if (blk_end < end) {
                        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                        raw_spin_lock_irqsave(&l2x0_lock, flags);
                }
        }
        cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
        cache_sync();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        __l2x0_flush_all();
        writel_relaxed(0, l2x0_base + L2X0_CTRL);
        dsb();
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

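/*
 * Clear the data and instruction lockdown registers so that no ways are
 * locked when the cache is enabled. The number of lockdown register
 * pairs differs per part: 8 on the L310, 4 on Aurora, 1 on the L210.
 */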
static void l2x0_unlock(u32 cache_id)
{
        int lockregs;
        int i;

        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                lockregs = 8;
                break;
        case AURORA_CACHE_ID:
                lockregs = 4;
                break;
        default:
                /* L210 and unknown types */
                lockregs = 1;
                break;
        }

        for (i = 0; i < lockregs; i++) {
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
                writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
                               i * L2X0_LOCKDOWN_STRIDE);
        }
}

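/*
 * l2x0_init - detect and enable an L2x0-compatible outer cache
 * @base:     virtual address of the controller's registers
 * @aux_val:  bits to set in the Auxiliary Control register
 * @aux_mask: bits of the hardware Auxiliary Control value to preserve
 *
 * If the controller has not already been enabled (e.g. by secure
 * firmware), unlock all ways, program AUX_CTRL, invalidate the whole
 * cache and enable it.
 */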
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
        u32 aux;
        u32 cache_id;
        u32 way_size = 0;
        int ways;
        int way_size_shift = L2X0_WAY_SIZE_SHIFT;
        const char *type;

        l2x0_base = base;
        if (cache_id_part_number_from_dt)
                cache_id = cache_id_part_number_from_dt;
        else
                cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

        aux &= aux_mask;
        aux |= aux_val;

        /* Determine the number of ways */
        switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
        case L2X0_CACHE_ID_PART_L310:
                if (aux & (1 << 16))
                        ways = 16;
                else
                        ways = 8;
                type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
                /* Errata 753970: sync via an unmapped dummy register. */
                sync_reg_offset = L2X0_DUMMY_REG;
#endif
                if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
                        outer_cache.set_debug = pl310_set_debug;
                break;
        case L2X0_CACHE_ID_PART_L210:
                ways = (aux >> 13) & 0xf;
                type = "L210";
                break;

        case AURORA_CACHE_ID:
                sync_reg_offset = AURORA_SYNC_REG;
                ways = (aux >> 13) & 0xf;
                ways = 2 << ((ways + 1) >> 2);
                way_size_shift = AURORA_WAY_SIZE_SHIFT;
                type = "Aurora";
                break;
        default:
                /* Assume unknown chips have 8 ways */
                ways = 8;
                type = "L2x0 series";
                break;
        }

        l2x0_way_mask = (1 << ways) - 1;

        /*
         * L2 cache size = way size * number of ways
         */
        way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
        way_size = 1 << (way_size + way_size_shift);

        l2x0_size = ways * way_size * SZ_1K;

        /*
         * Check if the l2x0 controller is already enabled.  If it is,
         * we are probably booted in non-secure mode, and writing the
         * registers below would fault.
         */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                /* Make sure that I&D is not locked down when starting */
                l2x0_unlock(cache_id);

                /* l2x0 controller is disabled */
                writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

                l2x0_inv_all();

                /* enable L2X0 */
                writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
        }

        /* Re-read it in case some bits are reserved. */
        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

        /* Save the value for resuming. */
        l2x0_saved_regs.aux_ctrl = aux;

        if (!of_init) {
                outer_cache.inv_range = l2x0_inv_range;
                outer_cache.clean_range = l2x0_clean_range;
                outer_cache.flush_range = l2x0_flush_range;
                outer_cache.sync = l2x0_cache_sync;
                outer_cache.flush_all = l2x0_flush_all;
                outer_cache.inv_all = l2x0_inv_all;
                outer_cache.disable = l2x0_disable;
        }

        printk(KERN_INFO "%s cache controller enabled\n", type);
        printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
                        ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
        /*
         * Limit the number of cache lines processed at once,
         * since cache range operations stall the CPU pipeline
         * until completion.
         */
        if (end > start + MAX_RANGE_SIZE)
                end = start + MAX_RANGE_SIZE;

        /*
         * Cache range operations can't straddle a page boundary.
         */
        if (end > PAGE_ALIGN(start+1))
                end = PAGE_ALIGN(start+1);

        return end;
}

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
                        unsigned long offset)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&l2x0_lock, flags);
        writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
        writel_relaxed(end, l2x0_base + offset);
        raw_spin_unlock_irqrestore(&l2x0_lock, flags);

        cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
        /*
         * Round start down and end up to cache line boundaries.
         */
        start &= ~(CACHE_LINE_SIZE - 1);
        end = ALIGN(end, CACHE_LINE_SIZE);

        /*
         * Invalidate all full cache lines between 'start' and 'end'.
         */
        while (start < end) {
                unsigned long range_end = calc_range_end(start, end);
                aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                AURORA_INVAL_RANGE_REG);
                start = range_end;
        }
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
        /*
         * If L2 is forced to WT, the L2 will always be clean and we
         * don't need to do anything here.
         */
        if (!l2_wt_override) {
                start &= ~(CACHE_LINE_SIZE - 1);
                end = ALIGN(end, CACHE_LINE_SIZE);
                while (start != end) {
                        unsigned long range_end = calc_range_end(start, end);
                        aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                        AURORA_CLEAN_RANGE_REG);
                        start = range_end;
                }
        }
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
        start &= ~(CACHE_LINE_SIZE - 1);
        end = ALIGN(end, CACHE_LINE_SIZE);
        while (start != end) {
                unsigned long range_end = calc_range_end(start, end);
                /*
                 * If L2 is forced to WT, the L2 will always be clean and we
                 * just need to invalidate.
                 */
                if (l2_wt_override)
                        aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                                        AURORA_INVAL_RANGE_REG);
                else
                        aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
                                                        AURORA_FLUSH_RANGE_REG);
                start = range_end;
        }
}

static void __init l2x0_of_setup(const struct device_node *np,
                                 u32 *aux_val, u32 *aux_mask)
{
        u32 data[2] = { 0, 0 };
        u32 tag = 0;
        u32 dirty = 0;
        u32 val = 0, mask = 0;

        of_property_read_u32(np, "arm,tag-latency", &tag);
        if (tag) {
                mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
                val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
        }

        of_property_read_u32_array(np, "arm,data-latency",
                                   data, ARRAY_SIZE(data));
        if (data[0] && data[1]) {
                mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
                        L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
                val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
                       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
        }

        of_property_read_u32(np, "arm,dirty-latency", &dirty);
        if (dirty) {
                mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
                val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
        }

        *aux_val &= ~mask;
        *aux_val |= val;
        *aux_mask &= ~mask;
}

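/*
 * For the PL310 the latency properties are triplets of <read write setup>
 * cycle counts, programmed directly into the latency control registers.
 * A minimal example node (a sketch; the address and values are
 * board-specific):
 *
 *      L2: cache-controller@fff12000 {
 *              compatible = "arm,pl310-cache";
 *              reg = <0xfff12000 0x1000>;
 *              arm,tag-latency = <1 1 1>;
 *              arm,data-latency = <2 2 1>;
 *              cache-unified;
 *              cache-level = <2>;
 *      };
 */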
static void __init pl310_of_setup(const struct device_node *np,
                                  u32 *aux_val, u32 *aux_mask)
{
        u32 data[3] = { 0, 0, 0 };
        u32 tag[3] = { 0, 0, 0 };
        u32 filter[2] = { 0, 0 };

        of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
        if (tag[0] && tag[1] && tag[2])
                writel_relaxed(
                        ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
                        ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
                        ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
                        l2x0_base + L2X0_TAG_LATENCY_CTRL);

        of_property_read_u32_array(np, "arm,data-latency",
                                   data, ARRAY_SIZE(data));
        if (data[0] && data[1] && data[2])
                writel_relaxed(
                        ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
                        ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
                        ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
                        l2x0_base + L2X0_DATA_LATENCY_CTRL);

        of_property_read_u32_array(np, "arm,filter-ranges",
                                   filter, ARRAY_SIZE(filter));
        if (filter[1]) {
                writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
                               l2x0_base + L2X0_ADDR_FILTER_END);
                writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
                               l2x0_base + L2X0_ADDR_FILTER_START);
        }
}

static void __init pl310_save(void)
{
        u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
                L2X0_CACHE_ID_RTL_MASK;

        l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
                L2X0_TAG_LATENCY_CTRL);
        l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
                L2X0_DATA_LATENCY_CTRL);
        l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
                L2X0_ADDR_FILTER_END);
        l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
                L2X0_ADDR_FILTER_START);

        if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
                /*
                 * The Prefetch Offset/Control register exists from r2p0.
                 */
                l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
                        L2X0_PREFETCH_CTRL);
                /*
                 * The Power Control register exists from r3p0.
                 */
                if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
                        l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
                                L2X0_POWER_CTRL);
        }
}

static void aurora_save(void)
{
        l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
        l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

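/*
 * Resume handlers: re-program and re-enable the controller after a
 * power-down. If firmware left the cache enabled there is nothing to do.
 */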
static void l2x0_resume(void)
{
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                /* restore aux ctrl and enable l2 */
                l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

                writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
                        L2X0_AUX_CTRL);

                l2x0_inv_all();

                writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
        }
}

static void pl310_resume(void)
{
        u32 l2x0_revision;

        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                /* restore pl310 setup */
                writel_relaxed(l2x0_saved_regs.tag_latency,
                        l2x0_base + L2X0_TAG_LATENCY_CTRL);
                writel_relaxed(l2x0_saved_regs.data_latency,
                        l2x0_base + L2X0_DATA_LATENCY_CTRL);
                writel_relaxed(l2x0_saved_regs.filter_end,
                        l2x0_base + L2X0_ADDR_FILTER_END);
                writel_relaxed(l2x0_saved_regs.filter_start,
                        l2x0_base + L2X0_ADDR_FILTER_START);

                l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
                        L2X0_CACHE_ID_RTL_MASK;

                if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
                        writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
                                l2x0_base + L2X0_PREFETCH_CTRL);
                        if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
                                writel_relaxed(l2x0_saved_regs.pwr_ctrl,
                                        l2x0_base + L2X0_POWER_CTRL);
                }
        }

        l2x0_resume();
}

static void aurora_resume(void)
{
        if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                writel_relaxed(l2x0_saved_regs.aux_ctrl,
                                l2x0_base + L2X0_AUX_CTRL);
                writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
        }
}

static void __init aurora_broadcast_l2_commands(void)
{
        __u32 u;

        /* Enable broadcasting of cache commands to the L2 */
        __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
        u |= AURORA_CTRL_FW;            /* Set the FW bit */
        __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
        isb();
}

static void __init aurora_of_setup(const struct device_node *np,
                                u32 *aux_val, u32 *aux_mask)
{
        u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
        u32 mask = AURORA_ACR_REPLACEMENT_MASK;

        of_property_read_u32(np, "cache-id-part",
                        &cache_id_part_number_from_dt);

        /* Determine and save the write policy */
        l2_wt_override = of_property_read_bool(np, "wt-override");

        if (l2_wt_override) {
                val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
                mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
        }

        *aux_val &= ~mask;
        *aux_val |= val;
        *aux_mask &= ~mask;
}

static const struct l2x0_of_data pl310_data = {
        .setup = pl310_of_setup,
        .save  = pl310_save,
        .outer_cache = {
                .resume      = pl310_resume,
                .inv_range   = l2x0_inv_range,
                .clean_range = l2x0_clean_range,
                .flush_range = l2x0_flush_range,
                .sync        = l2x0_cache_sync,
                .flush_all   = l2x0_flush_all,
                .inv_all     = l2x0_inv_all,
                .disable     = l2x0_disable,
        },
};

static const struct l2x0_of_data l2x0_data = {
        .setup = l2x0_of_setup,
        .save  = NULL,
        .outer_cache = {
                .resume      = l2x0_resume,
                .inv_range   = l2x0_inv_range,
                .clean_range = l2x0_clean_range,
                .flush_range = l2x0_flush_range,
                .sync        = l2x0_cache_sync,
                .flush_all   = l2x0_flush_all,
                .inv_all     = l2x0_inv_all,
                .disable     = l2x0_disable,
        },
};

static const struct l2x0_of_data aurora_with_outer_data = {
        .setup = aurora_of_setup,
        .save  = aurora_save,
        .outer_cache = {
                .resume      = aurora_resume,
                .inv_range   = aurora_inv_range,
                .clean_range = aurora_clean_range,
                .flush_range = aurora_flush_range,
                .sync        = l2x0_cache_sync,
                .flush_all   = l2x0_flush_all,
                .inv_all     = l2x0_inv_all,
                .disable     = l2x0_disable,
        },
};

static const struct l2x0_of_data aurora_no_outer_data = {
        .setup = aurora_of_setup,
        .save  = aurora_save,
        .outer_cache = {
                .resume      = aurora_resume,
        },
};

static const struct of_device_id l2x0_ids[] __initconst = {
        { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
        { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
        { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
        { .compatible = "marvell,aurora-system-cache",
          .data = (void *)&aurora_no_outer_data },
        { .compatible = "marvell,aurora-outer-cache",
          .data = (void *)&aurora_with_outer_data },
        {}
};

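/*
 * Platform code typically calls this from its machine init hook. A
 * minimal sketch, assuming the device tree carries all configuration:
 *
 *      l2x0_of_init(0, ~0);
 *
 * i.e. preserve every hardware AUX_CTRL bit and let the matched
 * ->setup() callback apply the device tree overrides.
 */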
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
        struct device_node *np;
        const struct l2x0_of_data *data;
        struct resource res;

        np = of_find_matching_node(NULL, l2x0_ids);
        if (!np)
                return -ENODEV;

        if (of_address_to_resource(np, 0, &res))
                return -ENODEV;

        l2x0_base = ioremap(res.start, resource_size(&res));
        if (!l2x0_base)
                return -ENOMEM;

        l2x0_saved_regs.phy_base = res.start;

        data = of_match_node(l2x0_ids, np)->data;

        /* L2 configuration can only be changed if the cache is disabled */
        if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
                if (data->setup)
                        data->setup(np, &aux_val, &aux_mask);

                /*
                 * For an Aurora cache in no-outer mode, select the
                 * correct mode using the coprocessor.
                 */
                if (data == &aurora_no_outer_data)
                        aurora_broadcast_l2_commands();
        }

        if (data->save)
                data->save();

        of_init = true;
        memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
        l2x0_init(l2x0_base, aux_val, aux_mask);

        return 0;
}
#endif