linux/arch/arm/mm/cache-uniphier.c
/*
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)             "uniphier: " fmt

#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC           0x0     /* Control Register */
#define    UNIPHIER_SSCC_BST                    BIT(20) /* UCWG burst read */
#define    UNIPHIER_SSCC_ACT                    BIT(19) /* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG                    BIT(18) /* WT gathering on */
#define    UNIPHIER_SSCC_PRD                    BIT(17) /* enable pre-fetch */
#define    UNIPHIER_SSCC_ON                     BIT(0)  /* enable cache */
#define UNIPHIER_SSCLPDAWCR     0x30    /* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR     0x34    /* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID          0x0     /* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE         0x244   /* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV               0x0     /* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN             0x1     /* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH             0x2     /* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC              0x8     /* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH    0x9     /* flush p-fetch buf */
#define UNIPHIER_SSCOQM         0x248   /* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_TID_MASK             (0x3 << 21)
#define    UNIPHIER_SSCOQM_TID_LRU_DATA         (0x0 << 21)
#define    UNIPHIER_SSCOQM_TID_LRU_INST         (0x1 << 21)
#define    UNIPHIER_SSCOQM_TID_WAY              (0x2 << 21)
#define    UNIPHIER_SSCOQM_S_MASK               (0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE              (0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL                (0x1 << 17)
#define    UNIPHIER_SSCOQM_S_WAY                (0x2 << 17)
#define    UNIPHIER_SSCOQM_CE                   BIT(15) /* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV               0x0     /* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN             0x1     /* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH             0x2     /* flush */
#define    UNIPHIER_SSCOQM_CM_PREFETCH          0x3     /* prefetch to cache */
#define    UNIPHIER_SSCOQM_CM_PREFETCH_BUF      0x4     /* prefetch to pf-buf */
#define    UNIPHIER_SSCOQM_CM_TOUCH             0x5     /* touch */
#define    UNIPHIER_SSCOQM_CM_TOUCH_ZERO        0x6     /* touch to zero */
#define    UNIPHIER_SSCOQM_CM_TOUCH_DIRTY       0x7     /* touch with dirty */
#define UNIPHIER_SSCOQAD        0x24c   /* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ        0x250   /* Cache Operation Queue Size */
#define UNIPHIER_SSCOQMASK      0x254   /* Cache Operation Queue Address Mask */
#define UNIPHIER_SSCOQWN        0x258   /* Cache Operation Queue Way Number */
#define UNIPHIER_SSCOPPQSEF     0x25c   /* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE               BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE               BIT(0)
#define UNIPHIER_SSCOLPQS       0x260   /* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF                 BIT(2)
#define    UNIPHIER_SSCOLPQS_EST                BIT(1)
#define    UNIPHIER_SSCOLPQS_QST                BIT(0)

/* Is the touch/pre-fetch destination specified by ways? */
#define UNIPHIER_SSCOQM_TID_IS_WAY(op) \
                (((op) & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)
/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
                (((op) & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_ctrl_base: virtual base address of the way control registers
 * @way_present_mask: each bit specifies if the way is present
 * @way_locked_mask: each bit specifies if the way is locked
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
        void __iomem *ctrl_base;
        void __iomem *rev_base;
        void __iomem *op_base;
        void __iomem *way_ctrl_base;
        u32 way_present_mask;
        u32 way_locked_mask;
        u32 nsets;
        u32 line_size;
        u32 range_op_max_size;
        struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is taken for the access to the list.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
        /* This sequence need not be atomic.  Do not disable IRQ. */
        writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
                       data->op_base + UNIPHIER_SSCOPE);
        /* need a read back to confirm */
        readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
                                          unsigned long start,
                                          unsigned long size,
                                          u32 operation)
{
        unsigned long flags;

        /*
         * No spin lock is necessary here because:
         *
         * [1] This outer cache controller can accept maintenance operations
         * from multiple CPUs at a time in an SMP system; if a maintenance
         * operation is under way and another one is issued, the new one is
         * stored in the queue.  The controller performs the operations one
         * after another.  If the queue is full, the status register,
         * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
         * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS},
         * have separate instances for each CPU, i.e. each CPU can track the
         * status of the maintenance operations triggered by itself.
         *
         * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
         * SSCOQWN}, are shared between multiple CPUs, but the hardware still
         * guarantees the registration sequence is atomic; write accesses to
         * them are arbitrated by the hardware.  The first CPU to access the
         * register UNIPHIER_SSCOQM holds the access right, which is released
         * by reading the status register UNIPHIER_SSCOPPQSEF.  While one CPU
         * holds the access right, other CPUs fail to register operations.
         * One CPU should not hold the access right for long, so local IRQs
         * are disabled during the following sequence.
         */
        local_irq_save(flags);

        /* clear the complete notification flag */
        writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

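        /*
         * Register the operation.  If UNIPHIER_SSCOPPQSEF reports an error
         * (the queue is full, or another CPU currently holds the access
         * right to the command registers), the registration did not take
         * effect, so retry the whole sequence.
         */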
        do {
                /* set cache operation */
                writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
                               data->op_base + UNIPHIER_SSCOQM);

                /* set address range if needed */
                if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
                        writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
                        writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
                }

                /* set target ways if needed */
                if (unlikely(UNIPHIER_SSCOQM_TID_IS_WAY(operation)))
                        writel_relaxed(data->way_locked_mask,
                                       data->op_base + UNIPHIER_SSCOQWN);
        } while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
                          (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

        /* wait until the operation is completed */
        while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
                      UNIPHIER_SSCOLPQS_EF))
                cpu_relax();

        local_irq_restore(flags);
}

static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
                                       u32 operation)
{
        __uniphier_cache_maint_common(data, 0, 0,
                                      UNIPHIER_SSCOQM_S_ALL | operation);

        __uniphier_cache_sync(data);
}

static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
                                         unsigned long start, unsigned long end,
                                         u32 operation)
{
        unsigned long size;

        /*
         * If the start address is not aligned,
         * perform a cache operation for the first cache-line
         */
        start = start & ~(data->line_size - 1);

        size = end - start;

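        /*
         * A size this close to the top of the address space would make the
         * ALIGN() below wrap around; such a range effectively covers the
         * whole address space anyway, so fall back to an "all" operation.
         */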
        if (unlikely(size >= (unsigned long)(-data->line_size))) {
                /* this means cache operation for all range */
                __uniphier_cache_maint_all(data, operation);
                return;
        }

        /*
         * If the end address is not aligned,
         * perform a cache operation for the last cache-line
         */
        size = ALIGN(size, data->line_size);

        while (size) {
                unsigned long chunk_size = min_t(unsigned long, size,
                                                 data->range_op_max_size);

                __uniphier_cache_maint_common(data, start, chunk_size,
                                        UNIPHIER_SSCOQM_S_RANGE | operation);

                start += chunk_size;
                size -= chunk_size;
        }

        __uniphier_cache_sync(data);
}

static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
        u32 val = 0;

        if (on)
                val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

        writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

static void __init __uniphier_cache_set_locked_ways(
                                        struct uniphier_cache_data *data,
                                        u32 way_mask)
{
        unsigned int cpu;

        data->way_locked_mask = way_mask & data->way_present_mask;

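        /*
         * Each CPU has its own active way control register; activate only
         * the ways that are not locked.
         */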
        for_each_possible_cpu(cpu)
                writel_relaxed(~data->way_locked_mask & data->way_present_mask,
                               data->way_ctrl_base + 4 * cpu);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
                                       u32 operation)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
        uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
        uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
        uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
        struct uniphier_cache_data *data;

        list_for_each_entry_reverse(data, &uniphier_cache_list, list)
                __uniphier_cache_enable(data, false);

        uniphier_cache_flush_all();
}

static void __init uniphier_cache_enable(void)
{
        struct uniphier_cache_data *data;

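        /* make sure no stale data is left before the caches are turned on */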
        uniphier_cache_inv_all();

        list_for_each_entry(data, &uniphier_cache_list, list) {
                __uniphier_cache_enable(data, true);
                __uniphier_cache_set_locked_ways(data, 0);
        }
}

static void uniphier_cache_sync(void)
{
        struct uniphier_cache_data *data;

        list_for_each_entry(data, &uniphier_cache_list, list)
                __uniphier_cache_sync(data);
}

int __init uniphier_cache_l2_is_enabled(void)
{
        struct uniphier_cache_data *data;

        data = list_first_entry_or_null(&uniphier_cache_list,
                                        struct uniphier_cache_data, list);
        if (!data)
                return 0;

        return !!(readl_relaxed(data->ctrl_base + UNIPHIER_SSCC) &
                  UNIPHIER_SSCC_ON);
}

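/*
 * Pre-load the given range into the L2 cache.  The touch destination is
 * specified by ways (UNIPHIER_SSCOQM_TID_WAY), so the data is loaded into
 * the ways selected by the current locked-way mask.
 */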
void __init uniphier_cache_l2_touch_range(unsigned long start,
                                          unsigned long end)
{
        struct uniphier_cache_data *data;

        data = list_first_entry_or_null(&uniphier_cache_list,
                                        struct uniphier_cache_data, list);
        if (data)
                __uniphier_cache_maint_range(data, start, end,
                                             UNIPHIER_SSCOQM_TID_WAY |
                                             UNIPHIER_SSCOQM_CM_TOUCH);
}

void __init uniphier_cache_l2_set_locked_ways(u32 way_mask)
{
        struct uniphier_cache_data *data;

        data = list_first_entry_or_null(&uniphier_cache_list,
                                        struct uniphier_cache_data, list);
        if (data)
                __uniphier_cache_set_locked_ways(data, way_mask);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
        {
                .compatible = "socionext,uniphier-system-cache",
        },
        { /* sentinel */ }
};

static int __init __uniphier_cache_init(struct device_node *np,
                                        unsigned int *cache_level)
{
        struct uniphier_cache_data *data;
        u32 level, cache_size;
        struct device_node *next_np;
        int ret = 0;

        if (!of_match_node(uniphier_cache_match, np)) {
                pr_err("L%d: not compatible with uniphier cache\n",
                       *cache_level);
                return -EINVAL;
        }

        if (of_property_read_u32(np, "cache-level", &level)) {
                pr_err("L%d: cache-level is not specified\n", *cache_level);
                return -EINVAL;
        }

        if (level != *cache_level) {
                pr_err("L%d: cache-level is unexpected value %d\n",
                       *cache_level, level);
                return -EINVAL;
        }

        if (!of_property_read_bool(np, "cache-unified")) {
                pr_err("L%d: cache-unified is not specified\n", *cache_level);
                return -EINVAL;
        }

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
            !is_power_of_2(data->line_size)) {
                pr_err("L%d: cache-line-size is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

        if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
            !is_power_of_2(data->nsets)) {
                pr_err("L%d: cache-sets is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

        if (of_property_read_u32(np, "cache-size", &cache_size) ||
            cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
                pr_err("L%d: cache-size is unspecified or invalid\n",
                       *cache_level);
                ret = -EINVAL;
                goto err;
        }

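        /*
         * number of ways = cache-size / (cache-sets * cache-line-size);
         * set one bit for each present way
         */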
        data->way_present_mask =
                ((u32)1 << cache_size / data->nsets / data->line_size) - 1;

        data->ctrl_base = of_iomap(np, 0);
        if (!data->ctrl_base) {
                pr_err("L%d: failed to map control register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

        data->rev_base = of_iomap(np, 1);
        if (!data->rev_base) {
                pr_err("L%d: failed to map revision register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

        data->op_base = of_iomap(np, 2);
        if (!data->op_base) {
                pr_err("L%d: failed to map operation register\n", *cache_level);
                ret = -ENOMEM;
                goto err;
        }

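        /* default way control offset; overridden below for some older SoCs */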
        data->way_ctrl_base = data->ctrl_base + 0xc00;

        if (*cache_level == 2) {
                u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
                /*
                 * The size of range operation is limited to (1 << 22) or less
                 * for PH-sLD8 or older SoCs.
                 */
                if (revision <= 0x16)
                        data->range_op_max_size = (u32)1 << 22;

                /*
                 * Unfortunately, the offset address of the active way control
                 * base varies from SoC to SoC.
                 */
                switch (revision) {
                case 0x11:      /* sLD3 */
                        data->way_ctrl_base = data->ctrl_base + 0x870;
                        break;
                case 0x12:      /* LD4 */
                case 0x16:      /* sLD8 */
                        data->way_ctrl_base = data->ctrl_base + 0x840;
                        break;
                default:
                        break;
                }
        }

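        /*
         * For SoCs without the (1 << 22) limit, range_op_max_size is still
         * zero here, so the subtraction below wraps around to a value near
         * U32_MAX, i.e. effectively no limit.  Either way, the result stays
         * a multiple of the cache line size.
         */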
        data->range_op_max_size -= data->line_size;

        INIT_LIST_HEAD(&data->list);
        list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */

        /*
         * OK, this level has been successfully initialized.  Look for the next
         * level cache.  Do not roll back even if the initialization of the
         * next level cache fails because we want to continue with available
         * cache levels.
         */
        next_np = of_find_next_cache_node(np);
        if (next_np) {
                (*cache_level)++;
                ret = __uniphier_cache_init(next_np, cache_level);
        }
        of_node_put(next_np);

        return ret;
err:
        iounmap(data->op_base);
        iounmap(data->rev_base);
        iounmap(data->ctrl_base);
        kfree(data);

        return ret;
}

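/*
 * uniphier_cache_init - look up the outer cache nodes in the device tree,
 * set up each level, and register the outer_cache callbacks.
 */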
int __init uniphier_cache_init(void)
{
        struct device_node *np = NULL;
        unsigned int cache_level;
        int ret = 0;

        /* look for level 2 cache */
        while ((np = of_find_matching_node(np, uniphier_cache_match)))
                if (!of_property_read_u32(np, "cache-level", &cache_level) &&
                    cache_level == 2)
                        break;

        if (!np)
                return -ENODEV;

        ret = __uniphier_cache_init(np, &cache_level);
        of_node_put(np);

        if (ret) {
                /*
                 * Error out only if L2 initialization fails.  Continue with
                 * any error on L3 or outer levels because they are optional.
                 */
                if (cache_level == 2) {
                        pr_err("failed to initialize L2 cache\n");
                        return ret;
                }

                cache_level--;
                ret = 0;
        }

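        /* hook this driver into the ARM outer cache framework */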
        outer_cache.inv_range = uniphier_cache_inv_range;
        outer_cache.clean_range = uniphier_cache_clean_range;
        outer_cache.flush_range = uniphier_cache_flush_range;
        outer_cache.flush_all = uniphier_cache_flush_all;
        outer_cache.disable = uniphier_cache_disable;
        outer_cache.sync = uniphier_cache_sync;

        uniphier_cache_enable();

        pr_info("enabled outer cache (cache level: %d)\n", cache_level);

        return ret;
}