linux/arch/mips/cavium-octeon/executive/cvmx-l2c.c
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Implementation of the Level 2 Cache (L2C) control, measurement, and
 * debugging facilities.
 */

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>

/*
 * This spinlock is used internally to ensure that only one core is
 * performing certain L2 operations at a time.
 *
 * NOTE: This only protects calls from within a single application -
 * if multiple applications or operating systems are running, then it
 * is up to the user program to coordinate between them.
 */
static cvmx_spinlock_t cvmx_l2c_spinlock;

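/*
 * Helper to check whether the L2 cache has been reduced to half size.
 * The code below treats bit 34 of the L2D_FUS3 fuse register as the
 * half-size indication.
 */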
static inline int l2_size_half(void)
{
	uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
	return !!(val & (1ull << 34));
}

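/**
 * Return the L2 cache way partitioning for a given core.
 *
 * @core:   The core of interest
 *
 * Returns the UMSK field for the core: a 1 bit blocks the core from
 * evicting lines from the corresponding way. Returns -1 if the core
 * number is invalid.
 */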
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
	uint32_t field;

	/* Validate the core number */
	if (core >= cvmx_octeon_num_cores())
		return -1;

	/*
	 * Use the lower two bits of the core number to determine the
	 * bit offset of the UMSK[] field in the L2C_SPAR register.
	 */
	field = (core & 0x3) * 8;

	/*
	 * Return the UMSK[] field from the appropriate L2C_SPAR
	 * register based on the core number.
	 */
	switch (core & 0xC) {
	case 0x0:
		return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
			field;
	case 0x4:
		return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
			field;
	case 0x8:
		return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
			field;
	case 0xC:
		return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
			field;
	}
	return 0;
}

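/**
 * Partition the L2 cache for a specific core.
 *
 * @core:   The core the partitioning applies to
 * @mask:   The way partitioning as a bit mask. A 1 bit blocks the
 *          core from evicting lines from the corresponding way. At
 *          least one way must remain unblocked, and the combination
 *          of this mask with the existing partition must not block
 *          all ways.
 *
 * Returns 0 on success, -1 on error.
 */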
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
{
	uint32_t field;
	uint32_t valid_mask;

	valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;

	mask &= valid_mask;

	/* A UMSK setting which blocks all L2C Ways is an error. */
	if (mask == valid_mask)
		return -1;

	/* Validate the core number */
	if (core >= cvmx_octeon_num_cores())
		return -1;

	/* Check to make sure current mask & new mask don't block all ways */
	if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
	    valid_mask)
		return -1;

	/*
	 * Use the lower two bits of core to determine the bit offset
	 * of the UMSK[] field in the L2C_SPAR register.
	 */
	field = (core & 0x3) * 8;

	/*
	 * Assign the new mask setting to the UMSK[] field in the
	 * appropriate L2C_SPAR register based on the core number.
	 */
	switch (core & 0xC) {
	case 0x0:
		cvmx_write_csr(CVMX_L2C_SPAR0,
			       (cvmx_read_csr(CVMX_L2C_SPAR0) &
				~(0xFF << field)) | mask << field);
		break;
	case 0x4:
		cvmx_write_csr(CVMX_L2C_SPAR1,
			       (cvmx_read_csr(CVMX_L2C_SPAR1) &
				~(0xFF << field)) | mask << field);
		break;
	case 0x8:
		cvmx_write_csr(CVMX_L2C_SPAR2,
			       (cvmx_read_csr(CVMX_L2C_SPAR2) &
				~(0xFF << field)) | mask << field);
		break;
	case 0xC:
		cvmx_write_csr(CVMX_L2C_SPAR3,
			       (cvmx_read_csr(CVMX_L2C_SPAR3) &
				~(0xFF << field)) | mask << field);
		break;
	}
	return 0;
}

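/**
 * Partition the L2 cache for the hardware blocks.
 *
 * @mask:   The way partitioning as a bit mask. A 1 bit blocks the
 *          hardware blocks from evicting lines from the corresponding
 *          way. At least one way must remain unblocked.
 *
 * Returns 0 on success, -1 on error.
 */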
int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
	uint32_t valid_mask;

	valid_mask = 0xff;

	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
		if (l2_size_half())
			valid_mask = 0xf;
	} else if (l2_size_half()) {
		valid_mask = 0x3;
	}

	mask &= valid_mask;

	/* A UMSK setting which blocks all L2C Ways is an error. */
	if (mask == valid_mask)
		return -1;
	/* Check to make sure current mask & new mask don't block all ways */
	if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
	    valid_mask)
		return -1;

	cvmx_write_csr(CVMX_L2C_SPAR4,
		       (cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
	return 0;
}

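/**
 * Return the L2 cache way partitioning for the hardware blocks,
 * i.e. the UMSK field of L2C_SPAR4.
 */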
int cvmx_l2c_get_hw_way_partition(void)
{
	return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
}

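/**
 * Configure one of the four L2 cache performance counters.
 *
 * @counter:        Which counter to configure (0..3; values above 3
 *                  fall through to counter 3)
 * @event:          The L2 cache event to count
 * @clear_on_read:  When set, reading the counter clears it (not
 *                  available on pass 1 parts)
 *
 * This routine enables the counter but does not clear it. A typical
 * sequence (illustrative only; event names are defined in cvmx-l2c.h)
 * might be:
 *
 *	cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_CYCLES, 1);
 *	... run the workload of interest ...
 *	count = cvmx_l2c_read_perf(0);
 */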
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
			  uint32_t clear_on_read)
{
	union cvmx_l2c_pfctl pfctl;

	pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);

	switch (counter) {
	case 0:
		pfctl.s.cnt0sel = event;
		pfctl.s.cnt0ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt0rdclr = clear_on_read;
		break;
	case 1:
		pfctl.s.cnt1sel = event;
		pfctl.s.cnt1ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt1rdclr = clear_on_read;
		break;
	case 2:
		pfctl.s.cnt2sel = event;
		pfctl.s.cnt2ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt2rdclr = clear_on_read;
		break;
	case 3:
	default:
		pfctl.s.cnt3sel = event;
		pfctl.s.cnt3ena = 1;
		if (!cvmx_octeon_is_pass1())
			pfctl.s.cnt3rdclr = clear_on_read;
		break;
	}

	cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
}

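/**
 * Read one of the four L2 cache performance counters.
 *
 * @counter: Which counter to read (0..3; values above 3 fall through
 *           to counter 3)
 *
 * Returns the current counter value.
 */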
uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
	switch (counter) {
	case 0:
		return cvmx_read_csr(CVMX_L2C_PFC0);
	case 1:
		return cvmx_read_csr(CVMX_L2C_PFC1);
	case 2:
		return cvmx_read_csr(CVMX_L2C_PFC2);
	case 3:
	default:
		return cvmx_read_csr(CVMX_L2C_PFC3);
	}
}

/**
 * @INTERNAL
 * Helper function used to fault in cache lines for L2 cache locking
 *
 * @addr:   Address of base of memory region to read into L2 cache
 * @len:    Length (in bytes) of region to fault in
 */
static void fault_in(uint64_t addr, int len)
{
	volatile char *ptr;
	volatile char dummy;
	/*
	 * Adjust addr and length so we get all cache lines even for
	 * small ranges spanning two cache lines.
	 */
	len += addr & CVMX_CACHE_LINE_MASK;
	addr &= ~CVMX_CACHE_LINE_MASK;
	ptr = (volatile char *)cvmx_phys_to_ptr(addr);
	/*
	 * Invalidate L1 cache to make sure all loads result in data
	 * being in L2.
	 */
	CVMX_DCACHE_INVALIDATE;
	while (len > 0) {
		dummy += *ptr;
		len -= CVMX_CACHE_LINE_SIZE;
		ptr += CVMX_CACHE_LINE_SIZE;
	}
}

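/**
 * Lock a single line into the L2 cache at the given physical address.
 * Uses the L2C debug features; calls from within this application are
 * serialized by cvmx_l2c_spinlock.
 *
 * @addr:   Physical address of the line to lock
 *
 * Returns 0 on success, 1 if the line could not be locked.
 */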
int cvmx_l2c_lock_line(uint64_t addr)
{
	int retval = 0;
	union cvmx_l2c_dbg l2cdbg;
	union cvmx_l2c_lckbase lckbase;
	union cvmx_l2c_lckoff lckoff;
	union cvmx_l2t_err l2t_err;

	l2cdbg.u64 = 0;
	lckbase.u64 = 0;
	lckoff.u64 = 0;

	cvmx_spinlock_lock(&cvmx_l2c_spinlock);

	/* Clear l2t error bits if set */
	l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
	l2t_err.s.lckerr = 1;
	l2t_err.s.lckerr2 = 1;
	cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);

	addr &= ~CVMX_CACHE_LINE_MASK;

	/* Set this core as debug core */
	l2cdbg.s.ppnum = cvmx_get_core_num();
	CVMX_SYNC;
	cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
	cvmx_read_csr(CVMX_L2C_DBG);

	lckoff.s.lck_offset = 0;	/* Only lock 1 line at a time */
	cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
	cvmx_read_csr(CVMX_L2C_LCKOFF);

	if (((union cvmx_l2c_cfg)(cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
		int alias_shift =
		    CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
		uint64_t addr_tmp =
		    addr ^ (addr & ((1 << alias_shift) - 1)) >>
		    CVMX_L2_SET_BITS;
		lckbase.s.lck_base = addr_tmp >> 7;
	} else {
		lckbase.s.lck_base = addr >> 7;
	}

	lckbase.s.lck_ena = 1;
	cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
	cvmx_read_csr(CVMX_L2C_LCKBASE);	/* Make sure it gets there */

	fault_in(addr, CVMX_CACHE_LINE_SIZE);

	lckbase.s.lck_ena = 0;
	cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
	cvmx_read_csr(CVMX_L2C_LCKBASE);	/* Make sure it gets there */

	/* Stop being debug core */
	cvmx_write_csr(CVMX_L2C_DBG, 0);
	cvmx_read_csr(CVMX_L2C_DBG);

	l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
	if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
		retval = 1;	/* We were unable to lock the line */

	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);

	return retval;
}

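/**
 * Lock a memory region into the L2 cache.
 *
 * Note that if not all requested lines can be locked, the cache may
 * be left with very little unlocked capacity; care should be taken to
 * leave enough of the L2 unlocked for normal caching of DRAM.
 *
 * @start:  Physical address of the start of the region to lock
 * @len:    Length (in bytes) of the region to lock
 *
 * Returns the number of requested lines that were not locked
 * (0 on complete success).
 */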
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
{
	int retval = 0;

	/* Round start/end to cache line boundaries */
	len += start & CVMX_CACHE_LINE_MASK;
	start &= ~CVMX_CACHE_LINE_MASK;
	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;

	while (len) {
		retval += cvmx_l2c_lock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
		len -= CVMX_CACHE_LINE_SIZE;
	}

	return retval;
}

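/**
 * Flush (and unlock) the entire L2 cache.
 *
 * IMPORTANT: Must only be run by one core at a time due to use of the
 * L2C debug features.
 */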
void cvmx_l2c_flush(void)
{
	uint64_t assoc, set;
	uint64_t n_assoc, n_set;
	union cvmx_l2c_dbg l2cdbg;

	cvmx_spinlock_lock(&cvmx_l2c_spinlock);

	l2cdbg.u64 = 0;
	if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2cdbg.s.ppnum = cvmx_get_core_num();
	l2cdbg.s.finv = 1;
	n_set = CVMX_L2_SETS;
	n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
	for (set = 0; set < n_set; set++) {
		for (assoc = 0; assoc < n_assoc; assoc++) {
			l2cdbg.s.set = assoc;
			/*
			 * Enter debug mode, and make sure all other
			 * writes complete before we enter debug mode.
			 */
			CVMX_SYNCW;
			cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
			cvmx_read_csr(CVMX_L2C_DBG);

			CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
					       (CVMX_MIPS_SPACE_XKPHYS,
						set * CVMX_CACHE_LINE_SIZE), 0);
			CVMX_SYNCW;	/* Push STF out to L2 */
			/* Exit debug mode */
			CVMX_SYNC;
			cvmx_write_csr(CVMX_L2C_DBG, 0);
			cvmx_read_csr(CVMX_L2C_DBG);
		}
	}

	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
}

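/**
 * Unlock a single line in the L2 cache.
 *
 * @address: Physical address to unlock
 *
 * Returns 0 if the line was not unlocked, 1 if it was. Note that a
 * matching line is flushed whether or not it was locked; the return
 * value reports the line's lock bit, not whether a flush occurred.
 */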
int cvmx_l2c_unlock_line(uint64_t address)
{
	int assoc;
	union cvmx_l2c_tag tag;
	union cvmx_l2c_dbg l2cdbg;
	uint32_t tag_addr;
	uint32_t index = cvmx_l2c_address_to_index(address);

	cvmx_spinlock_lock(&cvmx_l2c_spinlock);
	/* Compute portion of address that is stored in tag */
	tag_addr =
	    ((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
	     ((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
	for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
		tag = cvmx_get_l2c_tag(assoc, index);

		if (tag.s.V && (tag.s.addr == tag_addr)) {
			l2cdbg.u64 = 0;
			l2cdbg.s.ppnum = cvmx_get_core_num();
			l2cdbg.s.set = assoc;
			l2cdbg.s.finv = 1;

			CVMX_SYNC;
			/* Enter debug mode */
			cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
			cvmx_read_csr(CVMX_L2C_DBG);

			CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
					       (CVMX_MIPS_SPACE_XKPHYS,
						address), 0);
			CVMX_SYNC;
			/* Exit debug mode */
			cvmx_write_csr(CVMX_L2C_DBG, 0);
			cvmx_read_csr(CVMX_L2C_DBG);
			cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
			return tag.s.L;
		}
	}
	cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
	return 0;
}

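/**
 * Unlock a region of memory that was locked in the L2 cache.
 *
 * @start:  Start physical address
 * @len:    Length (in bytes) to unlock
 *
 * Returns the number of locked lines that this call unlocked.
 */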
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
{
	int num_unlocked = 0;

	/* Round start/end to cache line boundaries */
	len += start & CVMX_CACHE_LINE_MASK;
	start &= ~CVMX_CACHE_LINE_MASK;
	len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
	while (len > 0) {
		num_unlocked += cvmx_l2c_unlock_line(start);
		start += CVMX_CACHE_LINE_SIZE;
		len -= CVMX_CACHE_LINE_SIZE;
	}

	return num_unlocked;
}

/*
 * Internal l2c tag types.  These are converted to a generic structure
 * that can be used on all chips.
 */
union __cvmx_l2c_tag {
	uint64_t u64;
	struct cvmx_l2c_tag_cn50xx {
		uint64_t reserved:40;
		uint64_t V:1;	/* Line valid */
		uint64_t D:1;	/* Line dirty */
		uint64_t L:1;	/* Line locked */
		uint64_t U:1;	/* Use, LRU eviction */
		uint64_t addr:20;	/* Phys mem addr (33..14) */
	} cn50xx;
	struct cvmx_l2c_tag_cn30xx {
		uint64_t reserved:41;
		uint64_t V:1;	/* Line valid */
		uint64_t D:1;	/* Line dirty */
		uint64_t L:1;	/* Line locked */
		uint64_t U:1;	/* Use, LRU eviction */
		uint64_t addr:19;	/* Phys mem addr (33..15) */
	} cn30xx;
	struct cvmx_l2c_tag_cn31xx {
		uint64_t reserved:42;
		uint64_t V:1;	/* Line valid */
		uint64_t D:1;	/* Line dirty */
		uint64_t L:1;	/* Line locked */
		uint64_t U:1;	/* Use, LRU eviction */
		uint64_t addr:18;	/* Phys mem addr (33..16) */
	} cn31xx;
	struct cvmx_l2c_tag_cn38xx {
		uint64_t reserved:43;
		uint64_t V:1;	/* Line valid */
		uint64_t D:1;	/* Line dirty */
		uint64_t L:1;	/* Line locked */
		uint64_t U:1;	/* Use, LRU eviction */
		uint64_t addr:17;	/* Phys mem addr (33..17) */
	} cn38xx;
	struct cvmx_l2c_tag_cn58xx {
		uint64_t reserved:44;
		uint64_t V:1;	/* Line valid */
		uint64_t D:1;	/* Line dirty */
		uint64_t L:1;	/* Line locked */
		uint64_t U:1;	/* Use, LRU eviction */
		uint64_t addr:16;	/* Phys mem addr (33..18) */
	} cn58xx;
	struct cvmx_l2c_tag_cn58xx cn56xx;	/* 2048 sets */
	struct cvmx_l2c_tag_cn31xx cn52xx;	/* 512 sets */
};

/**
 * @INTERNAL
 * Read an L2C tag.  This code makes the current core the 'debug core'
 * for the L2, so it must only be executed by one core at a time.
 *
 * @assoc:  Association (way) of the tag to dump
 * @index:  Index of the cacheline
 *
 * Returns the Octeon model-specific tag structure.  This is
 *         translated by a wrapper function to a generic form that is
 *         easier for applications to use.
 */
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
	uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
	uint64_t core = cvmx_get_core_num();
	union __cvmx_l2c_tag tag_val;
	uint64_t dbg_addr = CVMX_L2C_DBG;
	unsigned long flags;
	union cvmx_l2c_dbg debug_val;

	debug_val.u64 = 0;
	/*
	 * For low core count parts, the core number is always small
	 * enough to stay in the correct field and not set any
	 * reserved bits.
	 */
	debug_val.s.ppnum = core;
	debug_val.s.l2t = 1;
	debug_val.s.set = assoc;
	/*
	 * Make sure core is quiet (no prefetches, etc.) before
	 * entering debug mode.
	 */
	CVMX_SYNC;
	/* Flush L1 to make sure debug load misses L1 */
	CVMX_DCACHE_INVALIDATE;

	local_irq_save(flags);

	/*
	 * The following must be done in assembly, as when in debug
	 * mode all data loads from L2 return special debug data, not
	 * normal memory contents.  Also, interrupts must be disabled,
	 * since if an interrupt occurs while in debug mode the ISR
	 * will get debug data from all its memory reads instead of
	 * the contents of memory.
	 */
	asm volatile (".set push              \n"
		"        .set mips64              \n"
		"        .set noreorder           \n"
		/* Enter debug mode, wait for store */
		"        sd    %[dbg_val], 0(%[dbg_addr])  \n"
		"        ld    $0, 0(%[dbg_addr]) \n"
		/* Read L2C tag data */
		"        ld    %[tag_val], 0(%[tag_addr]) \n"
		/* Exit debug mode, wait for store */
		"        sd    $0, 0(%[dbg_addr])  \n"
		"        ld    $0, 0(%[dbg_addr]) \n"
		/* Invalidate dcache to discard debug data */
		"        cache 9, 0($0) \n"
		"        .set pop" :
		[tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
		[dbg_val] "r"(debug_val.u64),
		[tag_addr] "r"(debug_tag_addr) : "memory");

	local_irq_restore(flags);
	return tag_val;
}

union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
	union __cvmx_l2c_tag tmp_tag;
	union cvmx_l2c_tag tag;
	tag.u64 = 0;

	if ((int)association >= cvmx_l2c_get_num_assoc()) {
		cvmx_dprintf
		    ("ERROR: cvmx_get_l2c_tag association out of range\n");
		return tag;
	}
	if ((int)index >= cvmx_l2c_get_num_sets()) {
		cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
			     "index out of range (arg: %d, max: %d)\n",
			     index, cvmx_l2c_get_num_sets());
		return tag;
	}
	/* __read_l2_tag is intended for internal use only */
	tmp_tag = __read_l2_tag(association, index);

	/*
	 * Convert all tag structure types to generic version, as it
	 * can represent all models.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
		tag.s.V = tmp_tag.cn58xx.V;
		tag.s.D = tmp_tag.cn58xx.D;
		tag.s.L = tmp_tag.cn58xx.L;
		tag.s.U = tmp_tag.cn58xx.U;
		tag.s.addr = tmp_tag.cn58xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
		tag.s.V = tmp_tag.cn38xx.V;
		tag.s.D = tmp_tag.cn38xx.D;
		tag.s.L = tmp_tag.cn38xx.L;
		tag.s.U = tmp_tag.cn38xx.U;
		tag.s.addr = tmp_tag.cn38xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
		   || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
		tag.s.V = tmp_tag.cn31xx.V;
		tag.s.D = tmp_tag.cn31xx.D;
		tag.s.L = tmp_tag.cn31xx.L;
		tag.s.U = tmp_tag.cn31xx.U;
		tag.s.addr = tmp_tag.cn31xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
		tag.s.V = tmp_tag.cn30xx.V;
		tag.s.D = tmp_tag.cn30xx.D;
		tag.s.L = tmp_tag.cn30xx.L;
		tag.s.U = tmp_tag.cn30xx.U;
		tag.s.addr = tmp_tag.cn30xx.addr;
	} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
		tag.s.V = tmp_tag.cn50xx.V;
		tag.s.D = tmp_tag.cn50xx.D;
		tag.s.L = tmp_tag.cn50xx.L;
		tag.s.U = tmp_tag.cn50xx.U;
		tag.s.addr = tmp_tag.cn50xx.addr;
	} else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
	}

	return tag;
}

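/**
 * Return the L2 cache index for a given physical address, taking
 * index aliasing into account when it is enabled in L2C_CFG.
 *
 * @addr:   Physical address
 *
 * Returns the L2 cache index.
 */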
uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
	uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
	union cvmx_l2c_cfg l2c_cfg;

	l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);

	if (l2c_cfg.s.idxalias) {
		idx ^=
		    ((addr & CVMX_L2C_ALIAS_MASK) >>
		     CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
	}
	idx &= CVMX_L2C_IDX_MASK;
	return idx;
}

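/**
 * Return the size of the L2 cache in bytes (sets * ways * line size),
 * excluding tags and other metadata.
 */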
int cvmx_l2c_get_cache_size_bytes(void)
{
	return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
		CVMX_CACHE_LINE_SIZE;
}

/**
 * Return log base 2 of the number of sets in the L2 cache.
 *
 * Returns the number of set index bits for the current model.
 */
int cvmx_l2c_get_set_bits(void)
{
	int l2_set_bits;

	if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		l2_set_bits = 11;	/* 2048 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
		l2_set_bits = 10;	/* 1024 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
		 || OCTEON_IS_MODEL(OCTEON_CN52XX))
		l2_set_bits = 9;	/* 512 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2_set_bits = 8;	/* 256 sets */
	else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
		l2_set_bits = 7;	/* 128 sets */
	else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
		l2_set_bits = 11;	/* 2048 sets */
	}
	return l2_set_bits;
}

/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
	return 1 << cvmx_l2c_get_set_bits();
}

/* Return the number of associations in the L2 Cache */
int cvmx_l2c_get_num_assoc(void)
{
	int l2_assoc;

	if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN58XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
		l2_assoc = 8;
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN30XX))
		l2_assoc = 4;
	else {
		cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
		l2_assoc = 8;
	}

	/* Check to see if part of the cache is disabled */
	if (cvmx_fuse_read(265))
		l2_assoc = l2_assoc >> 2;
	else if (cvmx_fuse_read(264))
		l2_assoc = l2_assoc >> 1;

	return l2_assoc;
}

/**
 * Flush a line from the L2 cache.
 * This should only be called from one core at a time, as this routine
 * sets the core to the 'debug' core in order to flush the line.
 *
 * @assoc:  Association (or way) to flush
 * @index:  Index to flush
 */
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
	union cvmx_l2c_dbg l2cdbg;

	l2cdbg.u64 = 0;
	l2cdbg.s.ppnum = cvmx_get_core_num();
	l2cdbg.s.finv = 1;
	l2cdbg.s.set = assoc;

	/*
	 * Enter debug mode, and make sure all other writes complete
	 * before we enter debug mode.
	 */
	asm volatile ("sync" : : : "memory");
	cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
	cvmx_read_csr(CVMX_L2C_DBG);

	CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
	/* Exit debug mode */
	asm volatile ("sync" : : : "memory");
	cvmx_write_csr(CVMX_L2C_DBG, 0);
	cvmx_read_csr(CVMX_L2C_DBG);
}