uboot/drivers/net/pfe_eth/pfe_hw.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */
#include <net/pfe_eth/pfe_eth.h>
#include <net/pfe_eth/pfe/pfe_hw.h>

static struct pe_info pe[MAX_PE];

/*
 * Initializes the PFE library.
 * Must be called before using any of the library functions.
 */
void pfe_lib_init(void)
{
        int pfe_pe_id;

        for (pfe_pe_id = CLASS0_ID; pfe_pe_id <= CLASS_MAX_ID; pfe_pe_id++) {
                pe[pfe_pe_id].dmem_base_addr =
                        (u32)CLASS_DMEM_BASE_ADDR(pfe_pe_id);
                pe[pfe_pe_id].pmem_base_addr =
                        (u32)CLASS_IMEM_BASE_ADDR(pfe_pe_id);
                pe[pfe_pe_id].pmem_size = (u32)CLASS_IMEM_SIZE;
                pe[pfe_pe_id].mem_access_wdata =
                        (void *)CLASS_MEM_ACCESS_WDATA;
                pe[pfe_pe_id].mem_access_addr = (void *)CLASS_MEM_ACCESS_ADDR;
                pe[pfe_pe_id].mem_access_rdata = (void *)CLASS_MEM_ACCESS_RDATA;
        }

        for (pfe_pe_id = TMU0_ID; pfe_pe_id <= TMU_MAX_ID; pfe_pe_id++) {
                /* TMU2 is not used on this platform */
                if (pfe_pe_id == TMU2_ID)
                        continue;
                pe[pfe_pe_id].dmem_base_addr =
                        (u32)TMU_DMEM_BASE_ADDR(pfe_pe_id - TMU0_ID);
                pe[pfe_pe_id].pmem_base_addr =
                        (u32)TMU_IMEM_BASE_ADDR(pfe_pe_id - TMU0_ID);
                pe[pfe_pe_id].pmem_size = (u32)TMU_IMEM_SIZE;
                pe[pfe_pe_id].mem_access_wdata = (void *)TMU_MEM_ACCESS_WDATA;
                pe[pfe_pe_id].mem_access_addr = (void *)TMU_MEM_ACCESS_ADDR;
                pe[pfe_pe_id].mem_access_rdata = (void *)TMU_MEM_ACCESS_RDATA;
        }
}
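
/*
 * Call-order sketch (illustrative, not part of the driver): pfe_lib_init()
 * must run before any of the pe_*_read()/pe_*_write() helpers below, since
 * they dereference the per-PE register pointers cached in pe[]. The DMEM
 * offset used here is a hypothetical, size-aligned example:
 *
 *      pfe_lib_init();
 *      u32 val = pe_dmem_read(CLASS0_ID, 0x0, 4);
 */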

/*
 * Writes a buffer to PE internal memory from the host
 * through indirect access registers.
 *
 * @param[in] id               PE identification (CLASS0_ID, ..., TMU0_ID,
 *                              ..., UTIL_ID)
 * @param[in] mem_access_addr   PE memory destination address, including the
 *                              DMEM/IMEM select bits (must be 32bit aligned)
 * @param[in] src               Buffer source address
 * @param[in] len               Number of bytes to copy
 */
static void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src,
                               unsigned int len)
{
        u32 offset = 0, val, addr;
        unsigned int len32 = len >> 2;
        int i;

        addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
                PE_MEM_ACCESS_BYTE_ENABLE(0, 4);

        for (i = 0; i < len32; i++, offset += 4, src += 4) {
                val = *(u32 *)src;
                writel(cpu_to_be32(val), pe[id].mem_access_wdata);
                writel(addr + offset, pe[id].mem_access_addr);
        }

        /* Finish the trailing 1-3 bytes with a single byte-enable write */
        len = (len & 0x3);
        if (len) {
                val = 0;

                addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
                        PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;

                for (i = 0; i < len; i++, src++)
                        val |= (*(u8 *)src) << (8 * i);

                writel(cpu_to_be32(val), pe[id].mem_access_wdata);
                writel(addr, pe[id].mem_access_addr);
        }
}
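
/*
 * Worked example (illustrative): a 6-byte copy runs the 32-bit loop once
 * (len32 = 1) and then issues one 2-byte tail write with
 * PE_MEM_ACCESS_BYTE_ENABLE(0, 2). dst is a hypothetical, 32-bit aligned
 * PE memory address:
 *
 *      u8 buf[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
 *
 *      pe_mem_memcpy_to32(CLASS0_ID, dst, buf, sizeof(buf));
 *      // one full word carries 0x11..0x44; the tail write carries
 *      // 0x55, 0x66 with only two byte lanes enabled
 */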

/*
 * Writes a buffer to PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id        PE identification (CLASS0_ID, ..., TMU0_ID,
 *                      ..., UTIL_ID)
 * @param[in] dst       DMEM destination address (must be 32bit
 *                      aligned)
 * @param[in] src       Buffer source address
 * @param[in] len       Number of bytes to copy
 */
static void pe_dmem_memcpy_to32(int id, u32 dst, const void *src,
                                unsigned int len)
{
        pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst | PE_MEM_ACCESS_DMEM,
                           src, len);
}

/*
 * Writes a buffer to PE internal program memory (PMEM) from the host
 * through indirect access registers.
 * @param[in] id        PE identification (CLASS0_ID, ..., TMU0_ID,
 *                      ..., TMU3_ID)
 * @param[in] dst       PMEM destination address (must be 32bit
 *                      aligned)
 * @param[in] src       Buffer source address
 * @param[in] len       Number of bytes to copy
 */
static void pe_pmem_memcpy_to32(int id, u32 dst, const void *src,
                                unsigned int len)
{
        pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
                                - 1)) | PE_MEM_ACCESS_IMEM, src, len);
}

/*
 * Reads PE internal program memory (IMEM) from the host
 * through indirect access registers.
 * @param[in] id                PE identification (CLASS0_ID, ..., TMU0_ID,
 *                              ..., TMU3_ID)
 * @param[in] addr              PMEM read address (must be aligned on size)
 * @param[in] size              Number of bytes to read (maximum 4, must not
 *                              cross 32bit boundaries)
 * @return                      the data read (in PE endianness, i.e. BE).
 */
u32 pe_pmem_read(int id, u32 addr, u8 size)
{
        u32 offset = addr & 0x3;
        u32 mask = 0xffffffff >> ((4 - size) << 3);
        u32 val;

        addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
                | PE_MEM_ACCESS_READ | PE_MEM_ACCESS_IMEM |
                PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

        writel(addr, pe[id].mem_access_addr);
        val = be32_to_cpu(readl(pe[id].mem_access_rdata));

        return (val >> (offset << 3)) & mask;
}

/*
 * Writes PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id        PE identification (CLASS0_ID, ..., TMU0_ID,
 *                      ..., UTIL_ID)
 * @param[in] val       Value to write (in PE endianness, i.e. BE)
 * @param[in] addr      DMEM write address (must be aligned on size)
 * @param[in] size      Number of bytes to write (maximum 4, must not
 *                      cross 32bit boundaries)
 */
void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
{
        u32 offset = addr & 0x3;

        addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
                PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

        /* Indirect access interface is byte swapping data being written */
        writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
        writel(addr, pe[id].mem_access_addr);
}

/*
 * Reads PE internal data memory (DMEM) from the host
 * through indirect access registers.
 * @param[in] id                PE identification (CLASS0_ID, ..., TMU0_ID,
 *                              ..., UTIL_ID)
 * @param[in] addr              DMEM read address (must be aligned on size)
 * @param[in] size              Number of bytes to read (maximum 4, must not
 *                              cross 32bit boundaries)
 * @return                      the data read (in PE endianness, i.e. BE).
 */
u32 pe_dmem_read(int id, u32 addr, u8 size)
{
        u32 offset = addr & 0x3;
        u32 mask = 0xffffffff >> ((4 - size) << 3);
        u32 val;

        addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_READ |
                PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);

        writel(addr, pe[id].mem_access_addr);

        /* Indirect access interface is byte swapping data being read */
        val = be32_to_cpu(readl(pe[id].mem_access_rdata));

        return (val >> (offset << 3)) & mask;
}
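
/*
 * Sub-word access sketch (illustrative): the offset/size byte-enable logic
 * lets the host patch a single byte without disturbing its neighbours. The
 * DMEM offsets below are hypothetical but follow the "aligned on size" rule:
 *
 *      pe_dmem_write(CLASS0_ID, 0x12345678, 0x10, 4);  // full word
 *      pe_dmem_write(CLASS0_ID, 0xaa, 0x12, 1);        // patch one byte
 *      u32 b = pe_dmem_read(CLASS0_ID, 0x12, 1);       // b == 0xaa
 */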

/*
 * Writes to CLASS internal bus peripherals (ccu, pe-lem) from the host
 * through indirect access registers.
 * @param[in]   val     value to write
 * @param[in]   addr    Address to write to (must be aligned on size)
 * @param[in]   size    Number of bytes to write (1, 2 or 4)
 */
static void class_bus_write(u32 val, u32 addr, u8 size)
{
        u32 offset = addr & 0x3;

        writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);

        addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
                (size << 24);

        writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
        writel(addr, CLASS_BUS_ACCESS_ADDR);
}

/*
 * Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
 * through indirect access registers.
 * @param[in] addr      Address to read from (must be aligned on size)
 * @param[in] size      Number of bytes to read (1, 2 or 4)
 * @return              the read data
 */
static u32 class_bus_read(u32 addr, u8 size)
{
        u32 offset = addr & 0x3;
        u32 mask = 0xffffffff >> ((4 - size) << 3);
        u32 val;

        writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);

        addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);

        writel(addr, CLASS_BUS_ACCESS_ADDR);
        val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));

        return (val >> (offset << 3)) & mask;
}

/*
 * Writes data to the cluster memory (PE_LMEM)
 * @param[in] dst       PE LMEM destination address (must be 32bit aligned)
 * @param[in] src       Buffer source address
 * @param[in] len       Number of bytes to copy
 */
static void class_pe_lmem_memcpy_to32(u32 dst, const void *src,
                                      unsigned int len)
{
        u32 len32 = len >> 2;
        int i;

        for (i = 0; i < len32; i++, src += 4, dst += 4)
                class_bus_write(*(u32 *)src, dst, 4);

        if (len & 0x2) {
                class_bus_write(*(u16 *)src, dst, 2);
                src += 2;
                dst += 2;
        }

        if (len & 0x1) {
                class_bus_write(*(u8 *)src, dst, 1);
                src++;
                dst++;
        }
}

/*
 * Fills the cluster memory (PE_LMEM) with a byte value
 * @param[in] dst       PE LMEM destination address (must be 32bit aligned)
 * @param[in] val       Value to write
 * @param[in] len       Number of bytes to write
 */
static void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
{
        u32 len32 = len >> 2;
        int i;

        /* Replicate the byte value across all four lanes of a 32bit word */
        val = val | (val << 8) | (val << 16) | (val << 24);

        for (i = 0; i < len32; i++, dst += 4)
                class_bus_write(val, dst, 4);

        if (len & 0x2) {
                class_bus_write(val, dst, 2);
                dst += 2;
        }

        if (len & 0x1) {
                class_bus_write(val, dst, 1);
                dst++;
        }
}

/*
 * Reads data from the cluster memory (PE_LMEM)
 * @param[out] dst      pointer to the destination buffer the data is
 *                      copied to
 * @param[in] len       length in bytes of the amount of data to read
 *                      from cluster memory
 * @param[in] offset    offset in bytes in the cluster memory where the data
 *                      is read from
 */
void pe_lmem_read(u32 *dst, u32 len, u32 offset)
{
        u32 len32 = len >> 2;
        int i = 0;

        for (i = 0; i < len32; dst++, i++, offset += 4)
                *dst = class_bus_read(PE_LMEM_BASE_ADDR + offset, 4);

        if (len & 0x03)
                *dst = class_bus_read(PE_LMEM_BASE_ADDR + offset, (len & 0x03));
}

/*
 * Writes data to the cluster memory (PE_LMEM)
 * @param[in] src       pointer to the source buffer the data is copied from
 * @param[in] len       length in bytes of the amount of data to write to the
 *                      cluster memory
 * @param[in] offset    offset in bytes in the cluster memory where the data
 *                      is written to
 */
void pe_lmem_write(u32 *src, u32 len, u32 offset)
{
        u32 len32 = len >> 2;
        int i = 0;

        for (i = 0; i < len32; src++, i++, offset += 4)
                class_bus_write(*src, PE_LMEM_BASE_ADDR + offset, 4);

        if (len & 0x03)
                class_bus_write(*src, PE_LMEM_BASE_ADDR + offset, (len &
                                        0x03));
}
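
/*
 * Usage sketch (illustrative): round-trip a small buffer through PE_LMEM.
 * The 0x40 offset is a hypothetical cluster-memory location; a length that
 * is not a multiple of 4 is finished with one narrower class_bus access:
 *
 *      u32 tx[2] = { 0x01020304, 0x05060708 };
 *      u32 rx[2];
 *
 *      pe_lmem_write(tx, sizeof(tx), 0x40);
 *      pe_lmem_read(rx, sizeof(rx), 0x40);
 */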

/*
 * Loads an elf section into pmem
 * Code needs to be at least 16bit aligned and only PROGBITS sections are
 * supported
 *
 * @param[in] id        PE identification (CLASS0_ID, ..., TMU0_ID, ...,
 *                      TMU3_ID)
 * @param[in] data      pointer to the elf firmware
 * @param[in] shdr      pointer to the elf section header
 */
static int pe_load_pmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
        u32 offset = be32_to_cpu(shdr->sh_offset);
        u32 addr = be32_to_cpu(shdr->sh_addr);
        u32 size = be32_to_cpu(shdr->sh_size);
        u32 type = be32_to_cpu(shdr->sh_type);

        if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
                printf(
                        "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
                        __func__, addr, (unsigned long)data + offset);

                return -1;
        }

        if (addr & 0x1) {
                printf("%s: load address(%x) is not 16bit aligned\n",
                       __func__, addr);
                return -1;
        }

        if (size & 0x1) {
                printf("%s: load size(%x) is not 16bit aligned\n", __func__,
                       size);
                return -1;
        }

        debug("pmem pe%d @%x len %d\n", id, addr, size);
        switch (type) {
        case SHT_PROGBITS:
                pe_pmem_memcpy_to32(id, addr, data + offset, size);
                break;

        default:
                printf("%s: unsupported section type(%x)\n", __func__, type);
                return -1;
        }

        return 0;
}

/*
 * Loads an elf section into dmem
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id        PE identification (CLASS0_ID, ..., TMU0_ID,
 *                      ..., UTIL_ID)
 * @param[in] data      pointer to the elf firmware
 * @param[in] shdr      pointer to the elf section header
 */
static int pe_load_dmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
        u32 offset = be32_to_cpu(shdr->sh_offset);
        u32 addr = be32_to_cpu(shdr->sh_addr);
        u32 size = be32_to_cpu(shdr->sh_size);
        u32 type = be32_to_cpu(shdr->sh_type);
        u32 size32 = size >> 2;
        int i;

        if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
                printf(
                        "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
                        __func__, addr, (unsigned long)data + offset);

                return -1;
        }

        if (addr & 0x3) {
                printf("%s: load address(%x) is not 32bit aligned\n",
                       __func__, addr);
                return -1;
        }

        switch (type) {
        case SHT_PROGBITS:
                debug("dmem pe%d @%x len %d\n", id, addr, size);
                pe_dmem_memcpy_to32(id, addr, data + offset, size);
                break;

        case SHT_NOBITS:
                debug("dmem zero pe%d @%x len %d\n", id, addr, size);
                for (i = 0; i < size32; i++, addr += 4)
                        pe_dmem_write(id, 0, addr, 4);

                if (size & 0x3)
                        pe_dmem_write(id, 0, addr, size & 0x3);

                break;

        default:
                printf("%s: unsupported section type(%x)\n", __func__, type);
                return -1;
        }

        return 0;
}

/*
 * Loads an elf section into DDR
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id        PE identification (CLASS0_ID, ..., TMU0_ID,
 *                      ..., UTIL_ID)
 * @param[in] data      pointer to the elf firmware
 * @param[in] shdr      pointer to the elf section header
 */
static int pe_load_ddr_section(int id, const void *data, Elf32_Shdr *shdr)
{
        u32 offset = be32_to_cpu(shdr->sh_offset);
        u32 addr = be32_to_cpu(shdr->sh_addr);
        u32 size = be32_to_cpu(shdr->sh_size);
        u32 type = be32_to_cpu(shdr->sh_type);
        u32 flags = be32_to_cpu(shdr->sh_flags);

        switch (type) {
        case SHT_PROGBITS:
                debug("ddr  pe%d @%x len %d\n", id, addr, size);
                if (flags & SHF_EXECINSTR) {
                        if (id <= CLASS_MAX_ID) {
                                /* Do the loading only once in DDR */
                                if (id == CLASS0_ID) {
                                        debug(
                                                "%s: load address(%x) and elf file address(%lx) rcvd\n"
                                                , __func__, addr,
                                                (unsigned long)data + offset);
                                        if (((unsigned long)(data + offset)
                                                & 0x3) != (addr & 0x3)) {
                                                printf(
                                                        "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
                                                        __func__, addr,
                                                        (unsigned long)data +
                                                        offset);

                                                return -1;
                                        }

                                        if (addr & 0x1) {
                                                printf(
                                                        "%s: load address(%x) is not 16bit aligned\n"
                                                        , __func__, addr);
                                                return -1;
                                        }

                                        if (size & 0x1) {
                                                printf(
                                                        "%s: load length(%x) is not 16bit aligned\n"
                                                        , __func__, size);
                                                return -1;
                                        }

                                        memcpy((void *)DDR_PFE_TO_VIRT(addr),
                                               data + offset, size);
                                }
                        } else {
                                printf(
                                        "%s: unsupported ddr section type(%x) for PE(%d)\n"
                                        , __func__, type, id);
                                return -1;
                        }

                } else {
                        memcpy((void *)DDR_PFE_TO_VIRT(addr), data + offset,
                               size);
                }

                break;

        case SHT_NOBITS:
                debug("ddr zero pe%d @%x len %d\n", id, addr, size);
                memset((void *)DDR_PFE_TO_VIRT(addr), 0, size);

                break;

        default:
                printf("%s: unsupported section type(%x)\n", __func__, type);
                return -1;
        }

        return 0;
}

/*
 * Loads an elf section into pe lmem
 * Data needs to be at least 32bit aligned, NOBITS sections are correctly
 * initialized to 0
 *
 * @param[in] id        PE identification (CLASS0_ID, ..., CLASS5_ID)
 * @param[in] data      pointer to the elf firmware
 * @param[in] shdr      pointer to the elf section header
 */
static int pe_load_pe_lmem_section(int id, const void *data, Elf32_Shdr *shdr)
{
        u32 offset = be32_to_cpu(shdr->sh_offset);
        u32 addr = be32_to_cpu(shdr->sh_addr);
        u32 size = be32_to_cpu(shdr->sh_size);
        u32 type = be32_to_cpu(shdr->sh_type);

        if (id > CLASS_MAX_ID) {
                printf("%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
                       __func__, type, id);
                return -1;
        }

        if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
                printf(
                        "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
                        __func__, addr, (unsigned long)data + offset);

                return -1;
        }

        if (addr & 0x3) {
                printf("%s: load address(%x) is not 32bit aligned\n",
                       __func__, addr);
                return -1;
        }

        debug("lmem  pe%d @%x len %d\n", id, addr, size);

        switch (type) {
        case SHT_PROGBITS:
                class_pe_lmem_memcpy_to32(addr, data + offset, size);
                break;

        case SHT_NOBITS:
                class_pe_lmem_memset(addr, 0, size);
                break;

        default:
                printf("%s: unsupported section type(%x)\n", __func__, type);
                return -1;
        }

        return 0;
}

/*
 * Loads an elf section into a PE
 * For now only supports loading a section to dmem (all PEs), pmem (class and
 * tmu PEs), DDR (util PE code)
 * @param[in] id        PE identification (CLASS0_ID, ..., TMU0_ID,
 *                      ..., UTIL_ID)
 * @param[in] data      pointer to the elf firmware
 * @param[in] shdr      pointer to the elf section header
 */
int pe_load_elf_section(int id, const void *data, Elf32_Shdr *shdr)
{
        u32 addr = be32_to_cpu(shdr->sh_addr);
        u32 size = be32_to_cpu(shdr->sh_size);

        if (IS_DMEM(addr, size))
                return pe_load_dmem_section(id, data, shdr);
        else if (IS_PMEM(addr, size))
                return pe_load_pmem_section(id, data, shdr);
        else if (IS_PFE_LMEM(addr, size))
                return 0;
        else if (IS_PHYS_DDR(addr, size))
                return pe_load_ddr_section(id, data, shdr);
        else if (IS_PE_LMEM(addr, size))
                return pe_load_pe_lmem_section(id, data, shdr);

        printf("%s: unsupported memory range(%x)\n", __func__, addr);

        return 0;
}
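
/*
 * Loader-loop sketch (illustrative): walk the section headers of a
 * big-endian PE ELF image and feed each one to pe_load_elf_section().
 * "elf" is a hypothetical pointer to a firmware blob already in memory:
 *
 *      Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elf;
 *      Elf32_Shdr *shdr = (Elf32_Shdr *)(elf + be32_to_cpu(ehdr->e_shoff));
 *      int i;
 *
 *      for (i = 0; i < be16_to_cpu(ehdr->e_shnum); i++, shdr++)
 *              if (pe_load_elf_section(CLASS0_ID, elf, shdr) < 0)
 *                      return -1;
 */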

/**************************** BMU ***************************/
/*
 * Resets a BMU block.
 * @param[in] base      BMU block base address
 */
static inline void bmu_reset(void *base)
{
        writel(CORE_SW_RESET, base + BMU_CTRL);

        /* Wait for self clear */
        while (readl(base + BMU_CTRL) & CORE_SW_RESET)
                ;
}

/*
 * Enables a BMU block.
 * @param[in] base      BMU block base address
 */
void bmu_enable(void *base)
{
        writel(CORE_ENABLE, base + BMU_CTRL);
}

/*
 * Disables a BMU block.
 * @param[in] base      BMU block base address
 */
static inline void bmu_disable(void *base)
{
        writel(CORE_DISABLE, base + BMU_CTRL);
}

/*
 * Sets the configuration of a BMU block.
 * @param[in] base      BMU block base address
 * @param[in] cfg       BMU configuration
 */
static inline void bmu_set_config(void *base, struct bmu_cfg *cfg)
{
        writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
        writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
        writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);

        /* Interrupts are never used */
        writel(0x0, base + BMU_INT_ENABLE);
}

/*
 * Initializes a BMU block.
 * @param[in] base      BMU block base address
 * @param[in] cfg       BMU configuration
 */
void bmu_init(void *base, struct bmu_cfg *cfg)
{
        bmu_disable(base);

        bmu_set_config(base, cfg);

        bmu_reset(base);
}
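
/*
 * Configuration sketch (illustrative): the fields mirror what
 * bmu_set_config() programs; the pool base and buffer count below are
 * hypothetical placeholders, not board values:
 *
 *      struct bmu_cfg cfg = {
 *              .baseaddr = 0x80000000, // buffer pool base (PFE view)
 *              .count = 1024,          // number of buffers in the pool
 *              .size = 128,            // value programmed into BMU_BUF_SIZE
 *      };
 *
 *      bmu_init(BMU1_BASE_ADDR, &cfg);
 *      bmu_enable(BMU1_BASE_ADDR);
 */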

/**************************** GPI ***************************/
/*
 * Resets a GPI block.
 * @param[in] base      GPI base address
 */
static inline void gpi_reset(void *base)
{
        writel(CORE_SW_RESET, base + GPI_CTRL);
}

/*
 * Enables a GPI block.
 * @param[in] base      GPI base address
 */
void gpi_enable(void *base)
{
        writel(CORE_ENABLE, base + GPI_CTRL);
}

/*
 * Disables a GPI block.
 * @param[in] base      GPI base address
 */
void gpi_disable(void *base)
{
        writel(CORE_DISABLE, base + GPI_CTRL);
}

/*
 * Sets the configuration of a GPI block.
 * @param[in] base      GPI base address
 * @param[in] cfg       GPI configuration
 */
static inline void gpi_set_config(void *base, struct gpi_cfg *cfg)
{
        writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
               + GPI_LMEM_ALLOC_ADDR);
        writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
               + GPI_LMEM_FREE_ADDR);
        writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
               + GPI_DDR_ALLOC_ADDR);
        writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
               + GPI_DDR_FREE_ADDR);
        writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
        writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
        writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
        writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
        writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
        writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
        writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);

        writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
                GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
        writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
        writel(cfg->aseq_len, base + GPI_DTX_ASEQ);

        /* Make GPI AXI transactions non-bufferable */
        writel(0x1, base + GPI_AXI_CTRL);
}

/*
 * Initializes a GPI block.
 * @param[in] base      GPI base address
 * @param[in] cfg       GPI configuration
 */
void gpi_init(void *base, struct gpi_cfg *cfg)
{
        gpi_reset(base);

        gpi_disable(base);

        gpi_set_config(base, cfg);
}
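
/*
 * Configuration sketch (illustrative): gpi_init() takes the retry count,
 * TMLF Tx threshold and DTX aseq length that gpi_set_config() programs.
 * The values below are hypothetical placeholders:
 *
 *      struct gpi_cfg cfg = {
 *              .lmem_rtry_cnt = 0x40,
 *              .tmlf_txthres = 0xbc,
 *              .aseq_len = 0x40,
 *      };
 *
 *      gpi_init(EGPI1_BASE_ADDR, &cfg);
 *      gpi_enable(EGPI1_BASE_ADDR);
 */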

/**************************** CLASSIFIER ***************************/
/*
 * Resets the CLASSIFIER block.
 */
static inline void class_reset(void)
{
        writel(CORE_SW_RESET, CLASS_TX_CTRL);
}

/*
 * Enables all CLASS-PE cores.
 */
void class_enable(void)
{
        writel(CORE_ENABLE, CLASS_TX_CTRL);
}

/*
 * Disables all CLASS-PE cores.
 */
void class_disable(void)
{
        writel(CORE_DISABLE, CLASS_TX_CTRL);
}

/*
 * Sets the configuration of the CLASSIFIER block.
 * @param[in] cfg       CLASSIFIER configuration
 */
static inline void class_set_config(struct class_cfg *cfg)
{
        if (PLL_CLK_EN == 0) {
                /* Clock ratio: for 1:1 the value is 0 */
                writel(0x0, CLASS_PE_SYS_CLK_RATIO);
        } else {
                /* Clock ratio: for 1:2 the value is 1 */
                writel(0x1, CLASS_PE_SYS_CLK_RATIO);
        }
        writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
        writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
        writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
                CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
                CLASS_ROUTE_HASH_ENTRY_SIZE);
        writel(HASH_CRC_PORT_IP | QB2BUS_LE, CLASS_ROUTE_MULTI);

        writel(cfg->route_table_baseaddr, CLASS_ROUTE_TABLE_BASE);
        memset((void *)DDR_PFE_TO_VIRT(cfg->route_table_baseaddr), 0,
               ROUTE_TABLE_SIZE);

        writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
        writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
        writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
        writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
        writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);

        writel(23, CLASS_AFULL_THRES);
        writel(23, CLASS_TSQ_FIFO_THRES);

        writel(24, CLASS_MAX_BUF_CNT);
        writel(24, CLASS_TSQ_MAX_CNT);

        /* Make Class AXI transactions non-bufferable */
        writel(0x1, CLASS_AXI_CTRL);

        /*
         * Make Util AXI transactions non-bufferable.
         * Util is disabled in U-Boot, so do it from here.
         */
        writel(0x1, UTIL_AXI_CTRL);
}

/*
 * Initializes the CLASSIFIER block.
 * @param[in] cfg       CLASSIFIER configuration
 */
void class_init(struct class_cfg *cfg)
{
        class_reset();

        class_disable();

        class_set_config(cfg);
}
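
/*
 * Configuration sketch (illustrative): class_init() needs the DDR-resident
 * route table that class_set_config() programs and zeroes. Both values
 * below are hypothetical placeholders:
 *
 *      struct class_cfg cfg = {
 *              .route_table_baseaddr = 0x80100000,
 *              .route_table_hash_bits = 15,
 *      };
 *
 *      class_init(&cfg);
 *      class_enable();
 */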

/**************************** TMU ***************************/
/*
 * Enables TMU-PE cores.
 * @param[in] pe_mask   TMU PE mask
 */
void tmu_enable(u32 pe_mask)
{
        writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
}

/*
 * Disables TMU cores.
 * @param[in] pe_mask   TMU PE mask
 */
void tmu_disable(u32 pe_mask)
{
        writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
}
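
/*
 * Mask sketch (illustrative): bit n of pe_mask gates TMU n in TMU_TX_CTRL,
 * so enabling TMU0 and TMU1 while leaving the others untouched is:
 *
 *      tmu_enable(0x3);
 */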

/*
 * Initializes the TMU block.
 * @param[in] cfg       TMU configuration
 */
void tmu_init(struct tmu_cfg *cfg)
{
        int q, phyno;

        /* keep in soft reset */
        writel(SW_RESET, TMU_CTRL);

        /* Make TMU AXI transactions non-bufferable */
        writel(0x1, TMU_AXI_CTRL);

        /* enable EMAC PHY ports */
        writel(0x3, TMU_SYS_GENERIC_CONTROL);

        writel(750, TMU_INQ_WATERMARK);

        writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR + GPI_INQ_PKTPTR),
               TMU_PHY0_INQ_ADDR);
        writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR + GPI_INQ_PKTPTR),
               TMU_PHY1_INQ_ADDR);

        writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR + GPI_INQ_PKTPTR),
               TMU_PHY3_INQ_ADDR);
        writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
        writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
        writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
               TMU_BMU_INQ_ADDR);

        /*
         * enable all 10 schedulers [9:0] of each TDQ
         * (TDQ2 is skipped, along with the unused TMU2)
         */
        writel(0x3FF, TMU_TDQ0_SCH_CTRL);
        writel(0x3FF, TMU_TDQ1_SCH_CTRL);
        writel(0x3FF, TMU_TDQ3_SCH_CTRL);

        if (PLL_CLK_EN == 0) {
                /* Clock ratio: for 1:1 the value is 0 */
                writel(0x0, TMU_PE_SYS_CLK_RATIO);
        } else {
                /* Clock ratio: for 1:2 the value is 1 */
                writel(0x1, TMU_PE_SYS_CLK_RATIO);
        }

        /* Extra packet pointers will be stored from this address onwards */
        debug("TMU_LLM_BASE_ADDR %x\n", cfg->llm_base_addr);
        writel(cfg->llm_base_addr, TMU_LLM_BASE_ADDR);

        debug("TMU_LLM_QUE_LEN %x\n", cfg->llm_queue_len);
        writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);

        writel(5, TMU_TDQ_IIFG_CFG);
        writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);

        writel(0x0, TMU_CTRL);

        /* MEM init */
        writel(MEM_INIT, TMU_CTRL);

        while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
                ;

        /* LLM init */
        writel(LLM_INIT, TMU_CTRL);

        while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
                ;

        /* set up each queue for tail drop */
        for (phyno = 0; phyno < 4; phyno++) {
                if (phyno == 2)
                        continue;
                for (q = 0; q < 16; q++) {
                        u32 qmax;

                        writel((phyno << 8) | q, TMU_TEQ_CTRL);
                        writel(BIT(22), TMU_TEQ_QCFG);

                        if (phyno == 3)
                                qmax = DEFAULT_TMU3_QDEPTH;
                        else
                                qmax = (q == 0) ? DEFAULT_Q0_QDEPTH :
                                        DEFAULT_MAX_QDEPTH;

                        writel(qmax << 18, TMU_TEQ_HW_PROB_CFG2);
                        writel(qmax >> 14, TMU_TEQ_HW_PROB_CFG3);
                }
        }
        writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
        writel(0, TMU_CTRL);
}
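
/*
 * Configuration sketch (illustrative): tmu_init() takes the DDR region used
 * for the LLM packet-pointer overflow. The address and length below are
 * hypothetical placeholders:
 *
 *      struct tmu_cfg cfg = {
 *              .llm_base_addr = 0x80200000,
 *              .llm_queue_len = 0x800,
 *      };
 *
 *      tmu_init(&cfg);
 *      tmu_enable(0xb);        // TMU0, TMU1 and TMU3 (no TMU2)
 */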

/**************************** HIF ***************************/
/*
 * Enables the HIF Tx DMA.
 */
void hif_tx_enable(void)
{
        writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
}

/*
 * Disables the HIF Tx DMA and its packet interrupt.
 */
void hif_tx_disable(void)
{
        u32 hif_int;

        writel(0, HIF_TX_CTRL);

        /* Clear the Tx packet interrupt enable bit */
        hif_int = readl(HIF_INT_ENABLE);
        hif_int &= ~HIF_TXPKT_INT_EN;
        writel(hif_int, HIF_INT_ENABLE);
}

/*
 * Enables the HIF Rx DMA.
 */
void hif_rx_enable(void)
{
        writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);
}

/*
 * Disables the HIF Rx DMA and its packet interrupt.
 */
void hif_rx_disable(void)
{
        u32 hif_int;

        writel(0, HIF_RX_CTRL);

        /* Clear the Rx packet interrupt enable bit */
        hif_int = readl(HIF_INT_ENABLE);
        hif_int &= ~HIF_RXPKT_INT_EN;
        writel(hif_int, HIF_INT_ENABLE);
}

/*
 * Initializes the HIF copy block.
 */
void hif_init(void)
{
        /* Initialize HIF registers */
        writel(HIF_RX_POLL_CTRL_CYCLE << 16 | HIF_TX_POLL_CTRL_CYCLE,
               HIF_POLL_CTRL);
        /* Make HIF AXI transactions non-bufferable */
        writel(0x1, HIF_AXI_CTRL);
}
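
/*
 * Bring-up sketch (illustrative): the copy block is initialized once and
 * the two DMA directions are then enabled independently:
 *
 *      hif_init();
 *      hif_tx_enable();
 *      hif_rx_enable();
 */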