uboot/arch/mips/mach-octeon/include/mach/cvmx-fpa3.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the CN78XX Free Pool Allocator, a.k.a. FPA3
 */

#include "cvmx-address.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-scratch.h"

#ifndef __CVMX_FPA3_H__
#define __CVMX_FPA3_H__

typedef struct {
        unsigned res0 : 6;
        unsigned node : 2;
        unsigned res1 : 2;
        unsigned lpool : 6;
        unsigned valid_magic : 16;
} cvmx_fpa3_pool_t;

typedef struct {
        unsigned res0 : 6;
        unsigned node : 2;
        unsigned res1 : 6;
        unsigned laura : 10;
        unsigned valid_magic : 16;
} cvmx_fpa3_gaura_t;

#define CVMX_FPA3_VALID_MAGIC   0xf9a3
#define CVMX_FPA3_INVALID_GAURA ((cvmx_fpa3_gaura_t){ 0, 0, 0, 0, 0 })
#define CVMX_FPA3_INVALID_POOL  ((cvmx_fpa3_pool_t){ 0, 0, 0, 0, 0 })

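/**
 * @INTERNAL
 * Check that an AURA handle carries the validation magic set by
 * the constructors below.
 */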
static inline bool __cvmx_fpa3_aura_valid(cvmx_fpa3_gaura_t aura)
{
        if (aura.valid_magic != CVMX_FPA3_VALID_MAGIC)
                return false;
        return true;
}

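/**
 * @INTERNAL
 * Check that a POOL handle carries the validation magic set by
 * the constructors below.
 */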
static inline bool __cvmx_fpa3_pool_valid(cvmx_fpa3_pool_t pool)
{
        if (pool.valid_magic != CVMX_FPA3_VALID_MAGIC)
                return false;
        return true;
}

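/**
 * @INTERNAL
 * Build a validated global AURA handle from a node and local AURA number.
 * A negative node selects the current node; a negative local AURA number
 * produces CVMX_FPA3_INVALID_GAURA.
 */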
static inline cvmx_fpa3_gaura_t __cvmx_fpa3_gaura(int node, int laura)
{
        cvmx_fpa3_gaura_t aura;

        if (node < 0)
                node = cvmx_get_node_num();
        if (laura < 0)
                return CVMX_FPA3_INVALID_GAURA;

        aura.node = node;
        aura.laura = laura;
        aura.valid_magic = CVMX_FPA3_VALID_MAGIC;
        return aura;
}

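/**
 * @INTERNAL
 * Build a validated POOL handle from a node and local pool number.
 * A negative node selects the current node; a negative local pool number
 * produces CVMX_FPA3_INVALID_POOL.
 */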
static inline cvmx_fpa3_pool_t __cvmx_fpa3_pool(int node, int lpool)
{
        cvmx_fpa3_pool_t pool;

        if (node < 0)
                node = cvmx_get_node_num();
        if (lpool < 0)
                return CVMX_FPA3_INVALID_POOL;

        pool.node = node;
        pool.lpool = lpool;
        pool.valid_magic = CVMX_FPA3_VALID_MAGIC;
        return pool;
}

#undef CVMX_FPA3_VALID_MAGIC

/**
 * Structure describing the data format used for stores to the FPA.
 */
typedef union {
        u64 u64;
        struct {
                u64 scraddr : 8;
                u64 len : 8;
                u64 did : 8;
                u64 addr : 40;
        } s;
        struct {
                u64 scraddr : 8;
                u64 len : 8;
                u64 did : 8;
                u64 node : 4;
                u64 red : 1;
                u64 reserved2 : 9;
                u64 aura : 10;
                u64 reserved3 : 16;
        } cn78xx;
} cvmx_fpa3_iobdma_data_t;

/**
 * Struct describing load allocate operation addresses for FPA pool.
 */
union cvmx_fpa3_load_data {
        u64 u64;
        struct {
                u64 seg : 2;
                u64 reserved1 : 13;
                u64 io : 1;
                u64 did : 8;
                u64 node : 4;
                u64 red : 1;
                u64 reserved2 : 9;
                u64 aura : 10;
                u64 reserved3 : 16;
        };
};

typedef union cvmx_fpa3_load_data cvmx_fpa3_load_data_t;

/**
 * Struct describing store free operation addresses from FPA pool.
 */
union cvmx_fpa3_store_addr {
        u64 u64;
        struct {
                u64 seg : 2;
                u64 reserved1 : 13;
                u64 io : 1;
                u64 did : 8;
                u64 node : 4;
                u64 reserved2 : 10;
                u64 aura : 10;
                u64 fabs : 1;
                u64 reserved3 : 3;
                u64 dwb_count : 9;
                u64 reserved4 : 3;
        };
};

typedef union cvmx_fpa3_store_addr cvmx_fpa3_store_addr_t;

enum cvmx_fpa3_pool_alignment_e {
        FPA_NATURAL_ALIGNMENT,
        FPA_OFFSET_ALIGNMENT,
        FPA_OPAQUE_ALIGNMENT
};

#define CVMX_FPA3_AURAX_LIMIT_MAX ((1ull << 40) - 1)

/**
 * @INTERNAL
 * Accessor function to return the number of POOLs in an FPA3,
 * depending on the SoC model.
 * The number is per-node for models supporting multi-node configurations.
 */
static inline int cvmx_fpa3_num_pools(void)
{
        if (OCTEON_IS_MODEL(OCTEON_CN78XX))
                return 64;
        if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
                return 32;
        if (OCTEON_IS_MODEL(OCTEON_CN73XX))
                return 32;
        printf("ERROR: %s: Unknown model\n", __func__);
        return -1;
}

/**
 * @INTERNAL
 * Accessor function to return the number of AURAs in an FPA3,
 * depending on the SoC model.
 * The number is per-node for models supporting multi-node configurations.
 */
static inline int cvmx_fpa3_num_auras(void)
{
        if (OCTEON_IS_MODEL(OCTEON_CN78XX))
                return 1024;
        if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
                return 512;
        if (OCTEON_IS_MODEL(OCTEON_CN73XX))
                return 512;
        printf("ERROR: %s: Unknown model\n", __func__);
        return -1;
}

/**
 * Get the FPA3 POOL underneath an FPA3 AURA, containing all its buffers.
 */
static inline cvmx_fpa3_pool_t cvmx_fpa3_aura_to_pool(cvmx_fpa3_gaura_t aura)
{
        cvmx_fpa3_pool_t pool;
        cvmx_fpa_aurax_pool_t aurax_pool;

        aurax_pool.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_POOL(aura.laura));

        pool = __cvmx_fpa3_pool(aura.node, aurax_pool.s.pool);
        return pool;
}

/**
 * Get a new block from the FPA pool
 *
 * @param aura  - global aura to allocate from
 * @return pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa3_alloc(cvmx_fpa3_gaura_t aura)
{
        u64 address;
        cvmx_fpa3_load_data_t load_addr;

        load_addr.u64 = 0;
        load_addr.seg = CVMX_MIPS_SPACE_XKPHYS;
        load_addr.io = 1;
        load_addr.did = 0x29; /* Device ID, indicates FPA */
        load_addr.node = aura.node;
        load_addr.red = 0; /* No RED on this allocation.
                            * FIXME to use config option
                            */
        load_addr.aura = aura.laura;

        address = cvmx_read64_uint64(load_addr.u64);
        if (!address)
                return NULL;
        return cvmx_phys_to_ptr(address);
}

/**
 * Asynchronously get a new block from the FPA
 *
 * The result of cvmx_fpa3_async_alloc() may be retrieved using
 * cvmx_fpa3_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put the response in. This is a
 *                 byte address, but must be 8-byte aligned.
 * @param aura     Global aura to get the block from
 */
static inline void cvmx_fpa3_async_alloc(u64 scr_addr, cvmx_fpa3_gaura_t aura)
{
        cvmx_fpa3_iobdma_data_t data;

        /* Hardware only uses 64-bit aligned locations, so convert from byte
         * address to 64-bit index
         */
        data.u64 = 0ull;
        data.cn78xx.scraddr = scr_addr >> 3;
        data.cn78xx.len = 1;
        data.cn78xx.did = 0x29;
        data.cn78xx.node = aura.node;
        data.cn78xx.aura = aura.laura;
        cvmx_scratch_write64(scr_addr, 0ull);

        CVMX_SYNCW;
        cvmx_send_single(data.u64);
}

/**
 * Retrieve the result of cvmx_fpa3_async_alloc()
 *
 * @param scr_addr The local scratch address. Must be the same value
 *                 passed to cvmx_fpa3_async_alloc().
 *
 * @param aura     Global aura the block came from. Must be the same value
 *                 passed to cvmx_fpa3_async_alloc().
 *
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa3_async_alloc_finish(u64 scr_addr, cvmx_fpa3_gaura_t aura)
{
        u64 address;

        CVMX_SYNCIOBDMA;

        address = cvmx_scratch_read64(scr_addr);
        if (cvmx_likely(address))
                return cvmx_phys_to_ptr(address);
        else
                /* Try regular alloc if async failed */
                return cvmx_fpa3_alloc(aura);
}
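
/*
 * A minimal usage sketch for the asynchronous pair above, assuming the
 * caller owns scratchpad offset 0 (the offset is an assumption of this
 * example):
 *
 *      cvmx_fpa3_async_alloc(0, aura);
 *      ... overlap other work here ...
 *      buf = cvmx_fpa3_async_alloc_finish(0, aura);
 */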

/**
 * Free a pointer back to the pool.
 *
 * @param ptr    pointer to the block to free
 * @param aura   global aura number
 * @param num_cache_lines cache lines to invalidate
 */
static inline void cvmx_fpa3_free(void *ptr, cvmx_fpa3_gaura_t aura, unsigned int num_cache_lines)
{
        cvmx_fpa3_store_addr_t newptr;
        cvmx_addr_t newdata;

        newdata.u64 = cvmx_ptr_to_phys(ptr);

        /* Make sure that any previous writes to memory go out before we free
         * this buffer. This also serves as a barrier to prevent GCC from
         * reordering operations to after the free.
         */
        CVMX_SYNCWS;

        newptr.u64 = 0;
        newptr.seg = CVMX_MIPS_SPACE_XKPHYS;
        newptr.io = 1;
        newptr.did = 0x29; /* Device ID, indicates FPA */
        newptr.node = aura.node;
        newptr.aura = aura.laura;
        newptr.fabs = 0; /* Free absolute. FIXME to use config option */
        newptr.dwb_count = num_cache_lines;

        cvmx_write_io(newptr.u64, newdata.u64);
}
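
/*
 * A minimal synchronous usage sketch (the error handling is an
 * assumption of this example):
 *
 *      void *buf = cvmx_fpa3_alloc(aura);
 *
 *      if (!buf)
 *              return;
 *      ... use the buffer ...
 *      cvmx_fpa3_free(buf, aura, 0);
 */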

/**
 * Free a pointer back to the pool without flushing the write buffer.
 *
 * @param ptr    pointer to the block to free
 * @param aura   global aura number
 * @param num_cache_lines cache lines to invalidate
 */
static inline void cvmx_fpa3_free_nosync(void *ptr, cvmx_fpa3_gaura_t aura,
                                         unsigned int num_cache_lines)
{
        cvmx_fpa3_store_addr_t newptr;
        cvmx_addr_t newdata;

        newdata.u64 = cvmx_ptr_to_phys(ptr);

        /* Prevent GCC from reordering writes to (*ptr) */
        asm volatile("" : : : "memory");

        newptr.u64 = 0;
        newptr.seg = CVMX_MIPS_SPACE_XKPHYS;
        newptr.io = 1;
        newptr.did = 0x29; /* Device ID, indicates FPA */
        newptr.node = aura.node;
        newptr.aura = aura.laura;
        newptr.fabs = 0; /* Free absolute. FIXME to use config option */
        newptr.dwb_count = num_cache_lines;

        cvmx_write_io(newptr.u64, newdata.u64);
}

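/**
 * Query whether an FPA3 POOL is enabled.
 *
 * @param pool is the FPA3 POOL handle
 * @return 1 if the pool is enabled, 0 if disabled,
 *         or -1 if the handle is not valid
 */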
static inline int cvmx_fpa3_pool_is_enabled(cvmx_fpa3_pool_t pool)
{
        cvmx_fpa_poolx_cfg_t pool_cfg;

        if (!__cvmx_fpa3_pool_valid(pool))
                return -1;

        pool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
        return pool_cfg.cn78xx.ena;
}

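/**
 * Configure the global RED (Random Early Discard) timing parameters
 * on a node.
 *
 * @param node node number
 * @param qos_avg_en enable QOS averaging (FPA_GEN_CFG[AVG_EN])
 * @param red_lvl_dly backlog level delay (FPA_GEN_CFG[LVL_DLY])
 * @param avg_dly averaging delay (FPA_RED_DELAY[AVG_DLY])
 * @return 0 on success
 */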
static inline int cvmx_fpa3_config_red_params(unsigned int node, int qos_avg_en, int red_lvl_dly,
                                              int avg_dly)
{
        cvmx_fpa_gen_cfg_t fpa_cfg;
        cvmx_fpa_red_delay_t red_delay;

        fpa_cfg.u64 = cvmx_read_csr_node(node, CVMX_FPA_GEN_CFG);
        fpa_cfg.s.avg_en = qos_avg_en;
        fpa_cfg.s.lvl_dly = red_lvl_dly;
        cvmx_write_csr_node(node, CVMX_FPA_GEN_CFG, fpa_cfg.u64);

        red_delay.u64 = cvmx_read_csr_node(node, CVMX_FPA_RED_DELAY);
        red_delay.s.avg_dly = avg_dly;
        cvmx_write_csr_node(node, CVMX_FPA_RED_DELAY, red_delay.u64);
        return 0;
}

/**
 * Get the buffer size of the POOL underlying the specified AURA.
 *
 * @param aura Global aura number
 * @return size of the buffers in the underlying pool
 */
static inline int cvmx_fpa3_get_aura_buf_size(cvmx_fpa3_gaura_t aura)
{
        cvmx_fpa3_pool_t pool;
        cvmx_fpa_poolx_cfg_t pool_cfg;
        int block_size;

        pool = cvmx_fpa3_aura_to_pool(aura);

        pool_cfg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_CFG(pool.lpool));
        block_size = pool_cfg.cn78xx.buf_size << 7;
        return block_size;
}

/**
 * Return the number of available buffers in an AURA
 *
 * @param aura AURA to receive the count for
 * @return available buffer count
 */
static inline long long cvmx_fpa3_get_available(cvmx_fpa3_gaura_t aura)
{
        cvmx_fpa3_pool_t pool;
        cvmx_fpa_poolx_available_t avail_reg;
        cvmx_fpa_aurax_cnt_t cnt_reg;
        cvmx_fpa_aurax_cnt_limit_t limit_reg;
        long long ret;

        pool = cvmx_fpa3_aura_to_pool(aura);

        /* Get POOL available buffer count */
        avail_reg.u64 = cvmx_read_csr_node(pool.node, CVMX_FPA_POOLX_AVAILABLE(pool.lpool));

        /* Get AURA current available count */
        cnt_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT(aura.laura));
        limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));

        if (limit_reg.cn78xx.limit < cnt_reg.cn78xx.cnt)
                return 0;

        /* Calculate AURA-based buffer allowance */
        ret = limit_reg.cn78xx.limit - cnt_reg.cn78xx.cnt;

        /* Use POOL real buffer availability when less than the allowance */
        if (ret > (long long)avail_reg.cn78xx.count)
                ret = avail_reg.cn78xx.count;

        return ret;
}

/**
 * Configure the QoS parameters of an FPA3 AURA
 *
 * @param aura is the FPA3 AURA handle
 * @param ena_red enables random early discard when outstanding count exceeds 'pass_thresh'
 * @param pass_thresh is the maximum count to invoke flow control
 * @param drop_thresh is the count threshold to begin dropping packets
 * @param ena_bp enables backpressure when outstanding count exceeds 'bp_thresh'
 * @param bp_thresh is the back-pressure threshold
 */
static inline void cvmx_fpa3_setup_aura_qos(cvmx_fpa3_gaura_t aura, bool ena_red, u64 pass_thresh,
                                            u64 drop_thresh, bool ena_bp, u64 bp_thresh)
{
        unsigned int shift = 0;
        u64 shift_thresh;
        cvmx_fpa_aurax_cnt_limit_t limit_reg;
        cvmx_fpa_aurax_cnt_levels_t aura_level;

        if (!__cvmx_fpa3_aura_valid(aura))
                return;

        /* Get AURAX count limit for validation */
        limit_reg.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LIMIT(aura.laura));

        if (pass_thresh < 256)
                pass_thresh = 255;

        if (drop_thresh <= pass_thresh || drop_thresh > limit_reg.cn78xx.limit)
                drop_thresh = limit_reg.cn78xx.limit;

        if (bp_thresh < 256 || bp_thresh > limit_reg.cn78xx.limit)
                bp_thresh = limit_reg.cn78xx.limit >> 1;

        shift_thresh = (bp_thresh > drop_thresh) ? bp_thresh : drop_thresh;

        /* Calculate shift so that the largest threshold fits in 8 bits */
        for (shift = 0; shift < (1 << 6); shift++) {
                if (((shift_thresh >> shift) & ~0xffull) == 0)
                        break;
        }
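        /*
         * Worked example: with bp_thresh = 0x30000 the loop settles on
         * shift = 10, since 0x30000 >> 10 = 0xc0 fits in 8 bits while
         * 0x30000 >> 9 = 0x180 does not.
         */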

        aura_level.u64 = cvmx_read_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura));
        aura_level.s.pass = pass_thresh >> shift;
        aura_level.s.drop = drop_thresh >> shift;
        aura_level.s.bp = bp_thresh >> shift;
        aura_level.s.shift = shift;
        aura_level.s.red_ena = ena_red;
        aura_level.s.bp_ena = ena_bp;
        cvmx_write_csr_node(aura.node, CVMX_FPA_AURAX_CNT_LEVELS(aura.laura), aura_level.u64);
}

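/* Reservation bookkeeping for AURA and POOL numbers */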
cvmx_fpa3_gaura_t cvmx_fpa3_reserve_aura(int node, int desired_aura_num);
int cvmx_fpa3_release_aura(cvmx_fpa3_gaura_t aura);
cvmx_fpa3_pool_t cvmx_fpa3_reserve_pool(int node, int desired_pool_num);
int cvmx_fpa3_release_pool(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_is_aura_available(int node, int aura_num);
int cvmx_fpa3_is_pool_available(int node, int pool_num);

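/**
 * Function to set up and fill an FPA3 POOL with buffers
 *
 * @param node - configure fpa on this node
 * @param desired_pool - pool number to use, -1 for dynamic allocation
 * @param name - name to register
 * @param block_size - size of buffers in the pool
 * @param num_blocks - number of buffers to fill the pool with
 * @param buffer - pre-allocated memory for the buffers
 *
 * @return configured pool on success, CVMX_FPA3_INVALID_POOL on failure
 */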
cvmx_fpa3_pool_t cvmx_fpa3_setup_fill_pool(int node, int desired_pool, const char *name,
                                           unsigned int block_size, unsigned int num_blocks,
                                           void *buffer);

/**
 * Function to attach an AURA to an existing POOL
 *
 * @param pool - configured pool to attach the aura to
 * @param desired_aura - aura number to use, -1 to allocate dynamically
 * @param name - name to register
 * @param block_size - size of buffers to use
 * @param num_blocks - number of blocks to allocate
 *
 * @return configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure
 */
cvmx_fpa3_gaura_t cvmx_fpa3_set_aura_for_pool(cvmx_fpa3_pool_t pool, int desired_aura,
                                              const char *name, unsigned int block_size,
                                              unsigned int num_blocks);

/**
 * Function to set up and initialize an AURA and its POOL.
 *
 * @param node - configure fpa on this node
 * @param desired_aura - aura number to use, -1 for dynamic allocation
 * @param name - name to register
 * @param buffer - pre-allocated memory for the pool buffers
 * @param block_size - size of buffers in the pool
 * @param num_blocks - max number of buffers allowed
 *
 * @return configured gaura on success, CVMX_FPA3_INVALID_GAURA on failure
 */
cvmx_fpa3_gaura_t cvmx_fpa3_setup_aura_and_pool(int node, int desired_aura, const char *name,
                                                void *buffer, unsigned int block_size,
                                                unsigned int num_blocks);

int cvmx_fpa3_shutdown_aura_and_pool(cvmx_fpa3_gaura_t aura);
int cvmx_fpa3_shutdown_aura(cvmx_fpa3_gaura_t aura);
int cvmx_fpa3_shutdown_pool(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_pool_name(cvmx_fpa3_pool_t pool);
int cvmx_fpa3_get_pool_buf_size(cvmx_fpa3_pool_t pool);
const char *cvmx_fpa3_get_aura_name(cvmx_fpa3_gaura_t aura);

/* FIXME: Need a different macro for stage2 of u-boot */

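/**
 * Initialize an AURA/POOL pair on node 0 for stage 2 boot use.
 *
 * @param aura local aura number
 * @param pool local pool number backing the aura
 * @param stack_paddr physical address of the pool stack memory
 * @param stacklen length of the pool stack in bytes
 * @param buffer_sz size of each buffer in bytes (a multiple of 128)
 * @param buf_cnt number of buffers to credit to the aura
 */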
static inline void cvmx_fpa3_stage2_init(int aura, int pool, u64 stack_paddr, int stacklen,
                                         int buffer_sz, int buf_cnt)
{
        cvmx_fpa_poolx_cfg_t pool_cfg;

        /* Configure pool stack */
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), stack_paddr);
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), stack_paddr);
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), stack_paddr + stacklen);

        /* Configure pool with buffer size, writing the config once with
         * ENA clear to reset the pool before starting it
         */
        pool_cfg.u64 = 0;
        pool_cfg.cn78xx.nat_align = 1;
        pool_cfg.cn78xx.buf_size = buffer_sz >> 7;
        pool_cfg.cn78xx.l_type = 0x2;
        pool_cfg.cn78xx.ena = 0;
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);
        /* Now enable the pool */
        pool_cfg.cn78xx.ena = 1;
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), pool_cfg.u64);

        cvmx_write_csr_node(0, CVMX_FPA_AURAX_CFG(aura), 0);
        cvmx_write_csr_node(0, CVMX_FPA_AURAX_CNT_ADD(aura), buf_cnt);
        cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), (u64)pool);
}

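/**
 * Disable and unconfigure an AURA/POOL pair set up by
 * cvmx_fpa3_stage2_init().
 */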
static inline void cvmx_fpa3_stage2_disable(int aura, int pool)
{
        cvmx_write_csr_node(0, CVMX_FPA_AURAX_POOL(aura), 0);
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_CFG(pool), 0);
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_BASE(pool), 0);
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_ADDR(pool), 0);
        cvmx_write_csr_node(0, CVMX_FPA_POOLX_STACK_END(pool), 0);
}

#endif /* __CVMX_FPA3_H__ */