uboot/arch/mips/mach-octeon/include/mach/cvmx-hwfau.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the hardware Fetch and Add Unit.
 */

/**
 * @file
 *
 * Interface to the hardware Fetch and Add Unit.
 *
 */

#ifndef __CVMX_HWFAU_H__
#define __CVMX_HWFAU_H__

typedef int cvmx_fau_reg64_t;
typedef int cvmx_fau_reg32_t;
typedef int cvmx_fau_reg16_t;
typedef int cvmx_fau_reg8_t;

#define CVMX_FAU_REG_ANY -1

/*
 * Octeon Fetch and Add Unit (FAU)
 */

#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
#define CVMX_FAU_BITS_SCRADDR    63, 56
#define CVMX_FAU_BITS_LEN        55, 48
#define CVMX_FAU_BITS_INEVAL     35, 14
#define CVMX_FAU_BITS_TAGWAIT    13, 13
#define CVMX_FAU_BITS_NOADD      13, 13
#define CVMX_FAU_BITS_SIZE       12, 11
#define CVMX_FAU_BITS_REGISTER   10, 0

#define CVMX_FAU_MAX_REGISTERS_8 (2048)

typedef enum {
	CVMX_FAU_OP_SIZE_8 = 0,
	CVMX_FAU_OP_SIZE_16 = 1,
	CVMX_FAU_OP_SIZE_32 = 2,
	CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;
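
/*
 * FAU registers are addressed by byte offset into a 2 KB register file
 * (CVMX_FAU_MAX_REGISTERS_8 bytes), and an offset must be aligned to the
 * access size: step by 2, 4 or 8 for 16, 32 or 64 bit accesses. Illustrative
 * sketch of one possible layout; the names are hypothetical and not part of
 * this API:
 *
 *   #define MY_FAU_PKT_COUNT  0	// 64-bit register in bytes 0..7
 *   #define MY_FAU_BYTE_COUNT 8	// next 64-bit register in bytes 8..15
 *   #define MY_FAU_SMALL_FLAG 16	// an 8-bit register can use any byte offset
 */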

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;
	s64 value : 63;
} cvmx_fau_tagwait64_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;
	s32 value : 31;
} cvmx_fau_tagwait32_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;
	s16 value : 15;
} cvmx_fau_tagwait16_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	u64 error : 1;
	int8_t value : 7;
} cvmx_fau_tagwait8_t;

/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union {
	u64 u64;
	struct {
		u64 invalid : 1;
		u64 data : 63; /* unpredictable if invalid is set */
	} s;
} cvmx_fau_async_tagwait_result_t;

#define SWIZZLE_8  0
#define SWIZZLE_16 0
#define SWIZZLE_32 0

/**
 * @INTERNAL
 * Builds a store I/O address for writing to the FAU
 *
 * @param noadd  0 = Store value is atomically added to the current value
 *               1 = Store value is atomically written over the current value
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 2 for 16 bit access.
 *               - Step by 4 for 32 bit access.
 *               - Step by 8 for 64 bit access.
 * @return Address to store for atomic update
 */
static inline u64 __cvmx_hwfau_store_address(u64 noadd, u64 reg)
{
	return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
		cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
		cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
}

/**
 * @INTERNAL
 * Builds an I/O address for accessing the FAU
 *
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @return Address to read from for atomic update
 */
static inline u64 __cvmx_hwfau_atomic_address(u64 tagwait, u64 reg, s64 value)
{
	return (CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
		cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
		cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
		cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
}

/**
 * Perform an atomic 64 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline s64 cvmx_hwfau_fetch_and_add64(cvmx_fau_reg64_t reg, s64 value)
{
	return cvmx_read64_int64(__cvmx_hwfau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 32 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline s32 cvmx_hwfau_fetch_and_add32(cvmx_fau_reg32_t reg, s32 value)
{
	reg ^= SWIZZLE_32;
	return cvmx_read64_int32(__cvmx_hwfau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 16 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Value of the register before the update
 */
static inline s16 cvmx_hwfau_fetch_and_add16(cvmx_fau_reg16_t reg, s16 value)
{
	reg ^= SWIZZLE_16;
	return cvmx_read64_int16(__cvmx_hwfau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 8 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Value of the register before the update
 */
static inline int8_t cvmx_hwfau_fetch_and_add8(cvmx_fau_reg8_t reg, int8_t value)
{
	reg ^= SWIZZLE_8;
	return cvmx_read64_int8(__cvmx_hwfau_atomic_address(0, reg, value));
}
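
/*
 * Example (illustrative sketch): a shared packet counter kept in a 64-bit FAU
 * register, reusing the hypothetical MY_FAU_PKT_COUNT offset from the layout
 * sketch above. The FAU performs the add atomically, so no locking is needed.
 *
 *   s64 old = cvmx_hwfau_fetch_and_add64(MY_FAU_PKT_COUNT, 1);
 *   // 'old' holds the counter value before this increment
 */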

/**
 * Perform an atomic 64 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 8 for 64 bit access.
 * @param value  Signed value to add.
 *               Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait64_t cvmx_hwfau_tagwait_fetch_and_add64(cvmx_fau_reg64_t reg,
								      s64 value)
{
	union {
		u64 i64;
		cvmx_fau_tagwait64_t t;
	} result;
	result.i64 = cvmx_read64_int64(__cvmx_hwfau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Perform an atomic 32 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 4 for 32 bit access.
 * @param value  Signed value to add.
 *               Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait32_t cvmx_hwfau_tagwait_fetch_and_add32(cvmx_fau_reg32_t reg,
								      s32 value)
{
	union {
		u64 i32;
		cvmx_fau_tagwait32_t t;
	} result;
	reg ^= SWIZZLE_32;
	result.i32 = cvmx_read64_int32(__cvmx_hwfau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Perform an atomic 16 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 2 for 16 bit access.
 * @param value  Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait16_t cvmx_hwfau_tagwait_fetch_and_add16(cvmx_fau_reg16_t reg,
								      s16 value)
{
	union {
		u64 i16;
		cvmx_fau_tagwait16_t t;
	} result;
	reg ^= SWIZZLE_16;
	result.i16 = cvmx_read64_int16(__cvmx_hwfau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Perform an atomic 8 bit add after the current tag switch
 * completes
 *
 * @param reg    FAU atomic register to access. 0 <= reg < 2048.
 * @param value  Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait8_t cvmx_hwfau_tagwait_fetch_and_add8(cvmx_fau_reg8_t reg,
								    int8_t value)
{
	union {
		u64 i8;
		cvmx_fau_tagwait8_t t;
	} result;
	reg ^= SWIZZLE_8;
	result.i8 = cvmx_read64_int8(__cvmx_hwfau_atomic_address(1, reg, value));
	return result.t;
}
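
/*
 * Example (illustrative sketch): an increment that is held until the current
 * tag switch completes, again using the hypothetical MY_FAU_PKT_COUNT offset.
 * The error bit must be checked to detect a timeout.
 *
 *   cvmx_fau_tagwait64_t r =
 *	cvmx_hwfau_tagwait_fetch_and_add64(MY_FAU_PKT_COUNT, 1);
 *   if (!r.error)
 *	; // r.value holds the register contents before the add
 */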

/**
 * @INTERNAL
 * Builds I/O data for async operations
 *
 * @param scraddr Scratch pad byte address to write to. Must be 8 byte aligned
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param size    The size of the operation:
 *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits
 *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
 *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
 *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @return Data to write using cvmx_send_single
 */
static inline u64 __cvmx_fau_iobdma_data(u64 scraddr, s64 value, u64 tagwait,
					 cvmx_fau_op_size_t size, u64 reg)
{
	return (CVMX_FAU_LOAD_IO_ADDRESS | cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
		cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
		cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
		cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
		cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
		cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg));
}

/**
 * Perform an async atomic 64 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg, s64 value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
}

/**
 * Perform an async atomic 32 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg, s32 value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
}

/**
 * Perform an async atomic 16 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg, s16 value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
}

/**
 * Perform an async atomic 8 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg, int8_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
}
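
/*
 * Example (illustrative sketch): an asynchronous increment whose old value is
 * delivered to the core's scratch pad. Assumes cvmx_scratch_read64() and the
 * CVMX_SYNCIOBDMA barrier from the companion cvmx headers; the scratch offset
 * name is hypothetical and must be 8-byte aligned.
 *
 *   #define MY_SCR_RESULT 0
 *
 *   cvmx_hwfau_async_fetch_and_add64(MY_SCR_RESULT, MY_FAU_PKT_COUNT, 1);
 *   // ... overlap other work while the IOBDMA completes ...
 *   CVMX_SYNCIOBDMA;
 *   s64 old = (s64)cvmx_scratch_read64(MY_SCR_RESULT);
 */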

/**
 * Perform an async atomic 64 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add64(u64 scraddr, cvmx_fau_reg64_t reg,
							    s64 value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
}

/**
 * Perform an async atomic 32 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add32(u64 scraddr, cvmx_fau_reg32_t reg,
							    s32 value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
}

/**
 * Perform an async atomic 16 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add16(u64 scraddr, cvmx_fau_reg16_t reg,
							    s16 value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
}

/**
 * Perform an async atomic 8 bit add after the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be
 *                returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return None; the result is placed in the scratch pad at scraddr
 */
static inline void cvmx_hwfau_async_tagwait_fetch_and_add8(u64 scraddr, cvmx_fau_reg8_t reg,
							   int8_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
}
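
/*
 * Example (illustrative sketch): the async tagwait variants deliver a
 * cvmx_fau_async_tagwait_result_t to the scratch pad, with bit 63 flagging a
 * timeout. Names reuse the hypothetical definitions from the sketches above.
 *
 *   cvmx_hwfau_async_tagwait_fetch_and_add64(MY_SCR_RESULT, MY_FAU_PKT_COUNT, 1);
 *   CVMX_SYNCIOBDMA;
 *   cvmx_fau_async_tagwait_result_t r;
 *   r.u64 = cvmx_scratch_read64(MY_SCR_RESULT);
 *   if (r.s.invalid)
 *	; // timeout: r.s.data is unpredictable
 */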

/**
 * Perform an atomic 64 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add64(cvmx_fau_reg64_t reg, s64 value)
{
	cvmx_write64_int64(__cvmx_hwfau_store_address(0, reg), value);
}

/**
 * Perform an atomic 32 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add32(cvmx_fau_reg32_t reg, s32 value)
{
	reg ^= SWIZZLE_32;
	cvmx_write64_int32(__cvmx_hwfau_store_address(0, reg), value);
}

/**
 * Perform an atomic 16 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add16(cvmx_fau_reg16_t reg, s16 value)
{
	reg ^= SWIZZLE_16;
	cvmx_write64_int16(__cvmx_hwfau_store_address(0, reg), value);
}

/**
 * Perform an atomic 8 bit add
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 */
static inline void cvmx_hwfau_atomic_add8(cvmx_fau_reg8_t reg, int8_t value)
{
	reg ^= SWIZZLE_8;
	cvmx_write64_int8(__cvmx_hwfau_store_address(0, reg), value);
}
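
/*
 * Example (illustrative sketch): when the previous value is not needed, the
 * store form above performs the add without the read that
 * cvmx_hwfau_fetch_and_add64() implies.
 *
 *   cvmx_hwfau_atomic_add64(MY_FAU_PKT_COUNT, 1);	// fire-and-forget increment
 */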

/**
 * Perform an atomic 64 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write64(cvmx_fau_reg64_t reg, s64 value)
{
	cvmx_write64_int64(__cvmx_hwfau_store_address(1, reg), value);
}

/**
 * Perform an atomic 32 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write32(cvmx_fau_reg32_t reg, s32 value)
{
	reg ^= SWIZZLE_32;
	cvmx_write64_int32(__cvmx_hwfau_store_address(1, reg), value);
}

/**
 * Perform an atomic 16 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write16(cvmx_fau_reg16_t reg, s16 value)
{
	reg ^= SWIZZLE_16;
	cvmx_write64_int16(__cvmx_hwfau_store_address(1, reg), value);
}

/**
 * Perform an atomic 8 bit write
 *
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to write.
 */
static inline void cvmx_hwfau_atomic_write8(cvmx_fau_reg8_t reg, int8_t value)
{
	reg ^= SWIZZLE_8;
	cvmx_write64_int8(__cvmx_hwfau_store_address(1, reg), value);
}
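
/*
 * Example (illustrative sketch): an atomic write is the usual way to give a
 * register a known starting value before the add operations are used on it.
 *
 *   cvmx_hwfau_atomic_write64(MY_FAU_PKT_COUNT, 0);	// reset the counter
 */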

/** Allocates a 64-bit FAU register.
 *  @return Base address of the allocated FAU register
 */
int cvmx_fau64_alloc(int reserve);

/** Allocates a 32-bit FAU register.
 *  @return Base address of the allocated FAU register
 */
int cvmx_fau32_alloc(int reserve);

/** Allocates a 16-bit FAU register.
 *  @return Base address of the allocated FAU register
 */
int cvmx_fau16_alloc(int reserve);

/** Allocates an 8-bit FAU register.
 *  @return Base address of the allocated FAU register
 */
int cvmx_fau8_alloc(int reserve);

/** Frees the specified FAU register.
 *  @param address Base address of the register to release.
 *  @return 0 on success; -1 on failure
 */
int cvmx_fau_free(int address);

/** Displays the FAU register array.
 */
void cvmx_fau_show(void);
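
/*
 * Example (illustrative sketch): allocating a register at run time instead of
 * using a fixed layout. The meaning of the 'reserve' argument and the failure
 * return convention are defined by the allocator implementation, so error
 * handling is omitted here.
 *
 *   int reg = cvmx_fau64_alloc(0);	// base address of the allocated register
 *
 *   cvmx_hwfau_atomic_write64(reg, 0);
 *   // ... use the register ...
 *   cvmx_fau_free(reg);
 */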

#endif /* __CVMX_HWFAU_H__ */