linux/arch/mips/include/asm/octeon/cvmx-fau.h
/***********************license start***************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2008 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 ***********************license end**************************************/

/*
 * Interface to the hardware Fetch and Add Unit.
 */

#ifndef __CVMX_FAU_H__
#define __CVMX_FAU_H__

/*
 * Octeon Fetch and Add Unit (FAU)
 */

#define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
#define CVMX_FAU_BITS_SCRADDR       63, 56
#define CVMX_FAU_BITS_LEN           55, 48
#define CVMX_FAU_BITS_INEVAL        35, 14
#define CVMX_FAU_BITS_TAGWAIT       13, 13
#define CVMX_FAU_BITS_NOADD         13, 13
#define CVMX_FAU_BITS_SIZE          12, 11
#define CVMX_FAU_BITS_REGISTER      10, 0

typedef enum {
	CVMX_FAU_OP_SIZE_8 = 0,
	CVMX_FAU_OP_SIZE_16 = 1,
	CVMX_FAU_OP_SIZE_32 = 2,
	CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;
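
/*
 * Illustrative example (not part of the FAU API): registers are identified
 * by byte offsets into the unit's register file, so counters of different
 * widths must be laid out so they do not overlap.  The offsets below are
 * made-up values for illustration only; real code must manage its own FAU
 * register allocation.
 *
 *	#define FAU_PACKET_COUNT  0	// hypothetical 64-bit counter, 64-bit offsets step by 8
 *	#define FAU_ERROR_COUNT   8	// next free 64-bit register
 *	#define FAU_LINK_FLAG    16	// an 8-bit register may use any free byte
 */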

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int64_t value:63;
} cvmx_fau_tagwait64_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int32_t value:31;
} cvmx_fau_tagwait32_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int16_t value:15;
} cvmx_fau_tagwait16_t;

/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct {
	uint64_t error:1;
	int8_t value:7;
} cvmx_fau_tagwait8_t;

/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union {
	uint64_t u64;
	struct {
		uint64_t invalid:1;
		uint64_t data:63;	/* unpredictable if invalid is set */
	} s;
} cvmx_fau_async_tagwait_result_t;

/**
 * Builds a store I/O address for writing to the FAU
 *
 * @noadd:  0 = Store value is atomically added to the current value
 *          1 = Store value is atomically written over the current value
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 2 for 16 bit access.
 *               - Step by 4 for 32 bit access.
 *               - Step by 8 for 64 bit access.
 * Returns Address to store for atomic update
 */
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
{
	return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
	       cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}

/**
 * Builds an I/O address for accessing the FAU
 *
 * @tagwait: Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * Returns Address to read from for atomic update
 */
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
						 int64_t value)
{
	return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
	       cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
	       cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}

/**
 * Perform an atomic 64 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * Returns Value of the register before the update
 */
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
					       int64_t value)
{
	return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
}
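
/*
 * Usage example (illustrative sketch only; FAU_PACKET_COUNT is the
 * hypothetical register offset from the allocation example above):
 *
 *	int64_t old = cvmx_fau_fetch_and_add64(FAU_PACKET_COUNT, 1);
 *
 * The add is performed by the FAU hardware, so no locking is needed even
 * when several cores update the same register; 'old' is the counter value
 * immediately before this core's increment.
 */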

/**
 * Perform an atomic 32 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 *                Note: Only the low 22 bits are available.
 * Returns Value of the register before the update
 */
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
					       int32_t value)
{
	return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 16 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 * Returns Value of the register before the update
 */
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
					       int16_t value)
{
	return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 8 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 * Returns Value of the register before the update
 */
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
}

/**
 * Perform an atomic 64 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 8 for 64 bit access.
 * @value:  Signed value to add.
 *               Note: Only the low 22 bits are available.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	union {
		uint64_t i64;
		cvmx_fau_tagwait64_t t;
	} result;
	result.i64 =
	    cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}
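
/*
 * Usage example (illustrative sketch only; FAU_PACKET_COUNT is the same
 * hypothetical register offset used above).  The error bit distinguishes a
 * tag-switch timeout from a normal result:
 *
 *	cvmx_fau_tagwait64_t r;
 *
 *	r = cvmx_fau_tagwait_fetch_and_add64(FAU_PACKET_COUNT, 1);
 *	if (r.error)
 *		;	// timed out waiting for the tag switch
 *	else
 *		;	// r.value is the counter value before the add
 */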

/**
 * Perform an atomic 32 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 4 for 32 bit access.
 * @value:  Signed value to add.
 *               Note: Only the low 22 bits are available.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	union {
		uint64_t i32;
		cvmx_fau_tagwait32_t t;
	} result;
	result.i32 =
	    cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Perform an atomic 16 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 *               - Step by 2 for 16 bit access.
 * @value:  Signed value to add.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	union {
		uint64_t i16;
		cvmx_fau_tagwait16_t t;
	} result;
	result.i16 =
	    cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Perform an atomic 8 bit add after the current tag switch
 * completes
 *
 * @reg:    FAU atomic register to access. 0 <= reg < 2048.
 * @value:  Signed value to add.
 * Returns If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be
 *         returned
 */
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	union {
		uint64_t i8;
		cvmx_fau_tagwait8_t t;
	} result;
	result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
	return result.t;
}

/**
 * Builds I/O data for async operations
 *
 * @scraddr: Scratch pad byte address to write to.  Must be 8 byte aligned
 * @value:   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @tagwait: Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @size:    The size of the operation:
 *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits
 *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
 *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
 *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * Returns Data to write using cvmx_send_single
 */
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
					      uint64_t tagwait,
					      cvmx_fau_op_size_t size,
					      uint64_t reg)
{
	return CVMX_FAU_LOAD_IO_ADDRESS |
	       cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
	       cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
	       cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
	       cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
	       cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
	       cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
}

/**
 * Perform an async atomic 64 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *                Note: Only the low 22 bits are available.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
						  cvmx_fau_reg_64_t reg,
						  int64_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
}
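
/*
 * Usage example (illustrative sketch): an asynchronous add issues an IOBDMA
 * command and returns immediately; the old register value shows up later in
 * core scratch memory.  SCR_COUNTER below is an assumed free 8-byte-aligned
 * scratch offset, and CVMX_SYNCIOBDMA / cvmx_scratch_read64() are the usual
 * SDK primitives for waiting on and reading back IOBDMA responses.
 *
 *	cvmx_fau_async_fetch_and_add64(SCR_COUNTER, FAU_PACKET_COUNT, 1);
 *	// ... do other work while the FAU operation is in flight ...
 *	CVMX_SYNCIOBDMA;				// wait for the response
 *	int64_t old = cvmx_scratch_read64(SCR_COUNTER);	// value before the add
 */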

/**
 * Perform an async atomic 32 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 *                Note: Only the low 22 bits are available.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
						  cvmx_fau_reg_32_t reg,
						  int32_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
}

/**
 * Perform an async atomic 16 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
						  cvmx_fau_reg_16_t reg,
						  int16_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
}

/**
 * Perform an async atomic 8 bit add. The old value is
 * placed in the scratch memory at byte address scraddr.
 *
 * @scraddr: Scratch memory byte address to put response in.
 *                Must be 8 byte aligned.
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
						 cvmx_fau_reg_8_t reg,
						 int8_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
}

/**
 * Perform an async atomic 64 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *           8 byte aligned.  If a timeout occurs, the error bit (63)
 *           will be set. Otherwise the value of the register before
 *           the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 *                Note: Only the low 22 bits are available.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
							  cvmx_fau_reg_64_t reg,
							  int64_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
}
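
/*
 * Usage example (illustrative sketch, reusing the assumed SCR_COUNTER
 * scratch offset and SDK IOBDMA helpers from the example above): the
 * response word is decoded with cvmx_fau_async_tagwait_result_t.
 *
 *	cvmx_fau_async_tagwait_result_t r;
 *
 *	cvmx_fau_async_tagwait_fetch_and_add64(SCR_COUNTER, FAU_PACKET_COUNT, 1);
 *	CVMX_SYNCIOBDMA;
 *	r.u64 = cvmx_scratch_read64(SCR_COUNTER);
 *	if (r.s.invalid)
 *		;	// timed out waiting for the tag switch
 *	else
 *		;	// r.s.data holds the register value before the add
 */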

/**
 * Perform an async atomic 32 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *           8 byte aligned.  If a timeout occurs, the error bit (63)
 *           will be set. Otherwise the value of the register before
 *           the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 *                Note: Only the low 22 bits are available.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
							  cvmx_fau_reg_32_t reg,
							  int32_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
}

/**
 * Perform an async atomic 16 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *           8 byte aligned.  If a timeout occurs, the error bit (63)
 *           will be set. Otherwise the value of the register before
 *           the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
							  cvmx_fau_reg_16_t reg,
							  int16_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
}

/**
 * Perform an async atomic 8 bit add after the current tag
 * switch completes.
 *
 * @scraddr: Scratch memory byte address to put response in.  Must be
 *           8 byte aligned.  If a timeout occurs, the error bit (63)
 *           will be set. Otherwise the value of the register before
 *           the update will be returned
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 *
 * The result is placed in the scratch pad location @scraddr; nothing is
 * returned directly.
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
							 cvmx_fau_reg_8_t reg,
							 int8_t value)
{
	cvmx_send_single(__cvmx_fau_iobdma_data
			 (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
}

/**
 * Perform an atomic 64 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
}
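
/*
 * Usage example (illustrative sketch, same hypothetical FAU_PACKET_COUNT
 * offset as above): when the previous value is not needed, this store-only
 * form avoids the load response that cvmx_fau_fetch_and_add64() must wait
 * for.
 *
 *	cvmx_fau_atomic_add64(FAU_PACKET_COUNT, 1);	// fire-and-forget increment
 */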

/**
 * Perform an atomic 32 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
}

/**
 * Perform an atomic 16 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
}

/**
 * Perform an atomic 8 bit add
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to add.
 */
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
}

/**
 * Perform an atomic 64 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
{
	cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
}
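
/*
 * Usage example (illustrative sketch, same hypothetical FAU_PACKET_COUNT
 * offset as above): the write variants set the NOADD bit in the store
 * address, so the stored value replaces the register contents instead of
 * being added to them.  A typical use is (re)initialising a counter:
 *
 *	cvmx_fau_atomic_write64(FAU_PACKET_COUNT, 0);	// reset the counter
 */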

/**
 * Perform an atomic 32 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
{
	cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
}

/**
 * Perform an atomic 16 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
{
	cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
}

/**
 * Perform an atomic 8 bit write
 *
 * @reg:     FAU atomic register to access. 0 <= reg < 2048.
 * @value:   Signed value to write.
 */
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
{
	cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
}

#endif /* __CVMX_FAU_H__ */