dpdk/drivers/common/iavf/iavf_adminq.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2021 Intel Corporation
 */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
STATIC void iavf_adminq_init_regs(struct iavf_hw *hw)
{
        /* set head and tail registers in our local struct */
        hw->aq.asq.tail = IAVF_VF_ATQT1;
        hw->aq.asq.head = IAVF_VF_ATQH1;
        hw->aq.asq.len  = IAVF_VF_ATQLEN1;
        hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
        hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
        hw->aq.arq.tail = IAVF_VF_ARQT1;
        hw->aq.arq.head = IAVF_VF_ARQH1;
        hw->aq.arq.len  = IAVF_VF_ARQLEN1;
        hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
        hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}
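
/*
 * Usage note (illustrative, not driver code): once the offsets above are
 * cached, every register access in this file goes through wr32()/rd32()
 * with the cached field rather than a hard-coded register name, e.g.:
 *
 *      wr32(hw, hw->aq.asq.tail, 0);           -- reset the send-queue tail
 *      val = rd32(hw, hw->aq.asq.head);        -- poll the send-queue head
 */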

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         iavf_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct iavf_asq_cmd_details)));
        if (ret_code) {
                iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         iavf_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_asq(struct iavf_hw *hw)
{
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
void iavf_free_adminq_arq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                        desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with the Admin queue design; there is
                 * no register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
                desc->params.external.addr_low =
                        CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}
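
/*
 * Worked example (hypothetical values): pre-posting a 4096-byte receive
 * buffer at bus address 0x123456000 leaves the descriptor as
 *
 *      flags     = IAVF_AQ_FLAG_BUF | IAVF_AQ_FLAG_LB  (size > IAVF_AQ_LARGE_BUF)
 *      datalen   = 4096
 *      addr_high = IAVF_HI_DWORD(pa) = 0x00000001
 *      addr_low  = IAVF_LO_DWORD(pa) = 0x23456000
 *
 * so the firmware learns each buffer's size from the descriptor itself,
 * which is why no buffer-size register exists.
 */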

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
STATIC enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
        enum iavf_status ret_code;
        struct iavf_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_arq_bufs(struct iavf_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
STATIC void iavf_free_asq_bufs(struct iavf_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
STATIC enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, IAVF_HI_DWORD(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.asq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
STATIC enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, IAVF_HI_DWORD(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != IAVF_LO_DWORD(hw->aq.arq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_asq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_asq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_asq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_config_regs;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_asq(hw);
        return ret_code;

init_config_regs:
        iavf_free_asq_bufs(hw);

init_adminq_exit:
        return ret_code;
}
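
/*
 * Caller-contract sketch (illustrative; the sizes are hypothetical).
 * iavf_init_adminq() below is the usual entry point, since it caches the
 * register offsets first; a direct caller would look like:
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      status = iavf_init_asq(hw);
 *
 * Every failure path above unwinds its own allocations, so the caller
 * only needs to check the return value.
 */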

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_arq_ring(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_arq_bufs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_arq_regs(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        if (hw->aq.asq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_asq_bufs(hw);

shutdown_asq_out:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_arq_bufs(hw);

shutdown_arq_out:
        iavf_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }
        iavf_init_spinlock(&hw->aq.asq_spinlock);
        iavf_init_spinlock(&hw->aq.arq_spinlock);

        /* Set up register offsets */
        iavf_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = iavf_init_asq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_destroy_spinlocks;

        /* allocate the ARQ */
        ret_code = iavf_init_arq(hw);
        if (ret_code != IAVF_SUCCESS)
                goto init_adminq_free_asq;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_asq:
        iavf_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

init_adminq_exit:
        return ret_code;
}
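
/*
 * Bring-up sketch (illustrative; the entry counts and buffer sizes are
 * hypothetical -- the documented contract is only that all four fields
 * are non-zero before the call):
 *
 *      hw->aq.num_asq_entries = 128;
 *      hw->aq.num_arq_entries = 128;
 *      hw->aq.asq_buf_size = 4096;
 *      hw->aq.arq_buf_size = 4096;
 *      if (iavf_init_adminq(hw) != IAVF_SUCCESS)
 *              -- bail out; nothing is left allocated --
 *      -- exchange commands/events --
 *      iavf_shutdown_adminq(hw);
 */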

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = IAVF_SUCCESS;

        if (iavf_check_asq_alive(hw))
                iavf_aq_queue_shutdown(hw, true);

        iavf_shutdown_asq(hw);
        iavf_shutdown_arq(hw);
        iavf_destroy_spinlock(&hw->aq.asq_spinlock);
        iavf_destroy_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Returns the number of free descriptors.
 **/
u16 iavf_clean_asq(struct iavf_hw *hw)
{
        struct iavf_adminq_ring *asq = &(hw->aq.asq);
        struct iavf_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct iavf_aq_desc desc_cb;
        struct iavf_aq_desc *desc;

        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        IAVF_ADMINQ_CALLBACK cb_func =
                                        (IAVF_ADMINQ_CALLBACK)details->callback;
                        iavf_memcpy(&desc_cb, desc, sizeof(struct iavf_aq_desc),
                                    IAVF_DMA_TO_DMA);
                        cb_func(hw, &desc_cb);
                }
                iavf_memset(desc, 0, sizeof(*desc), IAVF_DMA_MEM);
                iavf_memset(details, 0, sizeof(*details), IAVF_NONDMA_MEM);
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = IAVF_ADMINQ_DESC(*asq, ntc);
                details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return IAVF_DESC_UNUSED(asq);
}
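
/*
 * Ring-accounting example (hypothetical state): with count = 128,
 * next_to_clean = 7 and next_to_use = 10 after the loop above, three
 * descriptors are still owned by firmware, and IAVF_DESC_UNUSED() (from
 * iavf_adminq.h) reports 128 + 7 - 10 - 1 = 124 free slots. One slot is
 * always kept in reserve so head == tail unambiguously means "empty".
 */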

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
                                struct iavf_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct iavf_asq_cmd_details *cmd_details)
{
        enum iavf_status status = IAVF_SUCCESS;
        struct iavf_dma_mem *dma_buff = NULL;
        struct iavf_asq_cmd_details *details;
        struct iavf_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        iavf_acquire_spinlock(&hw->aq.asq_spinlock);

        hw->aq.asq_last_status = IAVF_AQ_RC_OK;

        if (hw->aq.asq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                iavf_memcpy(details,
                            cmd_details,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_TO_NONDMA);

                /* If the cmd_details are defined copy the cookie.  The
                 * CPU_TO_LE32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(details->cookie));
                        desc->cookie_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(details->cookie));
                }
        } else {
                iavf_memset(details, 0,
                            sizeof(struct iavf_asq_cmd_details),
                            IAVF_NONDMA_MEM);
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~CPU_TO_LE16(details->flags_dis);
        desc->flags |= CPU_TO_LE16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = IAVF_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = IAVF_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (iavf_clean_asq(hw) == 0) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = IAVF_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        iavf_memcpy(desc_on_ring, desc, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_TO_DMA);

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                iavf_memcpy(dma_buff->va, buff, buff_size,
                            IAVF_NONDMA_TO_DMA);
                desc_on_ring->datalen = CPU_TO_LE16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                CPU_TO_LE32(IAVF_HI_DWORD(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                CPU_TO_LE32(IAVF_LO_DWORD(dma_buff->pa));
        }

        /* bump the tail */
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (iavf_asq_done(hw))
                                break;
                        iavf_usec_delay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (iavf_asq_done(hw)) {
                iavf_memcpy(desc, desc_on_ring, sizeof(struct iavf_aq_desc),
                            IAVF_DMA_TO_NONDMA);
                if (buff != NULL)
                        iavf_memcpy(buff, dma_buff->va, buff_size,
                                    IAVF_DMA_TO_NONDMA);
                retval = LE16_TO_CPU(desc->retval);
                if (retval != 0) {
                        iavf_debug(hw,
                                   IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
                        status = IAVF_SUCCESS;
                else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
                        status = IAVF_ERR_NOT_READY;
                else
                        status = IAVF_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
        }

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                iavf_memcpy(details->wb_desc, desc_on_ring,
                            sizeof(struct iavf_aq_desc), IAVF_DMA_TO_NONDMA);
        /* update the error if a timeout occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        iavf_release_spinlock(&hw->aq.asq_spinlock);
        return status;
}
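
/*
 * Send sketch (illustrative; iavf_aqc_opc_queue_shutdown is one opcode
 * from iavf_adminq_cmd.h, shown only as an example). A direct command
 * carries no buffer, so buff is NULL and buff_size is 0; the call then
 * blocks for up to asq_cmd_timeout waiting for the firmware write-back.
 * The descriptor is built with the helper that follows:
 *
 *      struct iavf_aq_desc desc;
 *
 *      iavf_fill_default_direct_cmd_desc(&desc, iavf_aqc_opc_queue_shutdown);
 *      status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 *      -- on IAVF_SUCCESS the write-back, including desc.retval, is in desc
 */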

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc),
                    IAVF_NONDMA_MEM);
        desc->opcode = CPU_TO_LE16(opcode);
        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_SI);
}

/**
 *  iavf_clean_arq_element - clean one element from the receive queue
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                                             struct iavf_arq_event_info *e,
                                             u16 *pending)
{
        enum iavf_status ret_code = IAVF_SUCCESS;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        iavf_memset(&e->desc, 0, sizeof(e->desc), IAVF_NONDMA_MEM);

        /* take the lock before we start messing with the ring */
        iavf_acquire_spinlock(&hw->aq.arq_spinlock);

        if (hw->aq.arq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = IAVF_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum iavf_admin_queue_err)LE16_TO_CPU(desc->retval);
        flags = LE16_TO_CPU(desc->flags);
        if (flags & IAVF_AQ_FLAG_ERR) {
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        iavf_memcpy(&e->desc, desc, sizeof(struct iavf_aq_desc),
                    IAVF_DMA_TO_NONDMA);
        datalen = LE16_TO_CPU(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                iavf_memcpy(e->msg_buf,
                            hw->aq.arq.r.arq_bi[desc_idx].va,
                            e->msg_len, IAVF_DMA_TO_NONDMA);

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        iavf_memset((void *)desc, 0, sizeof(struct iavf_aq_desc), IAVF_DMA_MEM);

        desc->flags = CPU_TO_LE16(IAVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                desc->flags |= CPU_TO_LE16(IAVF_AQ_FLAG_LB);
        desc->datalen = CPU_TO_LE16((u16)bi->size);
        desc->params.external.addr_high = CPU_TO_LE32(IAVF_HI_DWORD(bi->pa));
        desc->params.external.addr_low = CPU_TO_LE32(IAVF_LO_DWORD(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        iavf_release_spinlock(&hw->aq.arq_spinlock);

        return ret_code;
}
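
/*
 * Receive-poll sketch (illustrative; caller_buf is a hypothetical
 * caller-owned buffer). A consumer typically drains the ARQ until
 * 'pending' reaches zero:
 *
 *      struct iavf_arq_event_info e;
 *      u16 pending;
 *
 *      e.buf_len = hw->aq.arq_buf_size;
 *      e.msg_buf = caller_buf;
 *      do {
 *              if (iavf_clean_arq_element(hw, &e, &pending))
 *                      break;          -- no work left, or an error
 *              -- consume e.desc and e.msg_len bytes of e.msg_buf --
 *      } while (pending);
 *
 * The 'pending' expression above handles ring wrap: with count = 128,
 * ntu = 2 and an advanced ntc = 120, it yields 128 + (2 - 120) = 10
 * events still queued.
 */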