linux/drivers/net/ethernet/intel/iavf/iavf_adminq.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf_status.h"
#include "iavf_type.h"
#include "iavf_register.h"
#include "iavf_adminq.h"
#include "iavf_prototype.h"

/**
 *  iavf_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void iavf_adminq_init_regs(struct iavf_hw *hw)
{
        /* set head and tail registers in our local struct */
        hw->aq.asq.tail = IAVF_VF_ATQT1;
        hw->aq.asq.head = IAVF_VF_ATQH1;
        hw->aq.asq.len  = IAVF_VF_ATQLEN1;
        hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
        hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
        hw->aq.arq.tail = IAVF_VF_ARQT1;
        hw->aq.arq.head = IAVF_VF_ARQH1;
        hw->aq.arq.len  = IAVF_VF_ARQLEN1;
        hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
        hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
}

/**
 *  iavf_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_asq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         iavf_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct iavf_asq_cmd_details)));
        if (ret_code) {
                iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  iavf_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_adminq_arq_ring(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         iavf_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct iavf_aq_desc)),
                                         IAVF_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  iavf_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_asq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  iavf_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void iavf_free_adminq_arq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  iavf_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_arq_bufs(struct iavf_hw *hw)
{
        struct iavf_aq_desc *desc;
        struct iavf_dma_mem *bi;
        enum iavf_status ret_code;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                                          (hw->aq.num_arq_entries *
                                           sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                        desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with the Admin Queue design; there is
                 * no register for buffer size configuration
                 */
                desc->datalen = cpu_to_le16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        cpu_to_le32(upper_32_bits(bi->pa));
                desc->params.external.addr_low =
                        cpu_to_le32(lower_32_bits(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  iavf_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static enum iavf_status iavf_alloc_asq_bufs(struct iavf_hw *hw)
{
        struct iavf_dma_mem *bi;
        enum iavf_status ret_code;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                                          (hw->aq.num_asq_entries *
                                           sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 iavf_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  iavf_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_arq_bufs(struct iavf_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  iavf_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void iavf_free_asq_bufs(struct iavf_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  iavf_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static enum iavf_status iavf_config_asq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = 0;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static enum iavf_status iavf_config_arq_regs(struct iavf_hw *hw)
{
        enum iavf_status ret_code = 0;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  iavf_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = 0;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_asq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_asq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_asq_regs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static enum iavf_status iavf_init_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = 0;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = IAVF_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = iavf_alloc_adminq_arq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = iavf_alloc_arq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = iavf_config_arq_regs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        iavf_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  iavf_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static enum iavf_status iavf_shutdown_asq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = 0;

        mutex_lock(&hw->aq.asq_mutex);

        if (hw->aq.asq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_asq_bufs(hw);

shutdown_asq_out:
        mutex_unlock(&hw->aq.asq_mutex);
        return ret_code;
}

/**
 *  iavf_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static enum iavf_status iavf_shutdown_arq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = 0;

        mutex_lock(&hw->aq.arq_mutex);

        if (hw->aq.arq.count == 0) {
                ret_code = IAVF_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        iavf_free_arq_bufs(hw);

shutdown_arq_out:
        mutex_unlock(&hw->aq.arq_mutex);
        return ret_code;
}

/**
 *  iavf_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
enum iavf_status iavf_init_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = IAVF_ERR_CONFIG;
                goto init_adminq_exit;
        }

        /* Set up register offsets */
        iavf_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = IAVF_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = iavf_init_asq(hw);
        if (ret_code)
                goto init_adminq_destroy_locks;

        /* allocate the ARQ */
        ret_code = iavf_init_arq(hw);
        if (ret_code)
                goto init_adminq_free_asq;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_asq:
        iavf_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
        return ret_code;
}
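
/*
 * Usage sketch (illustrative only, not part of this file): a caller is
 * expected to size both queues and both buffers before init, and to pair
 * init with shutdown on teardown.  IAVF_AQ_LEN and IAVF_MAX_AQ_BUF_SIZE
 * below stand in for whatever constants the caller actually uses:
 *
 *      hw->aq.num_asq_entries = IAVF_AQ_LEN;
 *      hw->aq.num_arq_entries = IAVF_AQ_LEN;
 *      hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *      hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
 *      if (iavf_init_adminq(hw))
 *              goto err;
 *      ...
 *      iavf_shutdown_adminq(hw);
 */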

/**
 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
enum iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
{
        enum iavf_status ret_code = 0;

        if (iavf_check_asq_alive(hw))
                iavf_aq_queue_shutdown(hw, true);

        iavf_shutdown_asq(hw);
        iavf_shutdown_arq(hw);

        return ret_code;
}

/**
 *  iavf_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 iavf_clean_asq(struct iavf_hw *hw)
{
        struct iavf_adminq_ring *asq = &hw->aq.asq;
        struct iavf_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct iavf_aq_desc desc_cb;
        struct iavf_aq_desc *desc;

        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        IAVF_ADMINQ_CALLBACK cb_func =
                                        (IAVF_ADMINQ_CALLBACK)details->callback;
                        desc_cb = *desc;
                        cb_func(hw, &desc_cb);
                }
                memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
                memset((void *)details, 0,
                       sizeof(struct iavf_asq_cmd_details));
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = IAVF_ADMINQ_DESC(*asq, ntc);
                details = IAVF_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return IAVF_DESC_UNUSED(asq);
}
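
/*
 * Illustrative sketch (not part of this file): iavf_clean_asq() above
 * fires the optional per-command callback stashed in the command
 * details, so an asynchronous sender could request one roughly like
 * this; my_done() is a hypothetical caller-defined handler:
 *
 *      struct iavf_asq_cmd_details details = {};
 *
 *      details.async = true;
 *      details.callback = (void *)my_done;
 *      iavf_asq_send_command(hw, &desc, NULL, 0, &details);
 */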

/**
 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
bool iavf_asq_done(struct iavf_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
enum iavf_status iavf_asq_send_command(struct iavf_hw *hw,
                                       struct iavf_aq_desc *desc,
                                       void *buff, /* can be NULL */
                                       u16  buff_size,
                                       struct iavf_asq_cmd_details *cmd_details)
{
        struct iavf_dma_mem *dma_buff = NULL;
        struct iavf_asq_cmd_details *details;
        struct iavf_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        enum iavf_status status = 0;
        u16  retval = 0;
        u32  val = 0;

        mutex_lock(&hw->aq.asq_mutex);

        if (hw->aq.asq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        hw->aq.asq_last_status = IAVF_AQ_RC_OK;

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = IAVF_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = IAVF_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                *details = *cmd_details;

                /* If the cmd_details are defined, copy the cookie.  The
                 * cpu_to_le32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                cpu_to_le32(upper_32_bits(details->cookie));
                        desc->cookie_low =
                                cpu_to_le32(lower_32_bits(details->cookie));
                }
        } else {
                memset(details, 0, sizeof(struct iavf_asq_cmd_details));
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~cpu_to_le16(details->flags_dis);
        desc->flags |= cpu_to_le16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = IAVF_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag");
                status = IAVF_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (iavf_clean_asq(hw) == 0) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = IAVF_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        *desc_on_ring = *desc;

        /* if buff is not NULL assume indirect command */
        if (buff) {
                dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
                /* copy the user buff into the respective DMA buff */
                memcpy(dma_buff->va, buff, buff_size);
                desc_on_ring->datalen = cpu_to_le16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                cpu_to_le32(upper_32_bits(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                cpu_to_le32(lower_32_bits(dma_buff->pa));
        }

        /* bump the tail */
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (iavf_asq_done(hw))
                                break;
                        udelay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (iavf_asq_done(hw)) {
                *desc = *desc_on_ring;
                if (buff)
                        memcpy(buff, dma_buff->va, buff_size);
                retval = le16_to_cpu(desc->retval);
                if (retval != 0) {
                        iavf_debug(hw,
                                   IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_OK)
                        status = 0;
                else if ((enum iavf_admin_queue_err)retval == IAVF_AQ_RC_EBUSY)
                        status = IAVF_ERR_NOT_READY;
                else
                        status = IAVF_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum iavf_admin_queue_err)retval;
        }

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                *details->wb_desc = *desc_on_ring;

        /* update the error if a timeout occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = IAVF_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        mutex_unlock(&hw->aq.asq_mutex);
        return status;
}

/**
 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void iavf_fill_default_direct_cmd_desc(struct iavf_aq_desc *desc, u16 opcode)
{
        /* zero out the desc */
        memset((void *)desc, 0, sizeof(struct iavf_aq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flags = cpu_to_le16(IAVF_AQ_FLAG_SI);
}
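
/*
 * Example (illustrative): a direct command, i.e. one with no attached
 * buffer, only needs a stack descriptor filled by the helper above;
 * iavf_aq_queue_shutdown() elsewhere in the driver follows this same
 * pattern, and its opcode is used here purely as a placeholder:
 *
 *      struct iavf_aq_desc desc;
 *
 *      iavf_fill_default_direct_cmd_desc(&desc,
 *                                        iavf_aqc_opc_queue_shutdown);
 *      status = iavf_asq_send_command(hw, &desc, NULL, 0, NULL);
 */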

/**
 *  iavf_clean_arq_element - clean one element from the receive queue
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
enum iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                                        struct iavf_arq_event_info *e,
                                        u16 *pending)
{
        u16 ntc = hw->aq.arq.next_to_clean;
        struct iavf_aq_desc *desc;
        enum iavf_status ret_code = 0;
        struct iavf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        memset(&e->desc, 0, sizeof(e->desc));

        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);

        if (hw->aq.arq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = IAVF_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = IAVF_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum iavf_admin_queue_err)le16_to_cpu(desc->retval);
        flags = le16_to_cpu(desc->flags);
        if (flags & IAVF_AQ_FLAG_ERR) {
                ret_code = IAVF_ERR_ADMIN_QUEUE_ERROR;
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        e->desc = *desc;
        datalen = le16_to_cpu(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf && (e->msg_len != 0))
                memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
                       e->msg_len);

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc;
         * FW updates datalen to indicate the event message size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        memset((void *)desc, 0, sizeof(struct iavf_aq_desc));

        desc->flags = cpu_to_le16(IAVF_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > IAVF_AQ_LARGE_BUF)
                desc->flags |= cpu_to_le16(IAVF_AQ_FLAG_LB);
        desc->datalen = cpu_to_le16((u16)bi->size);
        desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);

        return ret_code;
}
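
/*
 * Usage sketch (illustrative only): callers typically drain the ARQ in
 * a loop until IAVF_ERR_ADMIN_QUEUE_NO_WORK is returned, reusing one
 * event buffer across iterations; handle_event() is a hypothetical
 * placeholder for the caller's message processing:
 *
 *      struct iavf_arq_event_info event;
 *      u16 pending;
 *
 *      event.buf_len = hw->aq.arq_buf_size;
 *      event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *      do {
 *              if (iavf_clean_arq_element(hw, &event, &pending))
 *                      break;
 *              handle_event(&event.desc, event.msg_buf, event.msg_len);
 *      } while (pending);
 *      kfree(event.msg_buf);
 */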