/* linux/drivers/net/ethernet/intel/iavf/i40e_adminq.c */
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright(c) 2013 - 2018 Intel Corporation. */
   3
   4#include "iavf_status.h"
   5#include "iavf_type.h"
   6#include "iavf_register.h"
   7#include "i40e_adminq.h"
   8#include "iavf_prototype.h"
   9
  10/**
  11 *  i40e_adminq_init_regs - Initialize AdminQ registers
  12 *  @hw: pointer to the hardware structure
  13 *
  14 *  This assumes the alloc_asq and alloc_arq functions have already been called
  15 **/
  16static void i40e_adminq_init_regs(struct iavf_hw *hw)
  17{
  18        /* set head and tail registers in our local struct */
  19        hw->aq.asq.tail = IAVF_VF_ATQT1;
  20        hw->aq.asq.head = IAVF_VF_ATQH1;
  21        hw->aq.asq.len  = IAVF_VF_ATQLEN1;
  22        hw->aq.asq.bal  = IAVF_VF_ATQBAL1;
  23        hw->aq.asq.bah  = IAVF_VF_ATQBAH1;
  24        hw->aq.arq.tail = IAVF_VF_ARQT1;
  25        hw->aq.arq.head = IAVF_VF_ARQH1;
  26        hw->aq.arq.len  = IAVF_VF_ARQLEN1;
  27        hw->aq.arq.bal  = IAVF_VF_ARQBAL1;
  28        hw->aq.arq.bah  = IAVF_VF_ARQBAH1;
  29}
  30
  31/**
  32 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
  33 *  @hw: pointer to the hardware structure
  34 **/
  35static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
  36{
  37        iavf_status ret_code;
  38
  39        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
  40                                         i40e_mem_atq_ring,
  41                                         (hw->aq.num_asq_entries *
  42                                         sizeof(struct i40e_aq_desc)),
  43                                         IAVF_ADMINQ_DESC_ALIGNMENT);
  44        if (ret_code)
  45                return ret_code;
  46
  47        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
  48                                          (hw->aq.num_asq_entries *
  49                                          sizeof(struct i40e_asq_cmd_details)));
  50        if (ret_code) {
  51                iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
  52                return ret_code;
  53        }
  54
  55        return ret_code;
  56}
  57
  58/**
  59 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
  60 *  @hw: pointer to the hardware structure
  61 **/
  62static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
  63{
  64        iavf_status ret_code;
  65
  66        ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
  67                                         i40e_mem_arq_ring,
  68                                         (hw->aq.num_arq_entries *
  69                                         sizeof(struct i40e_aq_desc)),
  70                                         IAVF_ADMINQ_DESC_ALIGNMENT);
  71
  72        return ret_code;
  73}
  74
/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the ASQ descriptor ring DMA memory; the command-details
 *  array (cmd_buf) and per-entry buffers are NOT freed here.
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
  86
/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  Releases only the ARQ descriptor ring DMA memory; the pre-posted
 *  receive buffers are freed separately (see i40e_free_arq_bufs()).
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct iavf_hw *hw)
{
        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
  98
/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 *
 *  Allocates the arq_bi tracking array, then one DMA buffer per ARQ entry,
 *  and programs each ring descriptor to point at its buffer so firmware can
 *  post events as soon as the queue is enabled.  On per-entry allocation
 *  failure, all buffers allocated so far are freed again.
 **/
static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
{
        struct i40e_aq_desc *desc;
        struct iavf_dma_mem *bi;
        iavf_status ret_code;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                                          (hw->aq.num_arq_entries *
                                           sizeof(struct iavf_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = iavf_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = cpu_to_le16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        cpu_to_le32(upper_32_bits(bi->pa));
                desc->params.external.addr_low =
                        cpu_to_le32(lower_32_bits(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

/* reached by goto when the tracking-array allocation fails, and by
 * fall-through from the loop on success; ret_code distinguishes the two
 */
alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}
 166
 167/**
 168 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 169 *  @hw: pointer to the hardware structure
 170 **/
 171static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
 172{
 173        struct iavf_dma_mem *bi;
 174        iavf_status ret_code;
 175        int i;
 176
 177        /* No mapped memory needed yet, just the buffer info structures */
 178        ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
 179                                          (hw->aq.num_asq_entries *
 180                                           sizeof(struct iavf_dma_mem)));
 181        if (ret_code)
 182                goto alloc_asq_bufs;
 183        hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
 184
 185        /* allocate the mapped buffers */
 186        for (i = 0; i < hw->aq.num_asq_entries; i++) {
 187                bi = &hw->aq.asq.r.asq_bi[i];
 188                ret_code = iavf_allocate_dma_mem(hw, bi,
 189                                                 i40e_mem_asq_buf,
 190                                                 hw->aq.asq_buf_size,
 191                                                 IAVF_ADMINQ_DESC_ALIGNMENT);
 192                if (ret_code)
 193                        goto unwind_alloc_asq_bufs;
 194        }
 195alloc_asq_bufs:
 196        return ret_code;
 197
 198unwind_alloc_asq_bufs:
 199        /* don't try to free the one that failed... */
 200        i--;
 201        for (; i >= 0; i--)
 202                iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
 203        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
 204
 205        return ret_code;
 206}
 207
 208/**
 209 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 210 *  @hw: pointer to the hardware structure
 211 **/
 212static void i40e_free_arq_bufs(struct iavf_hw *hw)
 213{
 214        int i;
 215
 216        /* free descriptors */
 217        for (i = 0; i < hw->aq.num_arq_entries; i++)
 218                iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
 219
 220        /* free the descriptor memory */
 221        iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
 222
 223        /* free the dma header */
 224        iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
 225}
 226
 227/**
 228 *  i40e_free_asq_bufs - Free send queue buffer info elements
 229 *  @hw: pointer to the hardware structure
 230 **/
 231static void i40e_free_asq_bufs(struct iavf_hw *hw)
 232{
 233        int i;
 234
 235        /* only unmap if the address is non-NULL */
 236        for (i = 0; i < hw->aq.num_asq_entries; i++)
 237                if (hw->aq.asq.r.asq_bi[i].pa)
 238                        iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
 239
 240        /* free the buffer info list */
 241        iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
 242
 243        /* free the descriptor memory */
 244        iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
 245
 246        /* free the dma header */
 247        iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
 248}
 249
 250/**
 251 *  i40e_config_asq_regs - configure ASQ registers
 252 *  @hw: pointer to the hardware structure
 253 *
 254 *  Configure base address and length registers for the transmit queue
 255 **/
 256static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
 257{
 258        iavf_status ret_code = 0;
 259        u32 reg = 0;
 260
 261        /* Clear Head and Tail */
 262        wr32(hw, hw->aq.asq.head, 0);
 263        wr32(hw, hw->aq.asq.tail, 0);
 264
 265        /* set starting point */
 266        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
 267                                  IAVF_VF_ATQLEN1_ATQENABLE_MASK));
 268        wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
 269        wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 270
 271        /* Check one register to verify that config was applied */
 272        reg = rd32(hw, hw->aq.asq.bal);
 273        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
 274                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 275
 276        return ret_code;
 277}
 278
 279/**
 280 *  i40e_config_arq_regs - ARQ register configuration
 281 *  @hw: pointer to the hardware structure
 282 *
 283 * Configure base address and length registers for the receive (event queue)
 284 **/
 285static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
 286{
 287        iavf_status ret_code = 0;
 288        u32 reg = 0;
 289
 290        /* Clear Head and Tail */
 291        wr32(hw, hw->aq.arq.head, 0);
 292        wr32(hw, hw->aq.arq.tail, 0);
 293
 294        /* set starting point */
 295        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
 296                                  IAVF_VF_ARQLEN1_ARQENABLE_MASK));
 297        wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
 298        wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 299
 300        /* Update tail in the HW to post pre-allocated buffers */
 301        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 302
 303        /* Check one register to verify that config was applied */
 304        reg = rd32(hw, hw->aq.arq.bal);
 305        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
 306                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 307
 308        return ret_code;
 309}
 310
 311/**
 312 *  i40e_init_asq - main initialization routine for ASQ
 313 *  @hw: pointer to the hardware structure
 314 *
 315 *  This is the main initialization routine for the Admin Send Queue
 316 *  Prior to calling this function, drivers *MUST* set the following fields
 317 *  in the hw->aq structure:
 318 *     - hw->aq.num_asq_entries
 319 *     - hw->aq.arq_buf_size
 320 *
 321 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 322 *  called are not going to be atomic context safe
 323 **/
 324static iavf_status i40e_init_asq(struct iavf_hw *hw)
 325{
 326        iavf_status ret_code = 0;
 327
 328        if (hw->aq.asq.count > 0) {
 329                /* queue already initialized */
 330                ret_code = I40E_ERR_NOT_READY;
 331                goto init_adminq_exit;
 332        }
 333
 334        /* verify input for valid configuration */
 335        if ((hw->aq.num_asq_entries == 0) ||
 336            (hw->aq.asq_buf_size == 0)) {
 337                ret_code = I40E_ERR_CONFIG;
 338                goto init_adminq_exit;
 339        }
 340
 341        hw->aq.asq.next_to_use = 0;
 342        hw->aq.asq.next_to_clean = 0;
 343
 344        /* allocate the ring memory */
 345        ret_code = i40e_alloc_adminq_asq_ring(hw);
 346        if (ret_code)
 347                goto init_adminq_exit;
 348
 349        /* allocate buffers in the rings */
 350        ret_code = i40e_alloc_asq_bufs(hw);
 351        if (ret_code)
 352                goto init_adminq_free_rings;
 353
 354        /* initialize base registers */
 355        ret_code = i40e_config_asq_regs(hw);
 356        if (ret_code)
 357                goto init_adminq_free_rings;
 358
 359        /* success! */
 360        hw->aq.asq.count = hw->aq.num_asq_entries;
 361        goto init_adminq_exit;
 362
 363init_adminq_free_rings:
 364        i40e_free_adminq_asq(hw);
 365
 366init_adminq_exit:
 367        return ret_code;
 368}
 369
 370/**
 371 *  i40e_init_arq - initialize ARQ
 372 *  @hw: pointer to the hardware structure
 373 *
 374 *  The main initialization routine for the Admin Receive (Event) Queue.
 375 *  Prior to calling this function, drivers *MUST* set the following fields
 376 *  in the hw->aq structure:
 377 *     - hw->aq.num_asq_entries
 378 *     - hw->aq.arq_buf_size
 379 *
 380 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 381 *  called are not going to be atomic context safe
 382 **/
 383static iavf_status i40e_init_arq(struct iavf_hw *hw)
 384{
 385        iavf_status ret_code = 0;
 386
 387        if (hw->aq.arq.count > 0) {
 388                /* queue already initialized */
 389                ret_code = I40E_ERR_NOT_READY;
 390                goto init_adminq_exit;
 391        }
 392
 393        /* verify input for valid configuration */
 394        if ((hw->aq.num_arq_entries == 0) ||
 395            (hw->aq.arq_buf_size == 0)) {
 396                ret_code = I40E_ERR_CONFIG;
 397                goto init_adminq_exit;
 398        }
 399
 400        hw->aq.arq.next_to_use = 0;
 401        hw->aq.arq.next_to_clean = 0;
 402
 403        /* allocate the ring memory */
 404        ret_code = i40e_alloc_adminq_arq_ring(hw);
 405        if (ret_code)
 406                goto init_adminq_exit;
 407
 408        /* allocate buffers in the rings */
 409        ret_code = i40e_alloc_arq_bufs(hw);
 410        if (ret_code)
 411                goto init_adminq_free_rings;
 412
 413        /* initialize base registers */
 414        ret_code = i40e_config_arq_regs(hw);
 415        if (ret_code)
 416                goto init_adminq_free_rings;
 417
 418        /* success! */
 419        hw->aq.arq.count = hw->aq.num_arq_entries;
 420        goto init_adminq_exit;
 421
 422init_adminq_free_rings:
 423        i40e_free_adminq_arq(hw);
 424
 425init_adminq_exit:
 426        return ret_code;
 427}
 428
 429/**
 430 *  i40e_shutdown_asq - shutdown the ASQ
 431 *  @hw: pointer to the hardware structure
 432 *
 433 *  The main shutdown routine for the Admin Send Queue
 434 **/
 435static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
 436{
 437        iavf_status ret_code = 0;
 438
 439        mutex_lock(&hw->aq.asq_mutex);
 440
 441        if (hw->aq.asq.count == 0) {
 442                ret_code = I40E_ERR_NOT_READY;
 443                goto shutdown_asq_out;
 444        }
 445
 446        /* Stop firmware AdminQ processing */
 447        wr32(hw, hw->aq.asq.head, 0);
 448        wr32(hw, hw->aq.asq.tail, 0);
 449        wr32(hw, hw->aq.asq.len, 0);
 450        wr32(hw, hw->aq.asq.bal, 0);
 451        wr32(hw, hw->aq.asq.bah, 0);
 452
 453        hw->aq.asq.count = 0; /* to indicate uninitialized queue */
 454
 455        /* free ring buffers */
 456        i40e_free_asq_bufs(hw);
 457
 458shutdown_asq_out:
 459        mutex_unlock(&hw->aq.asq_mutex);
 460        return ret_code;
 461}
 462
 463/**
 464 *  i40e_shutdown_arq - shutdown ARQ
 465 *  @hw: pointer to the hardware structure
 466 *
 467 *  The main shutdown routine for the Admin Receive Queue
 468 **/
 469static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
 470{
 471        iavf_status ret_code = 0;
 472
 473        mutex_lock(&hw->aq.arq_mutex);
 474
 475        if (hw->aq.arq.count == 0) {
 476                ret_code = I40E_ERR_NOT_READY;
 477                goto shutdown_arq_out;
 478        }
 479
 480        /* Stop firmware AdminQ processing */
 481        wr32(hw, hw->aq.arq.head, 0);
 482        wr32(hw, hw->aq.arq.tail, 0);
 483        wr32(hw, hw->aq.arq.len, 0);
 484        wr32(hw, hw->aq.arq.bal, 0);
 485        wr32(hw, hw->aq.arq.bah, 0);
 486
 487        hw->aq.arq.count = 0; /* to indicate uninitialized queue */
 488
 489        /* free ring buffers */
 490        i40e_free_arq_bufs(hw);
 491
 492shutdown_arq_out:
 493        mutex_unlock(&hw->aq.arq_mutex);
 494        return ret_code;
 495}
 496
 497/**
 498 *  iavf_init_adminq - main initialization routine for Admin Queue
 499 *  @hw: pointer to the hardware structure
 500 *
 501 *  Prior to calling this function, drivers *MUST* set the following fields
 502 *  in the hw->aq structure:
 503 *     - hw->aq.num_asq_entries
 504 *     - hw->aq.num_arq_entries
 505 *     - hw->aq.arq_buf_size
 506 *     - hw->aq.asq_buf_size
 507 **/
 508iavf_status iavf_init_adminq(struct iavf_hw *hw)
 509{
 510        iavf_status ret_code;
 511
 512        /* verify input for valid configuration */
 513        if ((hw->aq.num_arq_entries == 0) ||
 514            (hw->aq.num_asq_entries == 0) ||
 515            (hw->aq.arq_buf_size == 0) ||
 516            (hw->aq.asq_buf_size == 0)) {
 517                ret_code = I40E_ERR_CONFIG;
 518                goto init_adminq_exit;
 519        }
 520
 521        /* Set up register offsets */
 522        i40e_adminq_init_regs(hw);
 523
 524        /* setup ASQ command write back timeout */
 525        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
 526
 527        /* allocate the ASQ */
 528        ret_code = i40e_init_asq(hw);
 529        if (ret_code)
 530                goto init_adminq_destroy_locks;
 531
 532        /* allocate the ARQ */
 533        ret_code = i40e_init_arq(hw);
 534        if (ret_code)
 535                goto init_adminq_free_asq;
 536
 537        /* success! */
 538        goto init_adminq_exit;
 539
 540init_adminq_free_asq:
 541        i40e_shutdown_asq(hw);
 542init_adminq_destroy_locks:
 543
 544init_adminq_exit:
 545        return ret_code;
 546}
 547
 548/**
 549 *  iavf_shutdown_adminq - shutdown routine for the Admin Queue
 550 *  @hw: pointer to the hardware structure
 551 **/
 552iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
 553{
 554        iavf_status ret_code = 0;
 555
 556        if (iavf_check_asq_alive(hw))
 557                iavf_aq_queue_shutdown(hw, true);
 558
 559        i40e_shutdown_asq(hw);
 560        i40e_shutdown_arq(hw);
 561
 562        return ret_code;
 563}
 564
/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  Walks the send ring from next_to_clean up to the position firmware has
 *  advanced the hardware head to, invoking any completion callback recorded
 *  in the per-descriptor details and zeroing each slot as it goes.
 *
 *  Caller must hold asq_mutex (called from iavf_asq_send_command).
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct iavf_hw *hw)
{
        struct iavf_adminq_ring *asq = &hw->aq.asq;
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = IAVF_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        /* everything before the hardware head has been processed by FW */
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        /* hand the callback a copy, not the live ring slot */
                        desc_cb = *desc;
                        cb_func(hw, &desc_cb);
                }
                /* scrub the slot before it can be reused */
                memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
                memset((void *)details, 0,
                       sizeof(struct i40e_asq_cmd_details));
                /* advance and wrap */
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = IAVF_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return IAVF_DESC_UNUSED(asq);
}
 605
 606/**
 607 *  iavf_asq_done - check if FW has processed the Admin Send Queue
 608 *  @hw: pointer to the hw struct
 609 *
 610 *  Returns true if the firmware has processed all descriptors on the
 611 *  admin send queue. Returns false if there are still requests pending.
 612 **/
 613bool iavf_asq_done(struct iavf_hw *hw)
 614{
 615        /* AQ designers suggest use of head for better
 616         * timing reliability than DD bit
 617         */
 618        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
 619}
 620
/**
 *  iavf_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc
 *
 *  The whole operation runs under asq_mutex.  Unless the caller requested
 *  async/postpone semantics via @cmd_details, the function busy-waits (in
 *  50 us steps, up to hw->aq.asq_cmd_timeout) for firmware to write the
 *  descriptor back, then copies the writeback into @desc and @buff.
 **/
iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
                                  void *buff, /* can be NULL */
                                  u16  buff_size,
                                  struct i40e_asq_cmd_details *cmd_details)
{
        struct iavf_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        iavf_status status = 0;
        u16  retval = 0;
        u32  val = 0;

        mutex_lock(&hw->aq.asq_mutex);

        if (hw->aq.asq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        /* a head value outside the ring means the queue is in a bad state */
        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                *details = *cmd_details;

                /* If the cmd_details are defined copy the cookie.  The
                 * cpu_to_le32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                cpu_to_le32(upper_32_bits(details->cookie));
                        desc->cookie_low =
                                cpu_to_le32(lower_32_bits(details->cookie));
                }
        } else {
                memset(details, 0, sizeof(struct i40e_asq_cmd_details));
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~cpu_to_le16(details->flags_dis);
        desc->flags |= cpu_to_le16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        /* postpone only makes sense for async commands */
        if (details->postpone && !details->async) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        *desc_on_ring = *desc;

        /* if buff is not NULL assume indirect command */
        if (buff) {
                dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
                /* copy the user buff into the respective DMA buff */
                memcpy(dma_buff->va, buff, buff_size);
                desc_on_ring->datalen = cpu_to_le16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                cpu_to_le32(upper_32_bits(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                cpu_to_le32(lower_32_bits(dma_buff->pa));
        }

        /* bump the tail */
        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (iavf_asq_done(hw))
                                break;
                        udelay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (iavf_asq_done(hw)) {
                *desc = *desc_on_ring;
                if (buff)
                        memcpy(buff, dma_buff->va, buff_size);
                retval = le16_to_cpu(desc->retval);
                if (retval != 0) {
                        iavf_debug(hw,
                                   IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                /* map the FW return code onto a driver status code */
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = 0;
                else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
                        status = I40E_ERR_NOT_READY;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                *details->wb_desc = *desc_on_ring;

        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        mutex_unlock(&hw->aq.asq_mutex);
        return status;
}
 818
 819/**
 820 *  iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
 821 *  @desc:     pointer to the temp descriptor (non DMA mem)
 822 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 823 *
 824 *  Fill the desc with default values
 825 **/
 826void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
 827{
 828        /* zero out the desc */
 829        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
 830        desc->opcode = cpu_to_le16(opcode);
 831        desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
 832}
 833
/**
 *  iavf_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 *
 *  Runs under arq_mutex.  After copying the event out, the descriptor is
 *  re-armed with its original buffer address/size and handed back to
 *  firmware by bumping the tail register.
 **/
iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
                                   struct i40e_arq_event_info *e,
                                   u16 *pending)
{
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        iavf_status ret_code = 0;
        struct iavf_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        memset(&e->desc, 0, sizeof(e->desc));

        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);

        if (hw->aq.arq.count == 0) {
                iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        /* record the FW return code even before checking the error flag */
        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                iavf_debug(hw,
                           IAVF_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        /* copy the event out, clipped to the caller's buffer size */
        e->desc = *desc;
        datalen = le16_to_cpu(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf && (e->msg_len != 0))
                memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
                       e->msg_len);

        iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
        iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

        desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
        desc->datalen = cpu_to_le16((u16)bi->size);
        desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending)
                /* distance from ntc to ntu, accounting for ring wrap */
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);

clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);

        return ret_code;
}
 937