linux/drivers/net/ethernet/intel/i40e/i40e_adminq.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "i40e_status.h"
#include "i40e_type.h"
#include "i40e_register.h"
#include "i40e_adminq.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 *  i40e_adminq_init_regs - Initialize AdminQ registers
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the alloc_asq and alloc_arq functions have already been called
 **/
static void i40e_adminq_init_regs(struct i40e_hw *hw)
{
        /* set head and tail registers in our local struct */
        if (i40e_is_vf(hw)) {
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
                hw->aq.asq.bal  = I40E_VF_ATQBAL1;
                hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
                hw->aq.arq.bal  = I40E_VF_ARQBAL1;
                hw->aq.arq.bah  = I40E_VF_ARQBAH1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
                hw->aq.asq.bal  = I40E_PF_ATQBAL;
                hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
                hw->aq.arq.bal  = I40E_PF_ARQBAL;
                hw->aq.arq.bah  = I40E_PF_ARQBAH;
        }
}

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        i40e_status ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         i40e_mem_atq_ring,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        i40e_status ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         i40e_mem_arq_ring,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        i40e_status ret_code;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_arq_buf,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = cpu_to_le16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        cpu_to_le32(upper_32_bits(bi->pa));
                desc->params.external.addr_low =
                        cpu_to_le32(lower_32_bits(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

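/* A note on the flag computation above: BUF marks the descriptor as
 * having an attached buffer, and LB ("large buffer") is added when the
 * buffer exceeds I40E_AQ_LARGE_BUF (512 bytes, from i40e_adminq.h).
 * The pattern generalizes to this sketch (illustrative only, not part
 * of the driver):
 *
 *        u16 flags = I40E_AQ_FLAG_BUF;
 *
 *        if (buf_size > I40E_AQ_LARGE_BUF)
 *                flags |= I40E_AQ_FLAG_LB;
 *        desc->flags = cpu_to_le16(flags);
 */
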
/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        i40e_status ret_code;
        struct i40e_dma_mem *bi;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 i40e_mem_asq_buf,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
        wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
        wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.asq.bal);
        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

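/* The ATQBAL read-back above is the only configuration check the
 * driver performs.  A caller could additionally verify that the enable
 * bit stuck; a minimal sketch (illustrative only), reusing the mask
 * written above:
 *
 *        if (!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK))
 *                return I40E_ERR_ADMIN_QUEUE_ERROR;
 */
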
/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);

        /* set starting point */
        wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
        wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
        wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, hw->aq.arq.bal);
        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_asq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static i40e_status i40e_init_arq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = I40E_ERR_NOT_READY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        mutex_lock(&hw->aq.asq_mutex);

        if (hw->aq.asq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        mutex_unlock(&hw->aq.asq_mutex);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
{
        i40e_status ret_code = 0;

        mutex_lock(&hw->aq.arq_mutex);

        if (hw->aq.arq.count == 0) {
                ret_code = I40E_ERR_NOT_READY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        mutex_unlock(&hw->aq.arq_mutex);
        return ret_code;
}

/**
 *  i40e_set_hw_flags - set HW flags
 *  @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_flags(struct i40e_hw *hw)
{
        struct i40e_adminq_info *aq = &hw->aq;

        hw->flags = 0;

        switch (hw->mac.type) {
        case I40E_MAC_XL710:
                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
                        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
                        /* The ability to RX (not drop) 802.1ad frames */
                        hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
                }
                break;
        case I40E_MAC_X722:
                hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
                             I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
                        hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;

                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
                        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;

                if (aq->api_maj_ver > 1 ||
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
                        hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;

                fallthrough;
        default:
                break;
        }

        /* Newer versions of firmware require lock when reading the NVM */
        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 5))
                hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 8)) {
                hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
                hw->flags |= I40E_HW_FLAG_DROP_MODE;
        }

        if (aq->api_maj_ver > 1 ||
            (aq->api_maj_ver == 1 &&
             aq->api_min_ver >= 9))
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
}

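/* Each gate above expands the same "API version >= (1, N)" test.  A
 * hypothetical helper expressing that pattern (illustrative only, not
 * part of the driver):
 *
 *        static bool i40e_aq_api_ge(struct i40e_adminq_info *aq,
 *                                   u16 maj, u16 min)
 *        {
 *                return aq->api_maj_ver > maj ||
 *                       (aq->api_maj_ver == maj && aq->api_min_ver >= min);
 *        }
 *
 * e.g. the NVM-lock gate above is equivalent to i40e_aq_api_ge(aq, 1, 5).
 */
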
/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
i40e_status i40e_init_adminq(struct i40e_hw *hw)
{
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
        i40e_status ret_code;
        int retry = 0;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }

        /* Set up register offsets */
        i40e_adminq_init_regs(hw);

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code)
                goto init_adminq_destroy_locks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code)
                goto init_adminq_free_asq;

        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
                        break;
                retry++;
                msleep(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != I40E_SUCCESS)
                goto init_adminq_free_arq;

        /* Some features were introduced in different FW API versions
         * for different MAC types.
         */
        i40e_set_hw_flags(hw);

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        if (hw->mac.type == I40E_MAC_XL710 &&
            hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
                hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
                hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
        }
        if (hw->mac.type == I40E_MAC_X722 &&
            hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
            hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
                hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
        }

        /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
        if (hw->aq.api_maj_ver > 1 ||
            (hw->aq.api_maj_ver == 1 &&
             hw->aq.api_min_ver >= 7))
                hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;

        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

        ret_code = 0;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_arq:
        i40e_shutdown_arq(hw);
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
        return ret_code;
}

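/* Usage sketch (illustrative): before calling i40e_init_adminq(), a
 * caller must size both rings and their buffers, along the lines of
 *
 *        hw->aq.num_asq_entries = 128;
 *        hw->aq.num_arq_entries = 128;
 *        hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *        hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *        if (i40e_init_adminq(hw))
 *                goto err_out;
 *
 * The entry counts (128 here) are example values only; the driver
 * derives its real sizes elsewhere.
 */
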
/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
void i40e_shutdown_adminq(struct i40e_hw *hw)
{
        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        desc_cb = *desc;
                        cb_func(hw, &desc_cb);
                }
                memset(desc, 0, sizeof(*desc));
                memset(details, 0, sizeof(*details));
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}

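/* The free-slot count returned above comes from the I40E_DESC_UNUSED()
 * macro, which computes roughly
 *
 *        ((ntc > ntu) ? 0 : count) + ntc - ntu - 1
 *
 * i.e. the ring distance from next_to_use back to next_to_clean, minus
 * the one slot kept empty so a full ring is distinguishable from an
 * empty one.
 */
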
/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                                  struct i40e_aq_desc *desc,
                                  void *buff, /* can be NULL */
                                  u16  buff_size,
                                  struct i40e_asq_cmd_details *cmd_details)
{
        i40e_status status = 0;
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        u32  val = 0;

        mutex_lock(&hw->aq.asq_mutex);

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_error;
        }

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        val = rd32(hw, hw->aq.asq.head);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                *details = *cmd_details;

                /* If the cmd_details are defined copy the cookie.  The
                 * cpu_to_le32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                cpu_to_le32(upper_32_bits(details->cookie));
                        desc->cookie_low =
                                cpu_to_le32(lower_32_bits(details->cookie));
                }
        } else {
                memset(details, 0, sizeof(struct i40e_asq_cmd_details));
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~cpu_to_le16(details->flags_dis);
        desc->flags |= cpu_to_le16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = I40E_ERR_INVALID_SIZE;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = I40E_ERR_PARAM;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = I40E_ERR_ADMIN_QUEUE_FULL;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        *desc_on_ring = *desc;

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                memcpy(dma_buff->va, buff, buff_size);
                desc_on_ring->datalen = cpu_to_le16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                cpu_to_le32(upper_32_bits(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                cpu_to_le32(lower_32_bits(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;
                        udelay(50);
                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                *desc = *desc_on_ring;
                if (buff != NULL)
                        memcpy(buff, dma_buff->va, buff_size);
                retval = le16_to_cpu(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = 0;
                else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
                        status = I40E_ERR_NOT_READY;
                else
                        status = I40E_ERR_ADMIN_QUEUE_ERROR;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                *details->wb_desc = *desc_on_ring;

        /* update the error if timeout occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, hw->aq.asq.len) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
                } else {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
                }
        }

asq_send_command_error:
        mutex_unlock(&hw->aq.asq_mutex);
        return status;
}

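/* Usage sketch (illustrative; the opcode is one example from
 * i40e_adminq_cmd.h): a direct command with no attached buffer is sent
 * as
 *
 *        struct i40e_aq_desc desc;
 *
 *        i40e_fill_default_direct_cmd_desc(&desc,
 *                                          i40e_aqc_opc_queue_shutdown);
 *        status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands pass buff/buff_size instead, and the routine copies
 * the buffer into the ring's DMA memory as shown above.
 */
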
/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}

/**
 *  i40e_clean_arq_element
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
                                   struct i40e_arq_event_info *e,
                                   u16 *pending)
{
        i40e_status ret_code = 0;
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        memset(&e->desc, 0, sizeof(e->desc));

        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = I40E_ERR_QUEUE_EMPTY;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        e->desc = *desc;
        datalen = le16_to_cpu(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
                       e->msg_len);

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

        desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
        desc->datalen = cpu_to_le16((u16)bi->size);
        desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, hw->aq.arq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

        i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);

        return ret_code;
}

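/* Consumer sketch (illustrative): a service task typically drains the
 * ARQ until no events remain, e.g.
 *
 *        struct i40e_arq_event_info event;
 *        u16 pending;
 *
 *        event.buf_len = I40E_MAX_AQ_BUF_SIZE;
 *        event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *        if (!event.msg_buf)
 *                return;
 *        do {
 *                if (i40e_clean_arq_element(hw, &event, &pending))
 *                        break;
 *                ... dispatch on le16_to_cpu(event.desc.opcode) ...
 *        } while (pending);
 *        kfree(event.msg_buf);
 */
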
/**
 *  i40e_resume_aq - resume AQ processing from 0
 *  @hw: pointer to the hardware structure
 **/
static void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}