linux/drivers/net/ethernet/intel/ice/ice_controlq.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->adminq;

        cq->sq.head = PF_FW_ATQH;
        cq->sq.tail = PF_FW_ATQT;
        cq->sq.len = PF_FW_ATQLEN;
        cq->sq.bah = PF_FW_ATQBAH;
        cq->sq.bal = PF_FW_ATQBAL;
        cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M;
        cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
        cq->sq.head_mask = PF_FW_ATQH_ATQH_M;

        cq->rq.head = PF_FW_ARQH;
        cq->rq.tail = PF_FW_ARQT;
        cq->rq.len = PF_FW_ARQLEN;
        cq->rq.bah = PF_FW_ARQBAH;
        cq->rq.bal = PF_FW_ARQBAL;
        cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M;
        cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
        cq->rq.head_mask = PF_FW_ARQH_ARQH_M;
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->mailboxq;

        /* set head and tail registers in our local struct */
        cq->sq.head = PF_MBX_ATQH;
        cq->sq.tail = PF_MBX_ATQT;
        cq->sq.len = PF_MBX_ATQLEN;
        cq->sq.bah = PF_MBX_ATQBAH;
        cq->sq.bal = PF_MBX_ATQBAL;
        cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M;
        cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M;
        cq->sq.head_mask = PF_MBX_ATQH_ATQH_M;

        cq->rq.head = PF_MBX_ARQH;
        cq->rq.tail = PF_MBX_ARQT;
        cq->rq.len = PF_MBX_ARQLEN;
        cq->rq.bah = PF_MBX_ARQBAH;
        cq->rq.bal = PF_MBX_ARQBAL;
        cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M;
        cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M;
        cq->rq.head_mask = PF_MBX_ARQH_ARQH_M;
}

/**
 * ice_check_sq_alive
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the send queue is enabled, else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* check both queue-length and queue-enable fields */
        if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
                return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
                                                cq->sq.len_ena_mask)) ==
                        (cq->num_sq_entries | cq->sq.len_ena_mask);

        return false;
}
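
/* Concretely, for the AdminQ (with the register layout set up in
 * ice_adminq_init_regs() above) the check reads PF_FW_ATQLEN and reports
 * the queue alive only while the ATQLEN field still equals num_sq_entries
 * and the ATQENABLE bit is set, i.e. neither firmware nor a reset has
 * torn the queue down behind the driver's back.
 */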

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

        cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
                                                 &cq->sq.desc_buf.pa,
                                                 GFP_KERNEL | __GFP_ZERO);
        if (!cq->sq.desc_buf.va)
                return ICE_ERR_NO_MEMORY;
        cq->sq.desc_buf.size = size;

        cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
                                      sizeof(struct ice_sq_cd), GFP_KERNEL);
        if (!cq->sq.cmd_buf) {
                dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
                                   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
                cq->sq.desc_buf.va = NULL;
                cq->sq.desc_buf.pa = 0;
                cq->sq.desc_buf.size = 0;
                return ICE_ERR_NO_MEMORY;
        }

        return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

        cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
                                                 &cq->rq.desc_buf.pa,
                                                 GFP_KERNEL | __GFP_ZERO);
        if (!cq->rq.desc_buf.va)
                return ICE_ERR_NO_MEMORY;
        cq->rq.desc_buf.size = size;
        return 0;
}

/**
 * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
                           cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
        cq->sq.desc_buf.va = NULL;
        cq->sq.desc_buf.pa = 0;
        cq->sq.desc_buf.size = 0;
}

/**
 * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted receive buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size,
                           cq->rq.desc_buf.va, cq->rq.desc_buf.pa);
        cq->rq.desc_buf.va = NULL;
        cq->rq.desc_buf.pa = 0;
        cq->rq.desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */
        cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
                                       sizeof(cq->rq.desc_buf), GFP_KERNEL);
        if (!cq->rq.dma_head)
                return ICE_ERR_NO_MEMORY;
        cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

        /* allocate the mapped buffers */
        for (i = 0; i < cq->num_rq_entries; i++) {
                struct ice_aq_desc *desc;
                struct ice_dma_mem *bi;

                bi = &cq->rq.r.rq_bi[i];
                bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
                                             cq->rq_buf_size, &bi->pa,
                                             GFP_KERNEL | __GFP_ZERO);
                if (!bi->va)
                        goto unwind_alloc_rq_bufs;
                bi->size = cq->rq_buf_size;

                /* now configure the descriptors for use */
                desc = ICE_CTL_Q_DESC(cq->rq, i);

                desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
                if (cq->rq_buf_size > ICE_AQ_LG_BUF)
                        desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with the Admin queue design; there is
                 * no register for buffer size configuration
                 */
                desc->datalen = cpu_to_le16(bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.generic.addr_high =
                        cpu_to_le32(upper_32_bits(bi->pa));
                desc->params.generic.addr_low =
                        cpu_to_le32(lower_32_bits(bi->pa));
                desc->params.generic.param0 = 0;
                desc->params.generic.param1 = 0;
        }
        return 0;

unwind_alloc_rq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--) {
                dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
                                   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
                cq->rq.r.rq_bi[i].va = NULL;
                cq->rq.r.rq_bi[i].pa = 0;
                cq->rq.r.rq_bi[i].size = 0;
        }
        devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);

        return ICE_ERR_NO_MEMORY;
}
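
/* For reference: the ring-indexing helper used above lives in
 * ice_controlq.h along these lines (paraphrased here as a reading aid;
 * the header holds the authoritative definition):
 *
 *      #define ICE_CTL_Q_DESC(R, i) \
 *              (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
 *
 * i.e. descriptor i is simply the i-th ice_aq_desc within the DMA-coherent
 * ring buffer allocated by ice_alloc_ctrlq_rq_ring()/ice_alloc_ctrlq_sq_ring().
 */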

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
                                       sizeof(cq->sq.desc_buf), GFP_KERNEL);
        if (!cq->sq.dma_head)
                return ICE_ERR_NO_MEMORY;
        cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

        /* allocate the mapped buffers */
        for (i = 0; i < cq->num_sq_entries; i++) {
                struct ice_dma_mem *bi;

                bi = &cq->sq.r.sq_bi[i];
                bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
                                             cq->sq_buf_size, &bi->pa,
                                             GFP_KERNEL | __GFP_ZERO);
                if (!bi->va)
                        goto unwind_alloc_sq_bufs;
                bi->size = cq->sq_buf_size;
        }
        return 0;

unwind_alloc_sq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--) {
                dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
                                   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
                cq->sq.r.sq_bi[i].va = NULL;
                cq->sq.r.sq_bi[i].pa = 0;
                cq->sq.r.sq_bi[i].size = 0;
        }
        devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);

        return ICE_ERR_NO_MEMORY;
}

/**
 * ice_free_rq_bufs - Free ARQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* free descriptors */
        for (i = 0; i < cq->num_rq_entries; i++) {
                dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
                                   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
                cq->rq.r.rq_bi[i].va = NULL;
                cq->rq.r.rq_bi[i].pa = 0;
                cq->rq.r.rq_bi[i].size = 0;
        }

        /* free the dma header */
        devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
}

/**
 * ice_free_sq_bufs - Free ATQ buffer info elements
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < cq->num_sq_entries; i++)
                if (cq->sq.r.sq_bi[i].pa) {
                        dmam_free_coherent(ice_hw_to_dev(hw),
                                           cq->sq.r.sq_bi[i].size,
                                           cq->sq.r.sq_bi[i].va,
                                           cq->sq.r.sq_bi[i].pa);
                        cq->sq.r.sq_bi[i].va = NULL;
                        cq->sq.r.sq_bi[i].pa = 0;
                        cq->sq.r.sq_bi[i].size = 0;
                }

        /* free the buffer info list */
        devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf);

        /* free the dma header */
        devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, cq->sq.head, 0);
        wr32(hw, cq->sq.tail, 0);

        /* set starting point */
        wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask));
        wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa));
        wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, cq->sq.bal);
        if (reg != lower_32_bits(cq->sq.desc_buf.pa))
                return ICE_ERR_AQ_ERROR;

        return 0;
}

/**
 * ice_cfg_rq_regs - configure Control ARQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, cq->rq.head, 0);
        wr32(hw, cq->rq.tail, 0);

        /* set starting point */
        wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask));
        wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa));
        wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, cq->rq.bal);
        if (reg != lower_32_bits(cq->rq.desc_buf.pa))
                return ICE_ERR_AQ_ERROR;

        return 0;
}

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        if (cq->sq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_sq_entries || !cq->sq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->sq.next_to_use = 0;
        cq->sq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_sq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_sq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! */
        cq->sq.count = cq->num_sq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ice_free_ctrlq_sq_ring(hw, cq);

init_ctrlq_exit:
        return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        if (cq->rq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->rq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->rq.next_to_use = 0;
        cq->rq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_rq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_rq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! */
        cq->rq.count = cq->num_rq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ice_free_ctrlq_rq_ring(hw, cq);

init_ctrlq_exit:
        return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = 0;

        mutex_lock(&cq->sq_lock);

        if (!cq->sq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_sq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, cq->sq.head, 0);
        wr32(hw, cq->sq.tail, 0);
        wr32(hw, cq->sq.len, 0);
        wr32(hw, cq->sq.bal, 0);
        wr32(hw, cq->sq.bah, 0);

        cq->sq.count = 0;       /* to indicate uninitialized queue */

        /* free ring buffers and the ring itself */
        ice_free_sq_bufs(hw, cq);
        ice_free_ctrlq_sq_ring(hw, cq);

shutdown_sq_out:
        mutex_unlock(&cq->sq_lock);
        return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
        if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
                /* Major API version is newer than expected, don't load */
                dev_warn(ice_hw_to_dev(hw),
                         "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
                return false;
        } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
                if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
                        dev_info(ice_hw_to_dev(hw),
                                 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
                else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
                        dev_info(ice_hw_to_dev(hw),
                                 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
        } else {
                /* Major API version is older than expected, log a warning */
                dev_info(ice_hw_to_dev(hw),
                         "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
        }
        return true;
}
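
/* Worked example of the minor-version tolerance above (the values are
 * hypothetical, not the driver's real EXP_FW_API_VER_* constants): with an
 * expected API version of 1.3, a reported minor of 6 or more (6 > 3 + 2)
 * logs the "newer than expected" message, a reported minor of 0
 * (0 + 2 < 3) logs the "older than expected" message, and minors 1
 * through 5 load silently. Only a newer *major* version refuses to load.
 */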

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = 0;

        mutex_lock(&cq->rq_lock);

        if (!cq->rq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_rq_out;
        }

        /* Stop Control Queue processing */
        wr32(hw, cq->rq.head, 0);
        wr32(hw, cq->rq.tail, 0);
        wr32(hw, cq->rq.len, 0);
        wr32(hw, cq->rq.bal, 0);
        wr32(hw, cq->rq.bah, 0);

        /* set rq.count to 0 to indicate uninitialized queue */
        cq->rq.count = 0;

        /* free ring buffers and the ring itself */
        ice_free_rq_bufs(hw, cq);
        ice_free_ctrlq_rq_ring(hw, cq);

shutdown_rq_out:
        mutex_unlock(&cq->rq_lock);
        return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->adminq;
        enum ice_status status;

        status = ice_aq_get_fw_ver(hw, NULL);
        if (status)
                goto init_ctrlq_free_rq;

        if (!ice_aq_ver_check(hw)) {
                status = ICE_ERR_FW_API_VER;
                goto init_ctrlq_free_rq;
        }

        return 0;

init_ctrlq_free_rq:
        if (cq->rq.count) {
                ice_shutdown_rq(hw, cq);
                mutex_destroy(&cq->rq_lock);
        }
        if (cq->sq.count) {
                ice_shutdown_sq(hw, cq);
                mutex_destroy(&cq->sq_lock);
        }
        return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;
        enum ice_status ret_code;

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                ice_adminq_init_regs(hw);
                cq = &hw->adminq;
                break;
        case ICE_CTL_Q_MAILBOX:
                ice_mailbox_init_regs(hw);
                cq = &hw->mailboxq;
                break;
        default:
                return ICE_ERR_PARAM;
        }
        cq->qtype = q_type;

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->num_sq_entries ||
            !cq->rq_buf_size || !cq->sq_buf_size)
                return ICE_ERR_CFG;

        mutex_init(&cq->sq_lock);
        mutex_init(&cq->rq_lock);

        /* setup SQ command write back timeout */
        cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

        /* allocate the ATQ */
        ret_code = ice_init_sq(hw, cq);
        if (ret_code)
                goto init_ctrlq_destroy_locks;

        /* allocate the ARQ */
        ret_code = ice_init_rq(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_sq;

        /* success! */
        return 0;

init_ctrlq_free_sq:
        ice_shutdown_sq(hw, cq);
init_ctrlq_destroy_locks:
        mutex_destroy(&cq->sq_lock);
        mutex_destroy(&cq->rq_lock);
        return ret_code;
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, drivers *MUST* set the following fields
 * in the cq structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
        enum ice_status ret_code;

        /* Init FW admin queue */
        ret_code = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
        if (ret_code)
                return ret_code;

        ret_code = ice_init_check_adminq(hw);
        if (ret_code)
                return ret_code;

        /* Init Mailbox queue */
        return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
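
/* Illustrative sketch (not part of the driver): before ice_init_all_ctrlq()
 * runs, the caller must have sized both control queues as documented above.
 * The real driver does this with its own ICE_AQ_ and ICE_MBXQ_ constants;
 * the values below are placeholders, shown only to make the contract
 * concrete.
 */
static void example_set_ctrlq_len(struct ice_hw *hw)
{
        hw->adminq.num_rq_entries = 512;        /* placeholder ring sizes */
        hw->adminq.num_sq_entries = 512;
        hw->adminq.rq_buf_size = 4096;          /* placeholder buffer sizes */
        hw->adminq.sq_buf_size = 4096;
        hw->mailboxq.num_rq_entries = 512;
        hw->mailboxq.num_sq_entries = 512;
        hw->mailboxq.rq_buf_size = 4096;
        hw->mailboxq.sq_buf_size = 4096;
}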

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                cq = &hw->adminq;
                if (ice_check_sq_alive(hw, cq))
                        ice_aq_q_shutdown(hw, true);
                break;
        case ICE_CTL_Q_MAILBOX:
                cq = &hw->mailboxq;
                break;
        default:
                return;
        }

        if (cq->sq.count) {
                ice_shutdown_sq(hw, cq);
                mutex_destroy(&cq->sq_lock);
        }
        if (cq->rq.count) {
                ice_shutdown_rq(hw, cq);
                mutex_destroy(&cq->rq_lock);
        }
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
        /* Shutdown FW admin queue */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
        /* Shutdown PF-VF Mailbox */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Returns the number of free descriptors.
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        struct ice_ctl_q_ring *sq = &cq->sq;
        u16 ntc = sq->next_to_clean;
        struct ice_sq_cd *details;
        struct ice_aq_desc *desc;

        desc = ICE_CTL_Q_DESC(*sq, ntc);
        details = ICE_CTL_Q_DETAILS(*sq, ntc);

        while (rd32(hw, cq->sq.head) != ntc) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
                memset(desc, 0, sizeof(*desc));
                memset(details, 0, sizeof(*details));
                ntc++;
                if (ntc == sq->count)
                        ntc = 0;
                desc = ICE_CTL_Q_DESC(*sq, ntc);
                details = ICE_CTL_Q_DETAILS(*sq, ntc);
        }

        sq->next_to_clean = ntc;

        return ICE_CTL_Q_DESC_UNUSED(sq);
}
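
/* Worked example of the free-count math (ICE_CTL_Q_DESC_UNUSED is
 * paraphrased here from ice_controlq.h; see the header for the
 * authoritative definition):
 *
 *      (((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) +
 *              (R)->next_to_clean - (R)->next_to_use - 1
 *
 * With count = 64, next_to_use = 10 and next_to_clean = 5 this yields
 * 64 + 5 - 10 - 1 = 58 free slots. One slot is always left unused so
 * that head == tail unambiguously means "empty" rather than "full".
 */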

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It posts the
 * descriptor on the ring, waits for the command to complete, and cleans
 * the queue.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                struct ice_aq_desc *desc, void *buf, u16 buf_size,
                struct ice_sq_cd *cd)
{
        struct ice_dma_mem *dma_buf = NULL;
        struct ice_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        enum ice_status status = 0;
        struct ice_sq_cd *details;
        u32 total_delay = 0;
        u16 retval = 0;
        u32 val = 0;

        /* if reset is in progress return a soft error */
        if (hw->reset_ongoing)
                return ICE_ERR_RESET_ONGOING;
        mutex_lock(&cq->sq_lock);

        cq->sq_last_status = ICE_AQ_RC_OK;

        if (!cq->sq.count) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Send queue not initialized.\n");
                status = ICE_ERR_AQ_EMPTY;
                goto sq_send_command_error;
        }

        if ((buf && !buf_size) || (!buf && buf_size)) {
                status = ICE_ERR_PARAM;
                goto sq_send_command_error;
        }

        if (buf) {
                if (buf_size > cq->sq_buf_size) {
                        ice_debug(hw, ICE_DBG_AQ_MSG,
                                  "Invalid buffer size for Control Send queue: %d.\n",
                                  buf_size);
                        status = ICE_ERR_INVAL_SIZE;
                        goto sq_send_command_error;
                }

                desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
                if (buf_size > ICE_AQ_LG_BUF)
                        desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
        }

        val = rd32(hw, cq->sq.head);
        if (val >= cq->num_sq_entries) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "head overrun at %d in the Control Send Queue ring\n",
                          val);
                status = ICE_ERR_AQ_EMPTY;
                goto sq_send_command_error;
        }

        details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
        if (cd)
                *details = *cd;
        else
                memset(details, 0, sizeof(*details));

        /* Reclaim the descriptors that FW/MBX has already processed via
         * ice_clean_sq(); it returns the number of descriptors available.
         * The clean function called here could be called in a separate
         * thread in case of asynchronous completions.
         */
        if (ice_clean_sq(hw, cq) == 0) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Error: Control Send Queue is full.\n");
                status = ICE_ERR_AQ_FULL;
                goto sq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

        /* if buf is not NULL assume indirect command */
        if (buf) {
                dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
                /* copy the user buf into the respective DMA buf */
                memcpy(dma_buf->va, buf, buf_size);
                desc_on_ring->datalen = cpu_to_le16(buf_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.generic.addr_high =
                        cpu_to_le32(upper_32_bits(dma_buf->pa));
                desc_on_ring->params.generic.addr_low =
                        cpu_to_le32(lower_32_bits(dma_buf->pa));
        }

        /* Debug desc and buffer */
        ice_debug(hw, ICE_DBG_AQ_MSG,
                  "ATQ: Control Send queue desc and buffer:\n");

        ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);

        (cq->sq.next_to_use)++;
        if (cq->sq.next_to_use == cq->sq.count)
                cq->sq.next_to_use = 0;
        wr32(hw, cq->sq.tail, cq->sq.next_to_use);

        do {
                if (ice_sq_done(hw, cq))
                        break;

                mdelay(1);
                total_delay++;
        } while (total_delay < cq->sq_cmd_timeout);

        /* if ready, copy the desc back to temp */
        if (ice_sq_done(hw, cq)) {
                memcpy(desc, desc_on_ring, sizeof(*desc));
                if (buf) {
                        /* get returned length to copy */
                        u16 copy_size = le16_to_cpu(desc->datalen);

                        if (copy_size > buf_size) {
                                ice_debug(hw, ICE_DBG_AQ_MSG,
                                          "Return len %d > than buf len %d\n",
                                          copy_size, buf_size);
                                status = ICE_ERR_AQ_ERROR;
                        } else {
                                memcpy(buf, dma_buf->va, copy_size);
                        }
                }
                retval = le16_to_cpu(desc->retval);
                if (retval) {
                        ice_debug(hw, ICE_DBG_AQ_MSG,
                                  "Control Send Queue command completed with error 0x%x\n",
                                  retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if (!status && retval != ICE_AQ_RC_OK)
                        status = ICE_ERR_AQ_ERROR;
                cq->sq_last_status = (enum ice_aq_err)retval;
        }

        ice_debug(hw, ICE_DBG_AQ_MSG,
                  "ATQ: desc and buffer writeback:\n");

        ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);

        /* save writeback AQ if requested */
        if (details->wb_desc)
                memcpy(details->wb_desc, desc_on_ring,
                       sizeof(*details->wb_desc));

        /* update the error if a timeout occurred */
        if (!cmd_completed) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Send Queue Writeback timeout.\n");
                status = ICE_ERR_AQ_TIMEOUT;
        }

sq_send_command_error:
        mutex_unlock(&cq->sq_lock);
        return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
        /* zero out the desc */
        memset(desc, 0, sizeof(*desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
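
/* Usage sketch (illustrative only; the wrapper and its opcode parameter are
 * hypothetical): a direct command carries no data buffer, so build the
 * descriptor with ice_fill_dflt_direct_cmd_desc() and post it with
 * buf == NULL and buf_size == 0. On return, desc holds the writeback.
 */
static enum ice_status
example_send_direct_cmd(struct ice_hw *hw, u16 hypothetical_opcode)
{
        struct ice_aq_desc desc;

        ice_fill_dflt_direct_cmd_desc(&desc, hypothetical_opcode);
        return ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
}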

/**
 * ice_clean_rq_elem - clean one element from the Control Receive Queue
 * @hw: pointer to the hw struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through 'e'. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
                  struct ice_rq_event_info *e, u16 *pending)
{
        u16 ntc = cq->rq.next_to_clean;
        enum ice_status ret_code = 0;
        struct ice_aq_desc *desc;
        struct ice_dma_mem *bi;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        memset(&e->desc, 0, sizeof(e->desc));

        /* take the lock before we start messing with the ring */
        mutex_lock(&cq->rq_lock);

        if (!cq->rq.count) {
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Receive queue not initialized.\n");
                ret_code = ICE_ERR_AQ_EMPTY;
                goto clean_rq_elem_err;
        }

        /* set next_to_use to head */
        ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = ICE_ERR_AQ_NO_WORK;
                goto clean_rq_elem_out;
        }

        /* now clean the next descriptor */
        desc = ICE_CTL_Q_DESC(cq->rq, ntc);
        desc_idx = ntc;

        cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
        flags = le16_to_cpu(desc->flags);
        if (flags & ICE_AQ_FLAG_ERR) {
                ret_code = ICE_ERR_AQ_ERROR;
                ice_debug(hw, ICE_DBG_AQ_MSG,
                          "Control Receive Queue Event received with error 0x%x\n",
                          cq->rq_last_status);
        }
        memcpy(&e->desc, desc, sizeof(e->desc));
        datalen = le16_to_cpu(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf && e->msg_len)
                memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

        ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");

        ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
                     cq->rq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message size
         */
        bi = &cq->rq.r.rq_bi[ntc];
        memset(desc, 0, sizeof(*desc));

        desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
        if (cq->rq_buf_size > ICE_AQ_LG_BUF)
                desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
        desc->datalen = cpu_to_le16(bi->size);
        desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, cq->rq.tail, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == cq->num_rq_entries)
                ntc = 0;
        cq->rq.next_to_clean = ntc;
        cq->rq.next_to_use = ntu;

clean_rq_elem_out:
        /* Set pending if needed, unlock and return */
        if (pending) {
                /* re-read HW head to calculate actual pending messages */
                ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
                *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
        }
clean_rq_elem_err:
        mutex_unlock(&cq->rq_lock);

        return ret_code;
}
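
/* Usage sketch (illustrative only, not part of the driver): drain every
 * pending event from a control queue, e.g. from a service task. The event
 * buffer should be rq_buf_size bytes, since ice_clean_rq_elem() truncates
 * the copied message to e->buf_len.
 */
static void example_drain_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        struct ice_rq_event_info event = { 0 };
        u16 pending = 0;

        event.buf_len = cq->rq_buf_size;
        event.msg_buf = devm_kzalloc(ice_hw_to_dev(hw), event.buf_len,
                                     GFP_KERNEL);
        if (!event.msg_buf)
                return;

        do {
                /* stops on ICE_ERR_AQ_NO_WORK once the queue is empty */
                if (ice_clean_rq_elem(hw, cq, &event, &pending))
                        break;
                /* dispatch on le16_to_cpu(event.desc.opcode) here */
        } while (pending);

        devm_kfree(ice_hw_to_dev(hw), event.msg_buf);
}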