/* dpdk/drivers/net/ice/base/ice_controlq.c */
   1/* SPDX-License-Identifier: BSD-3-Clause
   2 * Copyright(c) 2001-2021 Intel Corporation
   3 */
   4
   5#include "ice_common.h"
   6
/* ICE_CQ_INIT_REGS - fill a struct ice_ctl_q_info with the register offsets
 * and masks of one hardware control queue pair. The send queue (sq) maps to
 * the ATQ registers and the receive queue (rq) to the ARQ registers; "prefix"
 * selects the register namespace (e.g. PF_FW or PF_MBX) via token pasting.
 */
#define ICE_CQ_INIT_REGS(qinfo, prefix)                         \
do {                                                            \
        (qinfo)->sq.head = prefix##_ATQH;                       \
        (qinfo)->sq.tail = prefix##_ATQT;                       \
        (qinfo)->sq.len = prefix##_ATQLEN;                      \
        (qinfo)->sq.bah = prefix##_ATQBAH;                      \
        (qinfo)->sq.bal = prefix##_ATQBAL;                      \
        (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;        \
        (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
        (qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;  \
        (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;           \
        (qinfo)->rq.head = prefix##_ARQH;                       \
        (qinfo)->rq.tail = prefix##_ARQT;                       \
        (qinfo)->rq.len = prefix##_ARQLEN;                      \
        (qinfo)->rq.bah = prefix##_ARQBAH;                      \
        (qinfo)->rq.bal = prefix##_ARQBAL;                      \
        (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;        \
        (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \
        (qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;  \
        (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;           \
} while (0)
  28
/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
        struct ice_ctl_q_info *cq = &hw->adminq;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        /* the AdminQ uses the PF_FW register namespace */
        ICE_CQ_INIT_REGS(cq, PF_FW);
}
  43
  44/**
  45 * ice_mailbox_init_regs - Initialize Mailbox registers
  46 * @hw: pointer to the hardware structure
  47 *
  48 * This assumes the alloc_sq and alloc_rq functions have already been called
  49 */
  50static void ice_mailbox_init_regs(struct ice_hw *hw)
  51{
  52        struct ice_ctl_q_info *cq = &hw->mailboxq;
  53
  54        ICE_CQ_INIT_REGS(cq, PF_MBX);
  55}
  56
  57/**
  58 * ice_check_sq_alive
  59 * @hw: pointer to the HW struct
  60 * @cq: pointer to the specific Control queue
  61 *
  62 * Returns true if Queue is enabled else false.
  63 */
  64bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  65{
  66        /* check both queue-length and queue-enable fields */
  67        if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
  68                return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
  69                                                cq->sq.len_ena_mask)) ==
  70                        (cq->num_sq_entries | cq->sq.len_ena_mask);
  71
  72        return false;
  73}
  74
  75/**
  76 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
  77 * @hw: pointer to the hardware structure
  78 * @cq: pointer to the specific Control queue
  79 */
  80static enum ice_status
  81ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
  82{
  83        size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
  84
  85        cq->sq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->sq.desc_buf, size);
  86        if (!cq->sq.desc_buf.va)
  87                return ICE_ERR_NO_MEMORY;
  88
  89        cq->sq.cmd_buf = ice_calloc(hw, cq->num_sq_entries,
  90                                    sizeof(struct ice_sq_cd));
  91        if (!cq->sq.cmd_buf) {
  92                ice_free_dma_mem(hw, &cq->sq.desc_buf);
  93                return ICE_ERR_NO_MEMORY;
  94        }
  95
  96        return ICE_SUCCESS;
  97}
  98
  99/**
 100 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 101 * @hw: pointer to the hardware structure
 102 * @cq: pointer to the specific Control queue
 103 */
 104static enum ice_status
 105ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 106{
 107        size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
 108
 109        cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
 110        if (!cq->rq.desc_buf.va)
 111                return ICE_ERR_NO_MEMORY;
 112        return ICE_SUCCESS;
 113}
 114
/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
        /* frees only the descriptor ring DMA memory, not the data buffers */
        ice_free_dma_mem(hw, &ring->desc_buf);
}
 127
/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Allocates one DMA buffer of cq->rq_buf_size per receive descriptor and
 * pre-fills every descriptor so the firmware can post events immediately.
 * On any allocation failure everything allocated so far is released and
 * ICE_ERR_NO_MEMORY is returned.
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */
        cq->rq.dma_head = ice_calloc(hw, cq->num_rq_entries,
                                     sizeof(cq->rq.desc_buf));
        if (!cq->rq.dma_head)
                return ICE_ERR_NO_MEMORY;
        cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

        /* allocate the mapped buffers */
        for (i = 0; i < cq->num_rq_entries; i++) {
                struct ice_aq_desc *desc;
                struct ice_dma_mem *bi;

                bi = &cq->rq.r.rq_bi[i];
                bi->va = ice_alloc_dma_mem(hw, bi, cq->rq_buf_size);
                if (!bi->va)
                        goto unwind_alloc_rq_bufs;

                /* now configure the descriptors for use */
                desc = ICE_CTL_Q_DESC(cq->rq, i);

                /* every RQ descriptor carries an indirect buffer; LB flag
                 * is required for buffers larger than the "large buf" size
                 */
                desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
                if (cq->rq_buf_size > ICE_AQ_LG_BUF)
                        desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = CPU_TO_LE16(bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.generic.addr_high =
                        CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
                desc->params.generic.addr_low =
                        CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
                desc->params.generic.param0 = 0;
                desc->params.generic.param1 = 0;
        }
        return ICE_SUCCESS;

unwind_alloc_rq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                ice_free_dma_mem(hw, &cq->rq.r.rq_bi[i]);
        cq->rq.r.rq_bi = NULL;
        ice_free(hw, cq->rq.dma_head);
        cq->rq.dma_head = NULL;

        return ICE_ERR_NO_MEMORY;
}
 191
 192/**
 193 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 194 * @hw: pointer to the hardware structure
 195 * @cq: pointer to the specific Control queue
 196 */
 197static enum ice_status
 198ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
 199{
 200        int i;
 201
 202        /* No mapped memory needed yet, just the buffer info structures */
 203        cq->sq.dma_head = ice_calloc(hw, cq->num_sq_entries,
 204                                     sizeof(cq->sq.desc_buf));
 205        if (!cq->sq.dma_head)
 206                return ICE_ERR_NO_MEMORY;
 207        cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
 208
 209        /* allocate the mapped buffers */
 210        for (i = 0; i < cq->num_sq_entries; i++) {
 211                struct ice_dma_mem *bi;
 212
 213                bi = &cq->sq.r.sq_bi[i];
 214                bi->va = ice_alloc_dma_mem(hw, bi, cq->sq_buf_size);
 215                if (!bi->va)
 216                        goto unwind_alloc_sq_bufs;
 217        }
 218        return ICE_SUCCESS;
 219
 220unwind_alloc_sq_bufs:
 221        /* don't try to free the one that failed... */
 222        i--;
 223        for (; i >= 0; i--)
 224                ice_free_dma_mem(hw, &cq->sq.r.sq_bi[i]);
 225        cq->sq.r.sq_bi = NULL;
 226        ice_free(hw, cq->sq.dma_head);
 227        cq->sq.dma_head = NULL;
 228
 229        return ICE_ERR_NO_MEMORY;
 230}
 231
/**
 * ice_cfg_cq_regs - program hardware registers for one control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring (sq or rq)
 * @num_entries: number of descriptors in the ring
 *
 * Clears head/tail, writes the ring length with the enable bit set, and
 * programs the DMA base address. Reads back one register (BAL) to verify
 * the configuration reached the device.
 */
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
        /* Clear Head and Tail */
        wr32(hw, ring->head, 0);
        wr32(hw, ring->tail, 0);

        /* set starting point */
        wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
        wr32(hw, ring->bal, ICE_LO_DWORD(ring->desc_buf.pa));
        wr32(hw, ring->bah, ICE_HI_DWORD(ring->desc_buf.pa));

        /* Check one register to verify that config was applied */
        if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
                return ICE_ERR_AQ_ERROR;

        return ICE_SUCCESS;
}
 250
/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}
 263
/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status status;

        status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
        if (status)
                return status;

        /* Update tail in the HW to post pre-allocated buffers.
         * Tail is set to the last descriptor so the whole ring is
         * available to the firmware for incoming events.
         */
        wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

        return ICE_SUCCESS;
}
 285
 286#define ICE_FREE_CQ_BUFS(hw, qi, ring)                                  \
 287do {                                                                    \
 288        /* free descriptors */                                          \
 289        if ((qi)->ring.r.ring##_bi) {                                   \
 290                int i;                                                  \
 291                                                                        \
 292                for (i = 0; i < (qi)->num_##ring##_entries; i++)        \
 293                        if ((qi)->ring.r.ring##_bi[i].pa)               \
 294                                ice_free_dma_mem((hw),                  \
 295                                        &(qi)->ring.r.ring##_bi[i]);    \
 296        }                                                               \
 297        /* free the buffer info list */                                 \
 298        if ((qi)->ring.cmd_buf)                                         \
 299                ice_free(hw, (qi)->ring.cmd_buf);                       \
 300        /* free DMA head */                                             \
 301        ice_free(hw, (qi)->ring.dma_head);                              \
 302} while (0)
 303
/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (cq->sq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_sq_entries || !cq->sq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->sq.next_to_use = 0;
        cq->sq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_sq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_sq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! a non-zero count marks the queue as initialized */
        cq->sq.count = cq->num_sq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ICE_FREE_CQ_BUFS(hw, cq, sq);
        ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
        return ret_code;
}
 365
/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        if (cq->rq.count > 0) {
                /* queue already initialized */
                ret_code = ICE_ERR_NOT_READY;
                goto init_ctrlq_exit;
        }

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->rq_buf_size) {
                ret_code = ICE_ERR_CFG;
                goto init_ctrlq_exit;
        }

        cq->rq.next_to_use = 0;
        cq->rq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
        if (ret_code)
                goto init_ctrlq_exit;

        /* allocate buffers in the rings */
        ret_code = ice_alloc_rq_bufs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* initialize base registers */
        ret_code = ice_cfg_rq_regs(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_rings;

        /* success! a non-zero count marks the queue as initialized */
        cq->rq.count = cq->num_rq_entries;
        goto init_ctrlq_exit;

init_ctrlq_free_rings:
        ICE_FREE_CQ_BUFS(hw, cq, rq);
        ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
        return ret_code;
}
 427
/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 *
 * Takes and releases cq->sq_lock; returns ICE_ERR_NOT_READY if the queue
 * was never initialized (count == 0).
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = ICE_SUCCESS;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ice_acquire_lock(&cq->sq_lock);

        if (!cq->sq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_sq_out;
        }

        /* Stop firmware AdminQ processing by zeroing all queue registers */
        wr32(hw, cq->sq.head, 0);
        wr32(hw, cq->sq.tail, 0);
        wr32(hw, cq->sq.len, 0);
        wr32(hw, cq->sq.bal, 0);
        wr32(hw, cq->sq.bah, 0);

        cq->sq.count = 0;       /* to indicate uninitialized queue */

        /* free ring buffers and the ring itself */
        ICE_FREE_CQ_BUFS(hw, cq, sq);
        ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
        ice_release_lock(&cq->sq_lock);
        return ret_code;
}
 466
 467/**
 468 * ice_aq_ver_check - Check the reported AQ API version.
 469 * @hw: pointer to the hardware structure
 470 *
 471 * Checks if the driver should load on a given AQ API version.
 472 *
 473 * Return: 'true' iff the driver should attempt to load. 'false' otherwise.
 474 */
 475static bool ice_aq_ver_check(struct ice_hw *hw)
 476{
 477        if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
 478                /* Major API version is newer than expected, don't load */
 479                ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
 480                return false;
 481        } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
 482                if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
 483                        ice_info(hw, "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
 484                else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
 485                        ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 486        } else {
 487                /* Major API version is older than expected, log a warning */
 488                ice_info(hw, "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
 489        }
 490        return true;
 491}
 492
/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 *
 * Takes and releases cq->rq_lock; returns ICE_ERR_NOT_READY if the queue
 * was never initialized (count == 0).
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        enum ice_status ret_code = ICE_SUCCESS;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        ice_acquire_lock(&cq->rq_lock);

        if (!cq->rq.count) {
                ret_code = ICE_ERR_NOT_READY;
                goto shutdown_rq_out;
        }

        /* Stop Control Queue processing by zeroing all queue registers */
        wr32(hw, cq->rq.head, 0);
        wr32(hw, cq->rq.tail, 0);
        wr32(hw, cq->rq.len, 0);
        wr32(hw, cq->rq.bal, 0);
        wr32(hw, cq->rq.bah, 0);

        /* set rq.count to 0 to indicate uninitialized queue */
        cq->rq.count = 0;

        /* free ring buffers and the ring itself */
        ICE_FREE_CQ_BUFS(hw, cq, rq);
        ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
        ice_release_lock(&cq->rq_lock);
        return ret_code;
}
 532
 533/**
 534 * ice_init_check_adminq - Check version for Admin Queue to know if its alive
 535 * @hw: pointer to the hardware structure
 536 */
 537static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
 538{
 539        struct ice_ctl_q_info *cq = &hw->adminq;
 540        enum ice_status status;
 541
 542        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
 543
 544        status = ice_aq_get_fw_ver(hw, NULL);
 545        if (status)
 546                goto init_ctrlq_free_rq;
 547
 548        if (!ice_aq_ver_check(hw)) {
 549                status = ICE_ERR_FW_API_VER;
 550                goto init_ctrlq_free_rq;
 551        }
 552
 553        return ICE_SUCCESS;
 554
 555init_ctrlq_free_rq:
 556        ice_shutdown_rq(hw, cq);
 557        ice_shutdown_sq(hw, cq);
 558        return status;
 559}
 560
/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;
        enum ice_status ret_code;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        /* select the queue info and program its register map */
        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                ice_adminq_init_regs(hw);
                cq = &hw->adminq;
                break;
        case ICE_CTL_Q_MAILBOX:
                ice_mailbox_init_regs(hw);
                cq = &hw->mailboxq;
                break;
        default:
                return ICE_ERR_PARAM;
        }
        cq->qtype = q_type;

        /* verify input for valid configuration */
        if (!cq->num_rq_entries || !cq->num_sq_entries ||
            !cq->rq_buf_size || !cq->sq_buf_size) {
                return ICE_ERR_CFG;
        }

        /* setup SQ command write back timeout */
        cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

        /* allocate the ATQ */
        ret_code = ice_init_sq(hw, cq);
        if (ret_code)
                return ret_code;

        /* allocate the ARQ */
        ret_code = ice_init_rq(hw, cq);
        if (ret_code)
                goto init_ctrlq_free_sq;

        /* success! */
        return ICE_SUCCESS;

init_ctrlq_free_sq:
        /* undo the SQ init so no partial queue state remains */
        ice_shutdown_sq(hw, cq);
        return ret_code;
}
 622
/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
        struct ice_ctl_q_info *cq;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        switch (q_type) {
        case ICE_CTL_Q_ADMIN:
                cq = &hw->adminq;
                /* tell firmware we are shutting down, but only if the
                 * AdminQ send queue is still alive to carry the command
                 */
                if (ice_check_sq_alive(hw, cq))
                        ice_aq_q_shutdown(hw, true);
                break;
        case ICE_CTL_Q_MAILBOX:
                cq = &hw->mailboxq;
                break;
        default:
                return;
        }

        ice_shutdown_sq(hw, cq);
        ice_shutdown_rq(hw, cq);
}
 652
/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
        /* Shutdown FW admin queue */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
        /* Shutdown PF-VF Mailbox */
        ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
 669
/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
        enum ice_status status;
        u32 retry = 0;

        ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

        /* Init FW admin queue; retry only on FW critical errors, tearing
         * the queue down and waiting before each new attempt
         */
        do {
                status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
                if (status)
                        return status;

                status = ice_init_check_adminq(hw);
                if (status != ICE_ERR_AQ_FW_CRITICAL)
                        break;

                ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
                ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
                ice_msec_delay(ICE_CTL_Q_ADMIN_INIT_MSEC, true);
        } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

        if (status)
                return status;
        /* Init Mailbox queue */
        return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
 710
/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
        ice_init_lock(&cq->sq_lock);
        ice_init_lock(&cq->rq_lock);
}
 722
/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
        /* locks are created once here; re-init paths must not recreate them */
        ice_init_ctrlq_locks(&hw->adminq);
        ice_init_ctrlq_locks(&hw->mailboxq);

        return ice_init_all_ctrlq(hw);
}
 746
/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
        ice_destroy_lock(&cq->sq_lock);
        ice_destroy_lock(&cq->rq_lock);
}
 758
/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
        /* shut down all the control queues first */
        ice_shutdown_all_ctrlq(hw);

        ice_destroy_ctrlq_locks(&hw->adminq);
        ice_destroy_ctrlq_locks(&hw->mailboxq);
}
 776
/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Walks from next_to_clean up to the hardware head pointer, zeroing each
 * completed descriptor and its details entry, then updates next_to_clean.
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        struct ice_ctl_q_ring *sq = &cq->sq;
        u16 ntc = sq->next_to_clean;
        struct ice_sq_cd *details;
        struct ice_aq_desc *desc;

        desc = ICE_CTL_Q_DESC(*sq, ntc);
        details = ICE_CTL_Q_DETAILS(*sq, ntc);

        /* head is re-read from hardware each iteration since firmware may
         * keep consuming descriptors while we clean
         */
        while (rd32(hw, cq->sq.head) != ntc) {
                ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
                ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
                ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
                ntc++;
                if (ntc == sq->count)
                        ntc = 0; /* ring wrap */
                desc = ICE_CTL_Q_DESC(*sq, ntc);
                details = ICE_CTL_Q_DETAILS(*sq, ntc);
        }

        sq->next_to_clean = ntc;

        return ICE_CTL_Q_DESC_UNUSED(sq);
}
 809
/**
 * ice_debug_cq
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 * No-op unless AQ descriptor debugging is enabled in hw->debug_mask.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
        struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
        u16 datalen, flags;

        if (!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
                return;

        if (!desc)
                return;

        datalen = LE16_TO_CPU(cq_desc->datalen);
        flags = LE16_TO_CPU(cq_desc->flags);

        ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
                  LE16_TO_CPU(cq_desc->opcode), flags, datalen,
                  LE16_TO_CPU(cq_desc->retval));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->cookie_high),
                  LE32_TO_CPU(cq_desc->cookie_low));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1)  0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->params.generic.param0),
                  LE32_TO_CPU(cq_desc->params.generic.param1));
        ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l)   0x%08X 0x%08X\n",
                  LE32_TO_CPU(cq_desc->params.generic.addr_high),
                  LE32_TO_CPU(cq_desc->params.generic.addr_low));
        /* Dump buffer iff 1) one exists and 2) is either a response indicated
         * by the DD and/or CMP flag set or a command with the RD flag set.
         * Note: the raw (LE) datalen is only tested for non-zero here, which
         * is endian-safe; the byte-swapped value bounds the dump length.
         */
        if (buf && cq_desc->datalen != 0 &&
            (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP) ||
             flags & ICE_AQ_FLAG_RD)) {
                ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
                ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf,
                                MIN_T(u16, buf_len, datalen));
        }
}
 856
/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}
 872
 873/**
 874 * ice_sq_send_cmd_nolock - send command to Control Queue (ATQ)
 875 * @hw: pointer to the HW struct
 876 * @cq: pointer to the specific Control queue
 877 * @desc: prefilled descriptor describing the command (non DMA mem)
 878 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 879 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 880 * @cd: pointer to command details structure
 881 *
 882 * This is the main send command routine for the ATQ. It runs the queue,
 883 * cleans the queue, etc.
 884 */
 885static enum ice_status
 886ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 887                       struct ice_aq_desc *desc, void *buf, u16 buf_size,
 888                       struct ice_sq_cd *cd)
 889{
 890        struct ice_dma_mem *dma_buf = NULL;
 891        struct ice_aq_desc *desc_on_ring;
 892        bool cmd_completed = false;
 893        enum ice_status status = ICE_SUCCESS;
 894        struct ice_sq_cd *details;
 895        u32 total_delay = 0;
 896        u16 retval = 0;
 897        u32 val = 0;
 898
 899        /* if reset is in progress return a soft error */
 900        if (hw->reset_ongoing)
 901                return ICE_ERR_RESET_ONGOING;
 902
 903        cq->sq_last_status = ICE_AQ_RC_OK;
 904
 905        if (!cq->sq.count) {
 906                ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
 907                status = ICE_ERR_AQ_EMPTY;
 908                goto sq_send_command_error;
 909        }
 910
 911        if ((buf && !buf_size) || (!buf && buf_size)) {
 912                status = ICE_ERR_PARAM;
 913                goto sq_send_command_error;
 914        }
 915
 916        if (buf) {
 917                if (buf_size > cq->sq_buf_size) {
 918                        ice_debug(hw, ICE_DBG_AQ_MSG, "Invalid buffer size for Control Send queue: %d.\n",
 919                                  buf_size);
 920                        status = ICE_ERR_INVAL_SIZE;
 921                        goto sq_send_command_error;
 922                }
 923
 924                desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF);
 925                if (buf_size > ICE_AQ_LG_BUF)
 926                        desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
 927        }
 928
 929        val = rd32(hw, cq->sq.head);
 930        if (val >= cq->num_sq_entries) {
 931                ice_debug(hw, ICE_DBG_AQ_MSG, "head overrun at %d in the Control Send Queue ring\n",
 932                          val);
 933                status = ICE_ERR_AQ_EMPTY;
 934                goto sq_send_command_error;
 935        }
 936
 937        details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
 938        if (cd)
 939                *details = *cd;
 940        else
 941                ice_memset(details, 0, sizeof(*details), ICE_NONDMA_MEM);
 942
 943        /* Call clean and check queue available function to reclaim the
 944         * descriptors that were processed by FW/MBX; the function returns the
 945         * number of desc available. The clean function called here could be
 946         * called in a separate thread in case of asynchronous completions.
 947         */
 948        if (ice_clean_sq(hw, cq) == 0) {
 949                ice_debug(hw, ICE_DBG_AQ_MSG, "Error: Control Send Queue is full.\n");
 950                status = ICE_ERR_AQ_FULL;
 951                goto sq_send_command_error;
 952        }
 953
 954        /* initialize the temp desc pointer with the right desc */
 955        desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
 956
 957        /* if the desc is available copy the temp desc to the right place */
 958        ice_memcpy(desc_on_ring, desc, sizeof(*desc_on_ring),
 959                   ICE_NONDMA_TO_DMA);
 960
 961        /* if buf is not NULL assume indirect command */
 962        if (buf) {
 963                dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
 964                /* copy the user buf into the respective DMA buf */
 965                ice_memcpy(dma_buf->va, buf, buf_size, ICE_NONDMA_TO_DMA);
 966                desc_on_ring->datalen = CPU_TO_LE16(buf_size);
 967
 968                /* Update the address values in the desc with the pa value
 969                 * for respective buffer
 970                 */
 971                desc_on_ring->params.generic.addr_high =
 972                        CPU_TO_LE32(ICE_HI_DWORD(dma_buf->pa));
 973                desc_on_ring->params.generic.addr_low =
 974                        CPU_TO_LE32(ICE_LO_DWORD(dma_buf->pa));
 975        }
 976
 977        /* Debug desc and buffer */
 978        ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n");
 979
 980        ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
 981
 982        (cq->sq.next_to_use)++;
 983        if (cq->sq.next_to_use == cq->sq.count)
 984                cq->sq.next_to_use = 0;
 985        wr32(hw, cq->sq.tail, cq->sq.next_to_use);
 986
 987        do {
 988                if (ice_sq_done(hw, cq))
 989                        break;
 990
 991                ice_usec_delay(ICE_CTL_Q_SQ_CMD_USEC, false);
 992                total_delay++;
 993        } while (total_delay < cq->sq_cmd_timeout);
 994
 995        /* if ready, copy the desc back to temp */
 996        if (ice_sq_done(hw, cq)) {
 997                ice_memcpy(desc, desc_on_ring, sizeof(*desc),
 998                           ICE_DMA_TO_NONDMA);
 999                if (buf) {
1000                        /* get returned length to copy */
1001                        u16 copy_size = LE16_TO_CPU(desc->datalen);
1002
1003                        if (copy_size > buf_size) {
1004                                ice_debug(hw, ICE_DBG_AQ_MSG, "Return len %d > than buf len %d\n",
1005                                          copy_size, buf_size);
1006                                status = ICE_ERR_AQ_ERROR;
1007                        } else {
1008                                ice_memcpy(buf, dma_buf->va, copy_size,
1009                                           ICE_DMA_TO_NONDMA);
1010                        }
1011                }
1012                retval = LE16_TO_CPU(desc->retval);
1013                if (retval) {
1014                        ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue command 0x%04X completed with error 0x%X\n",
1015                                  LE16_TO_CPU(desc->opcode),
1016                                  retval);
1017
1018                        /* strip off FW internal code */
1019                        retval &= 0xff;
1020                }
1021                cmd_completed = true;
1022                if (!status && retval != ICE_AQ_RC_OK)
1023                        status = ICE_ERR_AQ_ERROR;
1024                cq->sq_last_status = (enum ice_aq_err)retval;
1025        }
1026
1027        ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
1028
1029        ice_debug_cq(hw, (void *)desc, buf, buf_size);
1030
1031        /* save writeback AQ if requested */
1032        if (details->wb_desc)
1033                ice_memcpy(details->wb_desc, desc_on_ring,
1034                           sizeof(*details->wb_desc), ICE_DMA_TO_NONDMA);
1035
1036        /* update the error if time out occurred */
1037        if (!cmd_completed) {
1038                if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
1039                    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
1040                        ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
1041                        status = ICE_ERR_AQ_FW_CRITICAL;
1042                } else {
1043                        ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send Queue Writeback timeout.\n");
1044                        status = ICE_ERR_AQ_TIMEOUT;
1045                }
1046        }
1047
1048sq_send_command_error:
1049        return status;
1050}
1051
1052/**
1053 * ice_sq_send_cmd - send command to Control Queue (ATQ)
1054 * @hw: pointer to the HW struct
1055 * @cq: pointer to the specific Control queue
1056 * @desc: prefilled descriptor describing the command
1057 * @buf: buffer to use for indirect commands (or NULL for direct commands)
1058 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
1059 * @cd: pointer to command details structure
1060 *
1061 * This is the main send command routine for the ATQ. It runs the queue,
1062 * cleans the queue, etc.
1063 */
1064enum ice_status
1065ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1066                struct ice_aq_desc *desc, void *buf, u16 buf_size,
1067                struct ice_sq_cd *cd)
1068{
1069        enum ice_status status = ICE_SUCCESS;
1070
1071        /* if reset is in progress return a soft error */
1072        if (hw->reset_ongoing)
1073                return ICE_ERR_RESET_ONGOING;
1074
1075        ice_acquire_lock(&cq->sq_lock);
1076        status = ice_sq_send_cmd_nolock(hw, cq, desc, buf, buf_size, cd);
1077        ice_release_lock(&cq->sq_lock);
1078
1079        return status;
1080}
1081
1082/**
1083 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
1084 * @desc: pointer to the temp descriptor (non DMA mem)
1085 * @opcode: the opcode can be used to decide which flags to turn off or on
1086 *
1087 * Fill the desc with default values
1088 */
1089void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
1090{
1091        /* zero out the desc */
1092        ice_memset(desc, 0, sizeof(*desc), ICE_NONDMA_MEM);
1093        desc->opcode = CPU_TO_LE16(opcode);
1094        desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_SI);
1095}
1096
1097/**
1098 * ice_clean_rq_elem
1099 * @hw: pointer to the HW struct
1100 * @cq: pointer to the specific Control queue
1101 * @e: event info from the receive descriptor, includes any buffers
1102 * @pending: number of events that could be left to process
1103 *
1104 * This function cleans one Admin Receive Queue element and returns
1105 * the contents through e. It can also return how many events are
1106 * left to process through 'pending'.
1107 */
1108enum ice_status
1109ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
1110                  struct ice_rq_event_info *e, u16 *pending)
1111{
1112        u16 ntc = cq->rq.next_to_clean;
1113        enum ice_aq_err rq_last_status;
1114        enum ice_status ret_code = ICE_SUCCESS;
1115        struct ice_aq_desc *desc;
1116        struct ice_dma_mem *bi;
1117        u16 desc_idx;
1118        u16 datalen;
1119        u16 flags;
1120        u16 ntu;
1121
1122        /* pre-clean the event info */
1123        ice_memset(&e->desc, 0, sizeof(e->desc), ICE_NONDMA_MEM);
1124
1125        /* take the lock before we start messing with the ring */
1126        ice_acquire_lock(&cq->rq_lock);
1127
1128        if (!cq->rq.count) {
1129                ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive queue not initialized.\n");
1130                ret_code = ICE_ERR_AQ_EMPTY;
1131                goto clean_rq_elem_err;
1132        }
1133
1134        /* set next_to_use to head */
1135        ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1136
1137        if (ntu == ntc) {
1138                /* nothing to do - shouldn't need to update ring's values */
1139                ret_code = ICE_ERR_AQ_NO_WORK;
1140                goto clean_rq_elem_out;
1141        }
1142
1143        /* now clean the next descriptor */
1144        desc = ICE_CTL_Q_DESC(cq->rq, ntc);
1145        desc_idx = ntc;
1146
1147        rq_last_status = (enum ice_aq_err)LE16_TO_CPU(desc->retval);
1148        flags = LE16_TO_CPU(desc->flags);
1149        if (flags & ICE_AQ_FLAG_ERR) {
1150                ret_code = ICE_ERR_AQ_ERROR;
1151                ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
1152                          LE16_TO_CPU(desc->opcode), rq_last_status);
1153        }
1154        ice_memcpy(&e->desc, desc, sizeof(e->desc), ICE_DMA_TO_NONDMA);
1155        datalen = LE16_TO_CPU(desc->datalen);
1156        e->msg_len = MIN_T(u16, datalen, e->buf_len);
1157        if (e->msg_buf && e->msg_len)
1158                ice_memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va,
1159                           e->msg_len, ICE_DMA_TO_NONDMA);
1160
1161        ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
1162
1163        ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
1164
1165        /* Restore the original datalen and buffer address in the desc,
1166         * FW updates datalen to indicate the event message size
1167         */
1168        bi = &cq->rq.r.rq_bi[ntc];
1169        ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
1170
1171        desc->flags = CPU_TO_LE16(ICE_AQ_FLAG_BUF);
1172        if (cq->rq_buf_size > ICE_AQ_LG_BUF)
1173                desc->flags |= CPU_TO_LE16(ICE_AQ_FLAG_LB);
1174        desc->datalen = CPU_TO_LE16(bi->size);
1175        desc->params.generic.addr_high = CPU_TO_LE32(ICE_HI_DWORD(bi->pa));
1176        desc->params.generic.addr_low = CPU_TO_LE32(ICE_LO_DWORD(bi->pa));
1177
1178        /* set tail = the last cleaned desc index. */
1179        wr32(hw, cq->rq.tail, ntc);
1180        /* ntc is updated to tail + 1 */
1181        ntc++;
1182        if (ntc == cq->num_rq_entries)
1183                ntc = 0;
1184        cq->rq.next_to_clean = ntc;
1185        cq->rq.next_to_use = ntu;
1186
1187clean_rq_elem_out:
1188        /* Set pending if needed, unlock and return */
1189        if (pending) {
1190                /* re-read HW head to calculate actual pending messages */
1191                ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
1192                *pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
1193        }
1194clean_rq_elem_err:
1195        ice_release_lock(&cq->rq_lock);
1196
1197        return ret_code;
1198}
1199