linux/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*       copyright notice, this list of conditions and the following
*       disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*       copyright notice, this list of conditions and the following
*       disclaimer in the documentation and/or other materials
*       provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"

/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 */
static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
        wmb();            /* make sure WQE is populated before polarity is set */
        set_64bit_val(wqe, 24, header);
}

/**
 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
 * @cqp: struct for cqp hw
 * @val: cqp tail register value
 * @tail: wqtail register value
 * @error: cqp processing err
 */
static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
                                          u32 *val,
                                          u32 *tail,
                                          u32 *error)
{
        if (cqp->dev->is_pf) {
                *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
                *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
        } else {
                *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
                *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
        }
}

/**
 * i40iw_cqp_poll_registers - poll cqp registers
 * @cqp: struct for cqp hw
 * @tail: wqtail register value
 * @count: how many times to try for completion
 */
static enum i40iw_status_code i40iw_cqp_poll_registers(
                                                struct i40iw_sc_cqp *cqp,
                                                u32 tail,
                                                u32 count)
{
        u32 i = 0;
        u32 newtail, error, val;

        while (i < count) {
                i++;
                i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
                if (error) {
                        error = (cqp->dev->is_pf) ?
                                 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
                                 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        return I40IW_ERR_CQP_COMPL_ERROR;
                }
                if (newtail != tail) {
                        /* SUCCESS */
                        I40IW_RING_MOVE_TAIL(cqp->sq_ring);
                        cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
                        return 0;
                }
                udelay(I40IW_SLEEP_COUNT);
        }
        return I40IW_ERR_TIMEOUT;
}

/**
 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
 * @buf: ptr to fpm commit buffer
 * @info: ptr to i40iw_hmc_obj_info struct
 * @sd: number of SDs for HMC objects
 *
 * parses fpm commit info and copies base values
 * of hmc objects into hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
                                u64 *buf,
                                struct i40iw_hmc_obj_info *info,
                                u32 *sd)
{
        u64 temp;
        u64 size;
        u64 base = 0;
        u32 i, j;
        u32 k = 0;
        u32 low;

        /* copy base values in obj_info */
        for (i = I40IW_HMC_IW_QP, j = 0;
                        i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                info[i].base = RS_64_1(temp, 32) * 512;
                if (info[i].base > base) {
                        base = info[i].base;
                        k = i;
                }
                low = (u32)(temp);
                if (low)
                        info[i].cnt = low;
        }
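        /* the FPM footprint ends at the highest-based object; each HMC
         * segment descriptor (SD) maps 2MB (1 << 21), hence the shift by
         * 21 and the 0x1FFFFF remainder check below
         */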
        size = info[k].cnt * info[k].size + info[k].base;
        if (size & 0x1FFFFF)
                *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
        else
                *sd = (u32)(size >> 21);

        return 0;
}

/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copies max_cnt and
 * size values of hmc objects into hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
                                u64 *buf,
                                struct i40iw_hmc_info *hmc_info,
                                struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
        u64 temp;
        struct i40iw_hmc_obj_info *obj_info;
        u32 i, j, size;
        u16 max_pe_sds;

        obj_info = hmc_info->hmc_obj;

        get_64bit_val(buf, 0, &temp);
        hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
        max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

        /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
        if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
                max_pe_sds--;
        hmc_fpm_misc->max_sds = max_pe_sds;
        hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

        for (i = I40IW_HMC_IW_QP, j = 8;
             i <= I40IW_HMC_IW_ARP; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                if (i == I40IW_HMC_IW_QP)
                        obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
                else if (i == I40IW_HMC_IW_CQ)
                        obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
                else
                        obj_info[i].max_cnt = (u32)temp;

                size = (u32)RS_64_1(temp, 32);
                obj_info[i].size = ((u64)1 << size);
        }
        for (i = I40IW_HMC_IW_MR, j = 48;
                        i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
                get_64bit_val(buf, j, &temp);
                obj_info[i].max_cnt = (u32)temp;
                size = (u32)RS_64_1(temp, 32);
                obj_info[i].size = LS_64_1(1, size);
        }

        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
        get_64bit_val(buf, 64, &temp);
        hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
        if (!hmc_fpm_misc->xf_block_size)
                return I40IW_ERR_INVALID_SIZE;
        get_64bit_val(buf, 80, &temp);
        hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
        if (!hmc_fpm_misc->q1_block_size)
                return I40IW_ERR_INVALID_SIZE;
        return 0;
}

/**
 * i40iw_fill_qos_list - Change all unknown qs handles to available ones
 * @qs_list: list of qs_handles to be fixed with valid qs_handles
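 *
 * e.g. {5, UNKNOWN, UNKNOWN, 7} becomes {5, 5, 5, 7}: each unknown
 * entry inherits the last valid qs_handle seen before it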
 */
static void i40iw_fill_qos_list(u16 *qs_list)
{
        u16 qshandle = qs_list[0];
        int i;

        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                if (qs_list[i] == QS_HANDLE_UNKNOWN)
                        qs_list[i] = qshandle;
                else
                        qshandle = qs_list[i];
        }
}

/**
 * i40iw_qp_from_entry - Given entry, get to the qp structure
 * @entry: Points to list of qp structure
 */
static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
{
        if (!entry)
                return NULL;

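        /* open-coded container_of(entry, struct i40iw_sc_qp, list) */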
        return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
}

/**
 * i40iw_get_qp - get the next qp from the list given current qp
 * @head: Listhead of qp's
 * @qp: current qp
 */
static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
{
        struct list_head *entry = NULL;
        struct list_head *lastentry;

        if (list_empty(head))
                return NULL;

        if (!qp) {
                entry = head->next;
        } else {
                lastentry = &qp->list;
                entry = (lastentry != head) ? lastentry->next : NULL;
        }

        return i40iw_qp_from_entry(entry);
}

/**
 * i40iw_change_l2params - given the new l2 parameters, change all qps
 * @vsi: pointer to the vsi structure
 * @l2params: new parameters from l2
 */
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
{
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_sc_qp *qp = NULL;
        bool qs_handle_change = false;
        unsigned long flags;
        u16 qs_handle;
        int i;

        vsi->mss = l2params->mss;

        i40iw_fill_qos_list(l2params->qs_handle_list);
        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                qs_handle = l2params->qs_handle_list[i];
                if (vsi->qos[i].qs_handle != qs_handle)
                        qs_handle_change = true;
                spin_lock_irqsave(&vsi->qos[i].lock, flags);
                qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                while (qp) {
                        if (qs_handle_change) {
                                qp->qs_handle = qs_handle;
                                /* issue cqp suspend command */
                                i40iw_qp_suspend_resume(dev, qp, true);
                        }
                        qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                }
                spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
                vsi->qos[i].qs_handle = qs_handle;
        }
}

/**
 * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
 * @qp: qp to be removed from qos
 */
static void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
{
        struct i40iw_sc_vsi *vsi = qp->vsi;
        unsigned long flags;

        if (!qp->on_qoslist)
                return;
        spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
        list_del(&qp->list);
        spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_qp_add_qos - called during setctx for qp to be added to qos
 * @qp: qp to be added to qos
 */
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
{
        struct i40iw_sc_vsi *vsi = qp->vsi;
        unsigned long flags;

        if (qp->on_qoslist)
                return;
        spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
        qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
        list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
        qp->on_qoslist = true;
        spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 * @abi_ver: ABI version from user context, -1 if not valid
 */
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
                             struct i40iw_sc_pd *pd,
                             u16 pd_id,
                             int abi_ver)
{
        pd->size = sizeof(*pd);
        pd->pd_id = pd_id;
        pd->abi_ver = abi_ver;
        pd->dev = dev;
}

/**
 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
 * @wqsize: size of the wq (sq, rq, srq) to be encoded
 * @cqpsq: set for the cqp sq, whose encoded size is 1 more than other wq's
 */
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
{
        u8 encoded_size = 0;

        /* cqp sq's hw coded value starts from 1 for size of 4
         * while it starts from 0 for qp's wq's.
         */
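        /* e.g. wqsize 2048: 2048 >> 2 = 512, which halves nine times
         * before reaching zero, so this returns 9 for a qp wq and 10
         * for the cqp sq
         */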
        if (cqpsq)
                encoded_size = 1;
        wqsize >>= 2;
        while (wqsize >>= 1)
                encoded_size++;
        return encoded_size;
}

/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
                                                struct i40iw_cqp_init_info *info)
{
        u8 hw_sq_size;

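        /* sq_size must be a power of two within [SQSIZE_4, SQSIZE_2048] */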
        if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
            (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
            ((info->sq_size & (info->sq_size - 1))))
                return I40IW_ERR_INVALID_SIZE;

        hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
        cqp->size = sizeof(*cqp);
        cqp->sq_size = info->sq_size;
        cqp->hw_sq_size = hw_sq_size;
        cqp->sq_base = info->sq;
        cqp->host_ctx = info->host_ctx;
        cqp->sq_pa = info->sq_pa;
        cqp->host_ctx_pa = info->host_ctx_pa;
        cqp->dev = info->dev;
        cqp->struct_ver = info->struct_ver;
        cqp->scratch_array = info->scratch_array;
        cqp->polarity = 0;
        cqp->en_datacenter_tcp = info->en_datacenter_tcp;
        cqp->enabled_vf_count = info->enabled_vf_count;
        cqp->hmc_profile = info->hmc_profile;
        info->dev->cqp = cqp;

        I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
        cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;

        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
                    __func__, cqp->sq_size, cqp->hw_sq_size,
                    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
        return 0;
}

/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
                                                  u16 *maj_err,
                                                  u16 *min_err)
{
        u64 temp;
        u32 cnt = 0, p1, p2, val = 0, err_code;
        enum i40iw_status_code ret_code;

        *maj_err = 0;
        *min_err = 0;

        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
                                          128,
                                          I40IW_SD_BUF_ALIGNMENT);

        if (ret_code)
                goto exit;

        temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
               LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

        set_64bit_val(cqp->host_ctx, 0, temp);
        set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
        temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
               LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
        set_64bit_val(cqp->host_ctx, 16, temp);
        set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
        set_64bit_val(cqp->host_ctx, 32, 0);
        set_64bit_val(cqp->host_ctx, 40, 0);
        set_64bit_val(cqp->host_ctx, 48, 0);
        set_64bit_val(cqp->host_ctx, 56, 0);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
                        cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

        p1 = RS_32_1(cqp->host_ctx_pa, 32);
        p2 = (u32)cqp->host_ctx_pa;

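        /* program the 64-bit host context PA as high/low 32-bit halves */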
        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
                        ret_code = I40IW_ERR_TIMEOUT;
                        /*
                         * read PFPE_CQPERRORCODES register to get the minor
                         * and major error code
                         */
                        if (cqp->dev->is_pf)
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
                        else
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
                        *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
                        goto exit;
                }
                udelay(I40IW_SLEEP_COUNT);
                if (cqp->dev->is_pf)
                        val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
                else
                        val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
        } while (!val);

exit:
        if (!ret_code)
                cqp->process_cqp_sds = i40iw_update_sds_noccq;
        return ret_code;
}

/**
 * i40iw_sc_cqp_post_sq - post to cqp's sq
 * @cqp: struct for cqp hw
 */
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
{
        if (cqp->dev->is_pf)
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
        else
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));

        i40iw_debug(cqp->dev,
                    I40IW_DEBUG_WQE,
                    "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
                    __func__,
                    cqp->sq_ring.head,
                    cqp->sq_ring.tail,
                    cqp->sq_ring.size);
}

/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
        u64 *wqe = NULL;
        u32     wqe_idx;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
                i40iw_debug(cqp->dev,
                            I40IW_DEBUG_WQE,
                            "%s: ring is full head %x tail %x size %x\n",
                            __func__,
                            cqp->sq_ring.head,
                            cqp->sq_ring.tail,
                            cqp->sq_ring.size);
                return NULL;
        }
        I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
        if (ret_code)
                return NULL;
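        /* head wrapped back to index 0: flip the valid-bit polarity so
         * hardware can distinguish newly posted WQEs from stale ones
         */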
        if (!wqe_idx)
                cqp->polarity = !cqp->polarity;

        wqe = cqp->sq_base[wqe_idx].elem;
        cqp->scratch_array[wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);

        return wqe;
}

/**
 * i40iw_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
{
        u32 cnt = 0, val = 1;
        enum i40iw_status_code ret_code = 0;
        u32 cqpstat_addr;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
                cqpstat_addr = I40E_PFPE_CCQPSTATUS;
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
                cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        ret_code = I40IW_ERR_TIMEOUT;
                        break;
                }
                udelay(I40IW_SLEEP_COUNT);
                val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
        } while (val);

        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
        return ret_code;
}

/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se;
        u8 arm_seq_num;

        /* write to cq doorbell shadow area */
        /* arm next se should always be zero */
        get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
                   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
                   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
                   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

        wmb();       /* make sure shadow area is updated before arming */

        if (ccq->dev->is_pf)
                i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
        else
                i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}

/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
                                        struct i40iw_sc_cq *ccq,
                                        struct i40iw_ccq_cqe_info *info)
{
        u64 qp_ctx, temp, temp1;
        u64 *cqe;
        struct i40iw_sc_cqp *cqp;
        u32 wqe_idx;
        u8 polarity;
        enum i40iw_status_code ret_code = 0;

        if (ccq->cq_uk.avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

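        /* the qword at offset 24 carries the valid bit; an entry is new
         * only if its polarity matches the consumer's current polarity
         */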
        get_64bit_val(cqe, 24, &temp);
        polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
        if (polarity != ccq->cq_uk.polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        get_64bit_val(cqe, 8, &qp_ctx);
        cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
        info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
        info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        if (info->error) {
                info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
                info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        }
        wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
        info->scratch = cqp->scratch_array[wqe_idx];

        get_64bit_val(cqe, 16, &temp1);
        info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
        get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
        info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
        info->cqp = cqp;

        /*  move the head for cq */
        I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
        if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
                ccq->cq_uk.polarity ^= 1;

        /* update cq tail in cq shadow memory also */
        I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
        set_64bit_val(ccq->cq_uk.shadow_area,
                      0,
                      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
        wmb(); /* write shadow area before tail */
        I40IW_RING_MOVE_TAIL(cqp->sq_ring);
        ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;

        return ret_code;
}

/**
 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
                                        struct i40iw_sc_cqp *cqp,
                                        u8 op_code,
                                        struct i40iw_ccq_cqe_info *compl_info)
{
        struct i40iw_ccq_cqe_info info;
        struct i40iw_sc_cq *ccq;
        enum i40iw_status_code ret_code = 0;
        u32 cnt = 0;

        memset(&info, 0, sizeof(info));
        ccq = cqp->dev->ccq;
        while (1) {
                if (cnt++ > I40IW_DONE_COUNT)
                        return I40IW_ERR_TIMEOUT;

                if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
                        udelay(I40IW_SLEEP_COUNT);
                        continue;
                }

                if (info.error) {
                        ret_code = I40IW_ERR_CQP_COMPL_ERROR;
                        break;
                }
                /* not the opcode we are waiting for; log the mismatch */
                if (op_code != info.op_code) {
                        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                                    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
                                    __func__, op_code, info.op_code);
                }
                /* success, exit out of the loop */
                if (op_code == info.op_code)
                        break;
        }

        if (compl_info)
                memcpy(compl_info, &info, sizeof(*compl_info));

        return ret_code;
}

/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_cqp_manage_push_page_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
                return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, info->qs_handle);

        header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
                 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table - manage the HMC PM function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: true to free the HMC function entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 vf_index,
                                bool free_pm_fcn,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (vf_index >= I40IW_MAX_VF_PER_PF)
                return I40IW_ERR_INVALID_VF_ID;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_profile_type: type of profile to set
 * @vf_num: vf number for profile
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_profile_type,
                                u8 vf_num, bool post_sq,
                                bool poll_registers)
{
        u64 *wqe;
        u64 header;
        u32 val, tail, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16,
                      (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
                                LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));

        header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
                       LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SET_HMC_RESOURCE_PROFILE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (poll_registers)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
                else
                        ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
                                                                 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
                                                                 NULL);
        }

        return ret_code;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}

/**
 * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        u8 hmc_fn_id,
                                        struct i40iw_dma_mem *commit_fpm_mem,
                                        bool post_sq,
                                        u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, commit_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);

                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_commit_fpm_values_done(cqp);
        }

        return ret_code;
}

/**
 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        u8 hmc_fn_id,
                                        struct i40iw_dma_mem *query_fpm_mem,
                                        bool post_sq,
                                        u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, query_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        /* read the tail from CQP_TAIL register */
        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);

        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_query_fpm_values_done(cqp);
        }

        return ret_code;
}

/**
 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_add_arp_cache_entry_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 temp, header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 8, info->reach_max);

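        /* pack the 6-byte MAC into the low 48 bits, mac_addr[0] as the
         * most significant byte
         */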
        temp = info->mac_addr[5] |
               LS_64_1(info->mac_addr[4], 8) |
               LS_64_1(info->mac_addr[3], 16) |
               LS_64_1(info->mac_addr[2], 24) |
               LS_64_1(info->mac_addr[1], 32) |
               LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 16, temp);

        header = info->arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
                 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        u16 arp_index,
                                        bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index of the entry to query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u16 arp_index,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_apbvt_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, info->port);

        header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
                 LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started. For passive connections, when
 * a listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local
 * ip address and tcp port. When SYN is received (passive connections) or
 * sent (active connections), this routine is called with entry type of
 * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad is passed in info.
 *
 * When the iwarp connection is done and its state moves to RTS, the quad hash entry in
 * the hardware points to iwarp's qp number and requires no further calls from the driver.
 */
static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
                                        struct i40iw_sc_cqp *cqp,
                                        struct i40iw_qhash_table_info *info,
                                        u64 scratch,
                                        bool post_sq)
{
        u64 *wqe;
        u64 qw1 = 0;
        u64 qw2 = 0;
        u64 temp;
        struct i40iw_sc_vsi *vsi = info->vsi;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        temp = info->mac_addr[5] |
                LS_64_1(info->mac_addr[4], 8) |
                LS_64_1(info->mac_addr[3], 16) |
                LS_64_1(info->mac_addr[2], 24) |
                LS_64_1(info->mac_addr[1], 32) |
                LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 0, temp);

        qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
              LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
        if (info->ipv4_valid) {
                set_64bit_val(wqe,
                              48,
                              LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
        } else {
                set_64bit_val(wqe,
                              56,
                              LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
                              LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));

                set_64bit_val(wqe,
                              48,
                              LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
                              LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
        }
        qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
        if (info->vlan_valid)
                qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
        set_64bit_val(wqe, 16, qw2);
        if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
                qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
                if (!info->ipv4_valid) {
                        set_64bit_val(wqe,
                                      40,
                                      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
                                      LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
                        set_64bit_val(wqe,
                                      32,
                                      LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
                                      LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
                } else {
                        set_64bit_val(wqe,
                                      32,
                                      LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
                }
        }

        set_64bit_val(wqe, 8, qw1);
        temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
               LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
               LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
               LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
               LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
               LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);

        i40iw_insert_wqe_hdr(wqe, temp);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
                                        struct i40iw_sc_cqp *cqp,
                                        u64 scratch,
                                        bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_local_mac_ipaddr_entry_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 temp, header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        temp = info->mac_addr[5] |
                LS_64_1(info->mac_addr[4], 8) |
                LS_64_1(info->mac_addr[3], 16) |
                LS_64_1(info->mac_addr[2], 24) |
                LS_64_1(info->mac_addr[1], 32) |
                LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 32, temp);

        header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac address delete
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 entry_idx,
                                u8 ignore_ref_count,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
                 LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_cqp_nop - send a nop wqe
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
                                               u64 scratch,
                                               bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_ceq_init - initialize ceq
 * @ceq: ceq sc structure
 * @info: ceq initialization info
 */
static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
                                                struct i40iw_ceq_init_info *info)
{
        u32 pble_obj_cnt;

        if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
            (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
                return I40IW_ERR_INVALID_SIZE;

        if (info->ceq_id >= I40IW_MAX_CEQID)
                return I40IW_ERR_INVALID_CEQ_ID;

        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
                return I40IW_ERR_INVALID_PBLE_INDEX;

        ceq->size = sizeof(*ceq);
        ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
        ceq->ceq_id = info->ceq_id;
        ceq->dev = info->dev;
        ceq->elem_cnt = info->elem_cnt;
        ceq->ceq_elem_pa = info->ceqe_pa;
        ceq->virtual_map = info->virtual_map;

        ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
        ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
        ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);

        ceq->tph_en = info->tph_en;
        ceq->tph_val = info->tph_val;
1426        ceq->polarity = 1;
1427        I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
1428        ceq->dev->ceq[info->ceq_id] = ceq;
1429
1430        return 0;
1431}
1432
1433/**
1434 * i40iw_sc_ceq_create - create ceq wqe
1435 * @ceq: ceq sc structure
1436 * @scratch: u64 saved to be used during cqp completion
1437 * @post_sq: flag for cqp db to ring
1438 */
1439static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
1440                                                  u64 scratch,
1441                                                  bool post_sq)
1442{
1443        struct i40iw_sc_cqp *cqp;
1444        u64 *wqe;
1445        u64 header;
1446
1447        cqp = ceq->dev->cqp;
1448        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1449        if (!wqe)
1450                return I40IW_ERR_RING_FULL;
1451        set_64bit_val(wqe, 16, ceq->elem_cnt);
1452        set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
1453        set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
1454        set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
1455
1456        header = ceq->ceq_id |
1457                 LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
1458                 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
1459                 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
1460                 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
1461                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1462
1463        i40iw_insert_wqe_hdr(wqe, header);
1464
1465        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
1466                        wqe, I40IW_CQP_WQE_SIZE * 8);
1467
1468        if (post_sq)
1469                i40iw_sc_cqp_post_sq(cqp);
1470        return 0;
1471}
1472
1473/**
1474 * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
1475 * @ceq: ceq sc structure
1476 */
1477static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
1478{
1479        struct i40iw_sc_cqp *cqp;
1480
1481        cqp = ceq->dev->cqp;
1482        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
1483}
1484
1485/**
1486 * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
1487 * @ceq: ceq sc structure
1488 */
1489static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
1490{
1491        struct i40iw_sc_cqp *cqp;
1492
1493        cqp = ceq->dev->cqp;
1494        cqp->process_cqp_sds = i40iw_update_sds_noccq;
1495        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
1496}
1497
1498/**
1499 * i40iw_sc_cceq_create - create cceq
1500 * @ceq: ceq sc structure
1501 * @scratch: u64 saved to be used during cqp completion
1502 */
1503static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
1504{
1505        enum i40iw_status_code ret_code;
1506
1507        ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
1508        if (!ret_code)
1509                ret_code = i40iw_sc_cceq_create_done(ceq);
1510        return ret_code;
1511}
1512
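/*
 * Illustrative sketch (not upstream driver code): one plausible bring-up
 * order for the control CEQ, using only fields validated by
 * i40iw_sc_ceq_init() above. The CEQE ring (info->ceqe_base/ceqe_pa) is
 * assumed to have been allocated by the caller; example_cceq_bringup is
 * a hypothetical helper name.
 */
static enum i40iw_status_code __maybe_unused example_cceq_bringup(
                                struct i40iw_sc_dev *dev,
                                struct i40iw_sc_ceq *ceq,
                                struct i40iw_ceq_init_info *info,
                                u64 scratch)
{
        enum i40iw_status_code ret_code;

        info->dev = dev;
        info->ceq_id = 0;       /* ceq_id/elem_cnt are range-checked by init */
        ret_code = i40iw_sc_ceq_init(ceq, info);
        if (ret_code)
                return ret_code;
        /* posts the CREATE_CEQ WQE and polls for its completion */
        return i40iw_sc_cceq_create(ceq, scratch);
}
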
1513/**
1514 * i40iw_sc_ceq_destroy - destroy ceq
1515 * @ceq: ceq sc structure
1516 * @scratch: u64 saved to be used during cqp completion
1517 * @post_sq: flag for cqp db to ring
1518 */
1519static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
1520                                                   u64 scratch,
1521                                                   bool post_sq)
1522{
1523        struct i40iw_sc_cqp *cqp;
1524        u64 *wqe;
1525        u64 header;
1526
1527        cqp = ceq->dev->cqp;
1528        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1529        if (!wqe)
1530                return I40IW_ERR_RING_FULL;
1531        set_64bit_val(wqe, 16, ceq->elem_cnt);
1532        set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
1533        header = ceq->ceq_id |
1534                 LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
1535                 LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
1536                 LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
1537                 LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
1538                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1539        i40iw_insert_wqe_hdr(wqe, header);
1540        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
1541                        wqe, I40IW_CQP_WQE_SIZE * 8);
1542
1543        if (post_sq)
1544                i40iw_sc_cqp_post_sq(cqp);
1545        return 0;
1546}
1547
1548/**
1549 * i40iw_sc_process_ceq - process ceq
1550 * @dev: sc device struct
1551 * @ceq: ceq sc structure
1552 */
1553static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
1554{
1555        u64 temp;
1556        u64 *ceqe;
1557        struct i40iw_sc_cq *cq = NULL;
1558        u8 polarity;
1559
1560        ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
1561        get_64bit_val(ceqe, 0, &temp);
1562        polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
1563        if (polarity != ceq->polarity)
1564                return cq;
1565
1566        cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
1567
1568        I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
1569        if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
1570                ceq->polarity ^= 1;
1571
1572        if (dev->is_pf)
1573                i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
1574        else
1575                i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
1576
1577        return cq;
1578}
1579
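/*
 * Illustrative sketch (not upstream driver code): CEQs are drained in a
 * loop; i40iw_sc_process_ceq() above acks and returns the completed CQ,
 * or NULL once the valid-bit polarity shows the ring is empty.
 * example_drain_ceq is a hypothetical helper name.
 */
static void __maybe_unused example_drain_ceq(struct i40iw_sc_dev *dev,
                                             struct i40iw_sc_ceq *ceq)
{
        struct i40iw_sc_cq *cq;

        while ((cq = (struct i40iw_sc_cq *)i40iw_sc_process_ceq(dev, ceq))) {
                /* a real handler would poll 'cq' for CQEs here */
                ;
        }
}
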
1580/**
1581 * i40iw_sc_aeq_init - initialize aeq
1582 * @aeq: aeq structure ptr
1583 * @info: aeq initialization info
1584 */
1585static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
1586                                                struct i40iw_aeq_init_info *info)
1587{
1588        u32 pble_obj_cnt;
1589
1590        if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
1591            (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
1592                return I40IW_ERR_INVALID_SIZE;
1593        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1594
1595        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1596                return I40IW_ERR_INVALID_PBLE_INDEX;
1597
1598        aeq->size = sizeof(*aeq);
1599        aeq->polarity = 1;
1600        aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
1601        aeq->dev = info->dev;
1602        aeq->elem_cnt = info->elem_cnt;
1603
1604        aeq->aeq_elem_pa = info->aeq_elem_pa;
1605        I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
1607
1608        aeq->virtual_map = info->virtual_map;
1609        aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
1610        aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
1611        aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
1612        info->dev->aeq = aeq;
1613        return 0;
1614}
1615
1616/**
1617 * i40iw_sc_aeq_create - create aeq
1618 * @aeq: aeq structure ptr
1619 * @scratch: u64 saved to be used during cqp completion
1620 * @post_sq: flag for cqp db to ring
1621 */
1622static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
1623                                                  u64 scratch,
1624                                                  bool post_sq)
1625{
1626        u64 *wqe;
1627        struct i40iw_sc_cqp *cqp;
1628        u64 header;
1629
1630        cqp = aeq->dev->cqp;
1631        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1632        if (!wqe)
1633                return I40IW_ERR_RING_FULL;
1634        set_64bit_val(wqe, 16, aeq->elem_cnt);
1635        set_64bit_val(wqe, 32,
1636                      (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
1637        set_64bit_val(wqe, 48,
1638                      (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
1639
1640        header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
1641                 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
1642                 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
1643                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1644
1645        i40iw_insert_wqe_hdr(wqe, header);
1646        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
1647                        wqe, I40IW_CQP_WQE_SIZE * 8);
1648        if (post_sq)
1649                i40iw_sc_cqp_post_sq(cqp);
1650        return 0;
1651}
1652
1653/**
1654 * i40iw_sc_aeq_destroy - destroy aeq during close
1655 * @aeq: aeq structure ptr
1656 * @scratch: u64 saved to be used during cqp completion
1657 * @post_sq: flag for cqp db to ring
1658 */
1659static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
1660                                                   u64 scratch,
1661                                                   bool post_sq)
1662{
1663        u64 *wqe;
1664        struct i40iw_sc_cqp *cqp;
1665        u64 header;
1666
1667        cqp = aeq->dev->cqp;
1668        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1669        if (!wqe)
1670                return I40IW_ERR_RING_FULL;
1671        set_64bit_val(wqe, 16, aeq->elem_cnt);
1672        set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
1673        header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
1674                 LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
1675                 LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
1676                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1677        i40iw_insert_wqe_hdr(wqe, header);
1678
1679        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
1680                        wqe, I40IW_CQP_WQE_SIZE * 8);
1681        if (post_sq)
1682                i40iw_sc_cqp_post_sq(cqp);
1683        return 0;
1684}
1685
1686/**
1687 * i40iw_sc_get_next_aeqe - get next aeq entry
1688 * @aeq: aeq structure ptr
1689 * @info: aeqe info to be returned
1690 */
1691static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
1692                                                     struct i40iw_aeqe_info *info)
1693{
1694        u64 temp, compl_ctx;
1695        u64 *aeqe;
1696        u16 wqe_idx;
1697        u8 ae_src;
1698        u8 polarity;
1699
1700        aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
1701        get_64bit_val(aeqe, 0, &compl_ctx);
1702        get_64bit_val(aeqe, 8, &temp);
1703        polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
1704
1705        if (aeq->polarity != polarity)
1706                return I40IW_ERR_QUEUE_EMPTY;
1707
1708        i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
1709
1710        ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
1711        wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
1712        info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
1713        info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
1714        info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
1715        info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
1716        info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
1717        info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
1718        switch (ae_src) {
1719        case I40IW_AE_SOURCE_RQ:
1720        case I40IW_AE_SOURCE_RQ_0011:
1721                info->qp = true;
1722                info->wqe_idx = wqe_idx;
1723                info->compl_ctx = compl_ctx;
1724                break;
1725        case I40IW_AE_SOURCE_CQ:
1726        case I40IW_AE_SOURCE_CQ_0110:
1727        case I40IW_AE_SOURCE_CQ_1010:
1728        case I40IW_AE_SOURCE_CQ_1110:
1729                info->cq = true;
1730                info->compl_ctx = LS_64_1(compl_ctx, 1);
1731                break;
1732        case I40IW_AE_SOURCE_SQ:
1733        case I40IW_AE_SOURCE_SQ_0111:
1734                info->qp = true;
1735                info->sq = true;
1736                info->wqe_idx = wqe_idx;
1737                info->compl_ctx = compl_ctx;
1738                break;
1739        case I40IW_AE_SOURCE_IN_RR_WR:
1740        case I40IW_AE_SOURCE_IN_RR_WR_1011:
1741                info->qp = true;
1742                info->compl_ctx = compl_ctx;
1743                info->in_rdrsp_wr = true;
1744                break;
1745        case I40IW_AE_SOURCE_OUT_RR:
1746        case I40IW_AE_SOURCE_OUT_RR_1111:
1747                info->qp = true;
1748                info->compl_ctx = compl_ctx;
1749                info->out_rdrsp = true;
1750                break;
1751        default:
1752                break;
1753        }
1754        I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
1755        if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
1756                aeq->polarity ^= 1;
1757        return 0;
1758}
1759
1760/**
1761 * i40iw_sc_repost_aeq_entries - repost completed aeq entries
1762 * @dev: sc device struct
1763 * @count: allocate count
1764 */
1765static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
1766                                                          u32 count)
1767{
1768        if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
1769                return I40IW_ERR_INVALID_SIZE;
1770
1771        if (dev->is_pf)
1772                i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
1773        else
1774                i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
1775
1776        return 0;
1777}
1778
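/*
 * Illustrative sketch (not upstream driver code): a typical AEQ service
 * loop. Entries are consumed until i40iw_sc_get_next_aeqe() reports
 * I40IW_ERR_QUEUE_EMPTY, then the freed slots are handed back to
 * hardware in one AEQALLOC write; i40iw_sc_repost_aeq_entries()
 * range-checks the count. example_service_aeq is a hypothetical name.
 */
static void __maybe_unused example_service_aeq(struct i40iw_sc_dev *dev,
                                               struct i40iw_sc_aeq *aeq)
{
        struct i40iw_aeqe_info info = {};
        u32 processed = 0;

        while (!i40iw_sc_get_next_aeqe(aeq, &info)) {
                processed++;
                /* dispatch on info.ae_id and the info.qp/info.cq flags */
        }
        if (processed)
                i40iw_sc_repost_aeq_entries(dev, processed);
}
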
1779/**
1780 * i40iw_sc_aeq_create_done - poll for aeq create cqp op to complete
1781 * @aeq: aeq structure ptr
1782 */
1783static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
1784{
1785        struct i40iw_sc_cqp *cqp;
1786
1787        cqp = aeq->dev->cqp;
1788        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
1789}
1790
1791/**
1792 * i40iw_sc_aeq_destroy_done - poll for aeq destroy cqp op to complete
1793 * @aeq: aeq structure ptr
1794 */
1795static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
1796{
1797        struct i40iw_sc_cqp *cqp;
1798
1799        cqp = aeq->dev->cqp;
1800        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
1801}
1802
1803/**
1804 * i40iw_sc_ccq_init - initialize control cq
1805 * @cq: sc's cq struct
1806 * @info: info for control cq initialization
1807 */
1808static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
1809                                                struct i40iw_ccq_init_info *info)
1810{
1811        u32 pble_obj_cnt;
1812
1813        if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
1814                return I40IW_ERR_INVALID_SIZE;
1815
1816        if (info->ceq_id > I40IW_MAX_CEQID)
1817                return I40IW_ERR_INVALID_CEQ_ID;
1818
1819        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1820
1821        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1822                return I40IW_ERR_INVALID_PBLE_INDEX;
1823
1824        cq->cq_pa = info->cq_pa;
1825        cq->cq_uk.cq_base = info->cq_base;
1826        cq->shadow_area_pa = info->shadow_area_pa;
1827        cq->cq_uk.shadow_area = info->shadow_area;
1828        cq->shadow_read_threshold = info->shadow_read_threshold;
1829        cq->dev = info->dev;
1830        cq->ceq_id = info->ceq_id;
1831        cq->cq_uk.cq_size = info->num_elem;
1832        cq->cq_type = I40IW_CQ_TYPE_CQP;
1833        cq->ceqe_mask = info->ceqe_mask;
1834        I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
1835
1836        cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
1837        cq->ceq_id_valid = info->ceq_id_valid;
1838        cq->tph_en = info->tph_en;
1839        cq->tph_val = info->tph_val;
1840        cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
1841
1842        cq->pbl_list = info->pbl_list;
1843        cq->virtual_map = info->virtual_map;
1844        cq->pbl_chunk_size = info->pbl_chunk_size;
1845        cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
1846        cq->cq_uk.polarity = true;
1847
1848        /* following are only for iw cqs so initialize them to zero */
1849        cq->cq_uk.cqe_alloc_reg = NULL;
1850        info->dev->ccq = cq;
1851        return 0;
1852}
1853
1854/**
1855 * i40iw_sc_ccq_create_done - poll cqp for ccq create
1856 * @ccq: ccq sc struct
1857 */
1858static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
1859{
1860        struct i40iw_sc_cqp *cqp;
1861
1862        cqp = ccq->dev->cqp;
1863        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
1864}
1865
1866/**
1867 * i40iw_sc_ccq_create - create control cq
1868 * @ccq: ccq sc struct
1869 * @scratch: u64 saved to be used during cqp completion
1870 * @check_overflow: overflow flag for ccq
1871 * @post_sq: flag for cqp db to ring
1872 */
1873static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
1874                                                  u64 scratch,
1875                                                  bool check_overflow,
1876                                                  bool post_sq)
1877{
1878        u64 *wqe;
1879        struct i40iw_sc_cqp *cqp;
1880        u64 header;
1881        enum i40iw_status_code ret_code;
1882
1883        cqp = ccq->dev->cqp;
1884        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1885        if (!wqe)
1886                return I40IW_ERR_RING_FULL;
1887        set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
1888        set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
1889        set_64bit_val(wqe, 16,
1890                      LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
1891        set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
1892        set_64bit_val(wqe, 40, ccq->shadow_area_pa);
1893        set_64bit_val(wqe, 48,
1894                      (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
1895        set_64bit_val(wqe, 56,
1896                      LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
1897
1898        header = ccq->cq_uk.cq_id |
1899                 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1900                 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
1901                 LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
1902                 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
1903                 LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
1904                 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1905                 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1906                 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
1907                 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1908                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1909
1910        i40iw_insert_wqe_hdr(wqe, header);
1911
1912        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
1913                        wqe, I40IW_CQP_WQE_SIZE * 8);
1914
1915        if (post_sq) {
1916                i40iw_sc_cqp_post_sq(cqp);
1917                ret_code = i40iw_sc_ccq_create_done(ccq);
1918                if (ret_code)
1919                        return ret_code;
1920        }
1921        cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
1922
1923        return 0;
1924}
1925
1926/**
1927 * i40iw_sc_ccq_destroy - destroy ccq during close
1928 * @ccq: ccq sc struct
1929 * @scratch: u64 saved to be used during cqp completion
1930 * @post_sq: flag for cqp db to ring
1931 */
1932static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
1933                                                   u64 scratch,
1934                                                   bool post_sq)
1935{
1936        struct i40iw_sc_cqp *cqp;
1937        u64 *wqe;
1938        u64 header;
1939        enum i40iw_status_code ret_code = 0;
1940        u32 tail, val, error;
1941
1942        cqp = ccq->dev->cqp;
1943        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
1944        if (!wqe)
1945                return I40IW_ERR_RING_FULL;
1946        set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
1947        set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
1948        set_64bit_val(wqe, 40, ccq->shadow_area_pa);
1949
1950        header = ccq->cq_uk.cq_id |
1951                 LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
1952                 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
1953                 LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
1954                 LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
1955                 LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
1956                 LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
1957                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
1958
1959        i40iw_insert_wqe_hdr(wqe, header);
1960
1961        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
1962                        wqe, I40IW_CQP_WQE_SIZE * 8);
1963
1964        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
1965        if (error)
1966                return I40IW_ERR_CQP_COMPL_ERROR;
1967
1968        if (post_sq) {
1969                i40iw_sc_cqp_post_sq(cqp);
1970                ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
1971        }
1972
1973        return ret_code;
1974}
1975
1976/**
1977 * i40iw_sc_cq_init - initialize completion q
1978 * @cq: cq struct
1979 * @info: cq initialization info
1980 */
1981static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
1982                                               struct i40iw_cq_init_info *info)
1983{
1984        u32 __iomem *cqe_alloc_reg = NULL;
1985        enum i40iw_status_code ret_code;
1986        u32 pble_obj_cnt;
1987        u32 arm_offset;
1988
1989        pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
1990
1991        if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
1992                return I40IW_ERR_INVALID_PBLE_INDEX;
1993
1994        cq->cq_pa = info->cq_base_pa;
1995        cq->dev = info->dev;
1996        cq->ceq_id = info->ceq_id;
1997        arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
1998        if (i40iw_get_hw_addr(cq->dev))
1999                cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
2000                                              arm_offset);
2001        info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
2002        ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
2003        if (ret_code)
2004                return ret_code;
2005        cq->virtual_map = info->virtual_map;
2006        cq->pbl_chunk_size = info->pbl_chunk_size;
2007        cq->ceqe_mask = info->ceqe_mask;
2008        cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
2009
2010        cq->shadow_area_pa = info->shadow_area_pa;
2011        cq->shadow_read_threshold = info->shadow_read_threshold;
2012
2013        cq->ceq_id_valid = info->ceq_id_valid;
2014        cq->tph_en = info->tph_en;
2015        cq->tph_val = info->tph_val;
2016
2017        cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2018
2019        return 0;
2020}
2021
2022/**
2023 * i40iw_sc_cq_create - create completion q
2024 * @cq: cq struct
2025 * @scratch: u64 saved to be used during cqp completion
2026 * @check_overflow: flag for overflow check
2027 * @post_sq: flag for cqp db to ring
2028 */
2029static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
2030                                                 u64 scratch,
2031                                                 bool check_overflow,
2032                                                 bool post_sq)
2033{
2034        u64 *wqe;
2035        struct i40iw_sc_cqp *cqp;
2036        u64 header;
2037
2038        if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
2039                return I40IW_ERR_INVALID_CQ_ID;
2040
2041        if (cq->ceq_id > I40IW_MAX_CEQID)
2042                return I40IW_ERR_INVALID_CEQ_ID;
2043
2044        cqp = cq->dev->cqp;
2045        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2046        if (!wqe)
2047                return I40IW_ERR_RING_FULL;
2048
2049        set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2050        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2051        set_64bit_val(wqe,
2052                      16,
2053                      LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2054
2055        set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2056
2057        set_64bit_val(wqe, 40, cq->shadow_area_pa);
2058        set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2059        set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
2060
2061        header = cq->cq_uk.cq_id |
2062                 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2063                 LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
2064                 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2065                 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2066                 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2067                 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2068                 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2069                 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2070                 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2071                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2072
2073        i40iw_insert_wqe_hdr(wqe, header);
2074
2075        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
2076                        wqe, I40IW_CQP_WQE_SIZE * 8);
2077
2078        if (post_sq)
2079                i40iw_sc_cqp_post_sq(cqp);
2080        return 0;
2081}
2082
2083/**
2084 * i40iw_sc_cq_destroy - destroy completion q
2085 * @cq: cq struct
2086 * @scratch: u64 saved to be used during cqp completion
2087 * @post_sq: flag for cqp db to ring
2088 */
2089static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
2090                                                  u64 scratch,
2091                                                  bool post_sq)
2092{
2093        struct i40iw_sc_cqp *cqp;
2094        u64 *wqe;
2095        u64 header;
2096
2097        cqp = cq->dev->cqp;
2098        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2099        if (!wqe)
2100                return I40IW_ERR_RING_FULL;
2101        set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
2102        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2103        set_64bit_val(wqe, 40, cq->shadow_area_pa);
2104        set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2105
2106        header = cq->cq_uk.cq_id |
2107                 LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
2108                 LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
2109                 LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2110                 LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2111                 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2112                 LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2113                 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2114                 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2115                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2116
2117        i40iw_insert_wqe_hdr(wqe, header);
2118
2119        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
2120                        wqe, I40IW_CQP_WQE_SIZE * 8);
2121
2122        if (post_sq)
2123                i40iw_sc_cqp_post_sq(cqp);
2124        return 0;
2125}
2126
2127/**
2128 * i40iw_sc_cq_modify - modify completion queue
2129 * @cq: cq struct
2130 * @info: modification info struct
2131 * @scratch: u64 saved to be used during cqp completion
2132 * @post_sq: flag to post to sq
2133 */
2134static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
2135                                                 struct i40iw_modify_cq_info *info,
2136                                                 u64 scratch,
2137                                                 bool post_sq)
2138{
2139        struct i40iw_sc_cqp *cqp;
2140        u64 *wqe;
2141        u64 header;
2142        u32 cq_size, ceq_id, first_pm_pbl_idx;
2143        u8 pbl_chunk_size;
2144        bool virtual_map, ceq_id_valid, check_overflow;
2145        u32 pble_obj_cnt;
2146
2147        if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
2148                return I40IW_ERR_INVALID_CEQ_ID;
2149
2150        pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2151
2152        if (info->cq_resize && info->virtual_map &&
2153            (info->first_pm_pbl_idx >= pble_obj_cnt))
2154                return I40IW_ERR_INVALID_PBLE_INDEX;
2155
2156        cqp = cq->dev->cqp;
2157        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2158        if (!wqe)
2159                return I40IW_ERR_RING_FULL;
2160
2161        cq->pbl_list = info->pbl_list;
2162        cq->cq_pa = info->cq_pa;
2163        cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
2164
2165        cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
2166        if (info->ceq_change) {
2167                ceq_id_valid = true;
2168                ceq_id = info->ceq_id;
2169        } else {
2170                ceq_id_valid = cq->ceq_id_valid;
2171                ceq_id = ceq_id_valid ? cq->ceq_id : 0;
2172        }
2173        virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
2174        first_pm_pbl_idx = (info->cq_resize ?
2175                            (info->virtual_map ? info->first_pm_pbl_idx : 0) :
2176                            (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
2177        pbl_chunk_size = (info->cq_resize ?
2178                          (info->virtual_map ? info->pbl_chunk_size : 0) :
2179                          (cq->virtual_map ? cq->pbl_chunk_size : 0));
2180        check_overflow = info->check_overflow_change ? info->check_overflow :
2181                         cq->check_overflow;
2182        cq->cq_uk.cq_size = cq_size;
2183        cq->ceq_id_valid = ceq_id_valid;
2184        cq->ceq_id = ceq_id;
2185        cq->virtual_map = virtual_map;
2186        cq->first_pm_pbl_idx = first_pm_pbl_idx;
2187        cq->pbl_chunk_size = pbl_chunk_size;
2188        cq->check_overflow = check_overflow;
2189
2190        set_64bit_val(wqe, 0, cq_size);
2191        set_64bit_val(wqe, 8, RS_64_1(cq, 1));
2192        set_64bit_val(wqe, 16,
2193                      LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
2194        set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
2195        set_64bit_val(wqe, 40, cq->shadow_area_pa);
2196        set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
2197        set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
2198
2199        header = cq->cq_uk.cq_id |
2200                 LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
2201                 LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
2202                 LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
2203                 LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
2204                 LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
2205                 LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
2206                 LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
2207                 LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
2208                 LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
2209                 LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
2210                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2211
2212        i40iw_insert_wqe_hdr(wqe, header);
2213
2214        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
2215                        wqe, I40IW_CQP_WQE_SIZE * 8);
2216
2217        if (post_sq)
2218                i40iw_sc_cqp_post_sq(cqp);
2219        return 0;
2220}
2221
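/*
 * Illustrative sketch (not upstream driver code): resizing a CQ in place
 * via i40iw_sc_cq_modify(). Only the resize-related fields are set; the
 * new physically contiguous buffer (new_pa/new_size) is assumed to be
 * allocated by the caller. example_cq_resize is a hypothetical name.
 */
static enum i40iw_status_code __maybe_unused example_cq_resize(
                                struct i40iw_sc_cq *cq,
                                u64 new_pa,
                                u32 new_size,
                                u64 scratch)
{
        struct i40iw_modify_cq_info info = {};

        info.cq_resize = true;
        info.cq_pa = new_pa;
        info.cq_size = new_size;
        info.virtual_map = false;       /* buffer is not PBL-mapped */
        info.shadow_read_threshold = cq->shadow_read_threshold;
        return i40iw_sc_cq_modify(cq, &info, scratch, true);
}
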
2222/**
2223 * i40iw_sc_qp_init - initialize qp
2224 * @qp: sc qp
2225 * @info: initialization qp info
2226 */
2227static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
2228                                               struct i40iw_qp_init_info *info)
2229{
2230        u32 __iomem *wqe_alloc_reg = NULL;
2231        enum i40iw_status_code ret_code;
2232        u32 pble_obj_cnt;
2233        u8 wqe_size;
2234        u32 offset;
2235
2236        qp->dev = info->pd->dev;
2237        qp->vsi = info->vsi;
2238        qp->sq_pa = info->sq_pa;
2239        qp->rq_pa = info->rq_pa;
2240        qp->hw_host_ctx_pa = info->host_ctx_pa;
2241        qp->q2_pa = info->q2_pa;
2242        qp->shadow_area_pa = info->shadow_area_pa;
2243
2244        qp->q2_buf = info->q2;
2245        qp->pd = info->pd;
2246        qp->hw_host_ctx = info->host_ctx;
2247        offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
2248        if (i40iw_get_hw_addr(qp->pd->dev))
2249                wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
2250                                              offset);
2251
2252        info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
2253        info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
2254        ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
2255        if (ret_code)
2256                return ret_code;
2257        qp->virtual_map = info->virtual_map;
2258
2259        pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2260
2261        if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
2262            (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
2263                return I40IW_ERR_INVALID_PBLE_INDEX;
2264
2265        qp->llp_stream_handle = (void *)(-1);
2266        qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
2267
2268        qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
2269                                                    false);
2270        i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
2271                    __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
2272
2273        switch (qp->pd->abi_ver) {
2274        case 4:
2275                ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
2276                                                       &wqe_size);
2277                if (ret_code)
2278                        return ret_code;
2279                break;
2280        case 5: /* fallthrough until next ABI version */
2281        default:
2282                if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
2283                        return I40IW_ERR_INVALID_FRAG_COUNT;
2284                wqe_size = I40IW_MAX_WQE_SIZE_RQ;
2285                break;
2286        }
2287        qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
2288                                (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
2289        i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
2290                    "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
2291                    __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
2292        qp->sq_tph_val = info->sq_tph_val;
2293        qp->rq_tph_val = info->rq_tph_val;
2294        qp->sq_tph_en = info->sq_tph_en;
2295        qp->rq_tph_en = info->rq_tph_en;
2296        qp->rcv_tph_en = info->rcv_tph_en;
2297        qp->xmit_tph_en = info->xmit_tph_en;
2298        qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
2299        qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
2300
2301        return 0;
2302}
2303
2304/**
2305 * i40iw_sc_qp_create - create qp
2306 * @qp: sc qp
2307 * @info: qp create info
2308 * @scratch: u64 saved to be used during cqp completion
2309 * @post_sq: flag for cqp db to ring
2310 */
2311static enum i40iw_status_code i40iw_sc_qp_create(
2312                                struct i40iw_sc_qp *qp,
2313                                struct i40iw_create_qp_info *info,
2314                                u64 scratch,
2315                                bool post_sq)
2316{
2317        struct i40iw_sc_cqp *cqp;
2318        u64 *wqe;
2319        u64 header;
2320
2321        if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
2322            (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
2323                return I40IW_ERR_INVALID_QP_ID;
2324
2325        cqp = qp->pd->dev->cqp;
2326        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2327        if (!wqe)
2328                return I40IW_ERR_RING_FULL;
2329
2330        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2331
2332        set_64bit_val(wqe, 40, qp->shadow_area_pa);
2333
2334        header = qp->qp_uk.qp_id |
2335                 LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
2336                 LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
2337                 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2338                 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2339                 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2340                 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2341                 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2342                 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2343                 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2344                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2345
2346        i40iw_insert_wqe_hdr(wqe, header);
2347        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
2348                        wqe, I40IW_CQP_WQE_SIZE * 8);
2349
2350        if (post_sq)
2351                i40iw_sc_cqp_post_sq(cqp);
2352        return 0;
2353}
2354
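/*
 * Illustrative sketch (not upstream driver code): creating a QP whose
 * host context was already written with i40iw_sc_qp_setctx(). Flags left
 * zero (ord_valid, tcp_ctx_valid, ...) take their defaults.
 * example_qp_create is a hypothetical name, and I40IW_QP_STATE_IDLE is
 * assumed from the driver's QP state definitions.
 */
static enum i40iw_status_code __maybe_unused example_qp_create(
                                struct i40iw_sc_qp *qp,
                                u64 scratch)
{
        struct i40iw_create_qp_info info = {};

        info.cq_num_valid = true;
        info.next_iwarp_state = I40IW_QP_STATE_IDLE;
        return i40iw_sc_qp_create(qp, &info, scratch, true);
}
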
2355/**
2356 * i40iw_sc_qp_modify - modify qp cqp wqe
2357 * @qp: sc qp
2358 * @info: modify qp info
2359 * @scratch: u64 saved to be used during cqp completion
2360 * @post_sq: flag for cqp db to ring
2361 */
2362static enum i40iw_status_code i40iw_sc_qp_modify(
2363                                struct i40iw_sc_qp *qp,
2364                                struct i40iw_modify_qp_info *info,
2365                                u64 scratch,
2366                                bool post_sq)
2367{
2368        u64 *wqe;
2369        struct i40iw_sc_cqp *cqp;
2370        u64 header;
2371        u8 term_actions = 0;
2372        u8 term_len = 0;
2373
2374        cqp = qp->pd->dev->cqp;
2375        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2376        if (!wqe)
2377                return I40IW_ERR_RING_FULL;
2378        if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
2379                if (info->dont_send_fin)
2380                        term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
2381                if (info->dont_send_term)
2382                        term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
2383                if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
2384                    (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
2385                        term_len = info->termlen;
2386        }
2387
2388        set_64bit_val(wqe,
2389                      8,
2390                      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
2391
2392        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2393        set_64bit_val(wqe, 40, qp->shadow_area_pa);
2394
2395        header = qp->qp_uk.qp_id |
2396                 LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
2397                 LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
2398                 LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
2399                 LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
2400                 LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
2401                 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
2402                 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
2403                 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2404                 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
2405                 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2406                 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
2407                 LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
2408                 LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
2409                 LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
2410                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2411
2412        i40iw_insert_wqe_hdr(wqe, header);
2413
2414        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
2415                        wqe, I40IW_CQP_WQE_SIZE * 8);
2416
2417        if (post_sq)
2418                i40iw_sc_cqp_post_sq(cqp);
2419        return 0;
2420}
2421
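/*
 * Illustrative sketch (not upstream driver code): driving a QP to
 * TERMINATE via i40iw_sc_qp_modify(). With both dont_send_* flags left
 * clear, term_actions above resolves to "send TERM and FIN", and
 * info.termlen supplies the TERM message length for the WQE.
 * example_qp_terminate is a hypothetical name.
 */
static enum i40iw_status_code __maybe_unused example_qp_terminate(
                                struct i40iw_sc_qp *qp,
                                u8 termlen,
                                u64 scratch)
{
        struct i40iw_modify_qp_info info = {};

        info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
        info.termlen = termlen;
        return i40iw_sc_qp_modify(qp, &info, scratch, true);
}
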
2422/**
2423 * i40iw_sc_qp_destroy - cqp destroy qp
2424 * @qp: sc qp
2425 * @scratch: u64 saved to be used during cqp completion
2426 * @remove_hash_idx: flag to remove hash idx
2427 * @ignore_mw_bnd: memory window bind flag
2428 * @post_sq: flag for cqp db to ring
2429 */
2430static enum i40iw_status_code i40iw_sc_qp_destroy(
2431                                        struct i40iw_sc_qp *qp,
2432                                        u64 scratch,
2433                                        bool remove_hash_idx,
2434                                        bool ignore_mw_bnd,
2435                                        bool post_sq)
2436{
2437        u64 *wqe;
2438        struct i40iw_sc_cqp *cqp;
2439        u64 header;
2440
2441        i40iw_qp_rem_qos(qp);
2442        cqp = qp->pd->dev->cqp;
2443        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2444        if (!wqe)
2445                return I40IW_ERR_RING_FULL;
2446        set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
2447        set_64bit_val(wqe, 40, qp->shadow_area_pa);
2448
2449        header = qp->qp_uk.qp_id |
2450                 LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
2451                 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
2452                 LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
2453                 LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
2454                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2455
2456        i40iw_insert_wqe_hdr(wqe, header);
2457        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
2458                        wqe, I40IW_CQP_WQE_SIZE * 8);
2459
2460        if (post_sq)
2461                i40iw_sc_cqp_post_sq(cqp);
2462        return 0;
2463}
2464
2465/**
2466 * i40iw_sc_qp_flush_wqes - flush qp's wqe
2467 * @qp: sc qp
2468 * @info: flush information
2469 * @scratch: u64 saved to be used during cqp completion
2470 * @post_sq: flag for cqp db to ring
2471 */
2472static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
2473                                struct i40iw_sc_qp *qp,
2474                                struct i40iw_qp_flush_info *info,
2475                                u64 scratch,
2476                                bool post_sq)
2477{
2478        u64 temp = 0;
2479        u64 *wqe;
2480        struct i40iw_sc_cqp *cqp;
2481        u64 header;
2482        bool flush_sq = false, flush_rq = false;
2483
2484        if (info->rq && !qp->flush_rq)
2485                flush_rq = true;
2486
2487        if (info->sq && !qp->flush_sq)
2488                flush_sq = true;
2489
2490        qp->flush_sq |= flush_sq;
2491        qp->flush_rq |= flush_rq;
2492        if (!flush_sq && !flush_rq) {
2493                if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
2494                        return 0;
2495        }
2496
2497        cqp = qp->pd->dev->cqp;
2498        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2499        if (!wqe)
2500                return I40IW_ERR_RING_FULL;
2501        if (info->userflushcode) {
2502                if (flush_rq) {
2503                        temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
2504                                LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
2505                }
2506                if (flush_sq) {
2507                        temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
2508                                LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
2509                }
2510        }
2511        set_64bit_val(wqe, 16, temp);
2512
2513        temp = (info->generate_ae) ?
2514                info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
2515
2516        set_64bit_val(wqe, 8, temp);
2517
2518        header = qp->qp_uk.qp_id |
2519                 LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
2520                 LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
2521                 LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
2522                 LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
2523                 LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
2524                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2525
2526        i40iw_insert_wqe_hdr(wqe, header);
2527
2528        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
2529                        wqe, I40IW_CQP_WQE_SIZE * 8);
2530
2531        if (post_sq)
2532                i40iw_sc_cqp_post_sq(cqp);
2533        return 0;
2534}
2535
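/*
 * Illustrative sketch (not upstream driver code): flushing both work
 * queues of a QP with the default flush codes (userflushcode clear);
 * i40iw_sc_qp_flush_wqes() above skips the WQE if both queues were
 * already flushed. example_qp_flush is a hypothetical name.
 */
static enum i40iw_status_code __maybe_unused example_qp_flush(
                                struct i40iw_sc_qp *qp,
                                u64 scratch)
{
        struct i40iw_qp_flush_info info = {};

        info.sq = true;
        info.rq = true;
        return i40iw_sc_qp_flush_wqes(qp, &info, scratch, true);
}
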
2536/**
2537 * i40iw_sc_qp_upload_context - upload qp's context
2538 * @dev: sc device struct
2539 * @info: upload context info ptr for return
2540 * @scratch: u64 saved to be used during cqp completion
2541 * @post_sq: flag for cqp db to ring
2542 */
2543static enum i40iw_status_code i40iw_sc_qp_upload_context(
2544                                        struct i40iw_sc_dev *dev,
2545                                        struct i40iw_upload_context_info *info,
2546                                        u64 scratch,
2547                                        bool post_sq)
2548{
2549        u64 *wqe;
2550        struct i40iw_sc_cqp *cqp;
2551        u64 header;
2552
2553        cqp = dev->cqp;
2554        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2555        if (!wqe)
2556                return I40IW_ERR_RING_FULL;
2557        set_64bit_val(wqe, 16, info->buf_pa);
2558
2559        header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
2560                 LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
2561                 LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
2562                 LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
2563                 LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
2564                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2565
2566        i40iw_insert_wqe_hdr(wqe, header);
2567
2568        i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
2569                        wqe, I40IW_CQP_WQE_SIZE * 8);
2570
2571        if (post_sq)
2572                i40iw_sc_cqp_post_sq(cqp);
2573        return 0;
2574}
2575
2576/**
2577 * i40iw_sc_qp_setctx - set qp's context
2578 * @qp: sc qp
2579 * @qp_ctx: context ptr
2580 * @info: ctx info
2581 */
2582static enum i40iw_status_code i40iw_sc_qp_setctx(
2583                                struct i40iw_sc_qp *qp,
2584                                u64 *qp_ctx,
2585                                struct i40iw_qp_host_ctx_info *info)
2586{
2587        struct i40iwarp_offload_info *iw;
2588        struct i40iw_tcp_offload_info *tcp;
2589        struct i40iw_sc_vsi *vsi;
2590        struct i40iw_sc_dev *dev;
2591        u64 qw0, qw3, qw7 = 0;
2592
2593        iw = info->iwarp_info;
2594        tcp = info->tcp_info;
2595        vsi = qp->vsi;
2596        dev = qp->dev;
2597        if (info->add_to_qoslist) {
2598                qp->user_pri = info->user_pri;
2599                i40iw_qp_add_qos(qp);
2600                i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
2601                            __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
2602        }
2603        qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
2604              LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
2605              LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
2606              LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
2607              LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
2608              LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
2609              LS_64(info->push_idx, I40IWQPC_PPIDX) |
2610              LS_64(info->push_mode_en, I40IWQPC_PMENA);
2611
2612        set_64bit_val(qp_ctx, 8, qp->sq_pa);
2613        set_64bit_val(qp_ctx, 16, qp->rq_pa);
2614
2615        qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2616              LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
2617              LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
2618
2619        set_64bit_val(qp_ctx,
2620                      128,
2621                      LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
2622
2623        set_64bit_val(qp_ctx,
2624                      136,
2625                      LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
2626                      LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
2627
2628        set_64bit_val(qp_ctx,
2629                      168,
2630                      LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
2631        set_64bit_val(qp_ctx,
2632                      176,
2633                      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
2634                      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
2635                      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
2636                      LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
2637
2638        if (info->iwarp_info_valid) {
2639                qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
2640                       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
2641
2642                qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
2643                set_64bit_val(qp_ctx,
2644                              144,
2645                              LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
2646                              LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
2647                set_64bit_val(qp_ctx,
2648                              152,
2649                              LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
2650
2651                set_64bit_val(qp_ctx,
2652                              160,
2653                              LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
2654                              LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
2655                              LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
2656                              LS_64(iw->rd_enable, I40IWQPC_RDOK) |
2657                              LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
2658                              LS_64(iw->bind_en, I40IWQPC_BINDEN) |
2659                              LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
2660                              LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
2661                              LS_64((((vsi->stats_fcn_id_alloc) &&
2662                                      (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
2663                                    I40IWQPC_USESTATSINSTANCE) |
2664                              LS_64(1, I40IWQPC_IWARPMODE) |
2665                              LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
2666                              LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
2667                              LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
2668                              LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
2669                              LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
2670        }
2671        if (info->tcp_info_valid) {
2672                qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
2673                       LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
2674                       LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
2675                       LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
2676                       LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
2677                       LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
2678                       LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
2679
2680                qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
2681                       LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
2682                       LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
2683                       LS_64(tcp->tos, I40IWQPC_TOS) |
2684                       LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
2685                       LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
2686
2687                qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
2688                set_64bit_val(qp_ctx,
2689                              32,
2690                              LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
2691                              LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
2692
2693                set_64bit_val(qp_ctx,
2694                              40,
2695                              LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
2696                              LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
2697
2698                set_64bit_val(qp_ctx,
2699                              48,
2700                              LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
2701                              LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
2702                              LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
2703
2704                qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
2705                       LS_64(tcp->wscale, I40IWQPC_WSCALE) |
2706                       LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
2707                       LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
2708                       LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
2709                       LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
2710                       LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
2711
2712                set_64bit_val(qp_ctx,
2713                              72,
2714                              LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
2715                              LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
2716                set_64bit_val(qp_ctx,
2717                              80,
2718                              LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
2719                              LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
2720
2721                set_64bit_val(qp_ctx,
2722                              88,
2723                              LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
2724                              LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
2725                set_64bit_val(qp_ctx,
2726                              96,
2727                              LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
2728                              LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
2729                set_64bit_val(qp_ctx,
2730                              104,
2731                              LS_64(tcp->srtt, I40IWQPC_SRTT) |
2732                              LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
2733                set_64bit_val(qp_ctx,
2734                              112,
2735                              LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
2736                              LS_64(tcp->cwnd, I40IWQPC_CWND));
2737                set_64bit_val(qp_ctx,
2738                              120,
2739                              LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
2740                              LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
2741                set_64bit_val(qp_ctx,
2742                              128,
2743                              LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
2744                              LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
2745                set_64bit_val(qp_ctx,
2746                              184,
2747                              LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
2748                              LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
2749                set_64bit_val(qp_ctx,
2750                              192,
2751                              LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
2752                              LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
2753        }
2754
2755        set_64bit_val(qp_ctx, 0, qw0);
2756        set_64bit_val(qp_ctx, 24, qw3);
2757        set_64bit_val(qp_ctx, 56, qw7);
2758
2759        i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST CTX WQE",
2760                        qp_ctx, I40IW_QP_CTX_SIZE);
2761        return 0;
2762}
2763
2764/**
2765 * i40iw_sc_alloc_stag - mr stag alloc
2766 * @dev: sc device struct
2767 * @info: stag info
2768 * @scratch: u64 saved to be used during cqp completion
2769 * @post_sq: flag for cqp db to ring
2770 */
2771static enum i40iw_status_code i40iw_sc_alloc_stag(
2772                                struct i40iw_sc_dev *dev,
2773                                struct i40iw_allocate_stag_info *info,
2774                                u64 scratch,
2775                                bool post_sq)
2776{
2777        u64 *wqe;
2778        struct i40iw_sc_cqp *cqp;
2779        u64 header;
2780        enum i40iw_page_size page_size;
2781
2782        page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2783        cqp = dev->cqp;
2784        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2785        if (!wqe)
2786                return I40IW_ERR_RING_FULL;
2787        set_64bit_val(wqe,
2788                      8,
2789                      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
2790                      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
2791        set_64bit_val(wqe,
2792                      16,
2793                      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2794        set_64bit_val(wqe,
2795                      40,
2796                      LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
2797
2798        header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2799                 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2800                 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2801                 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2802                 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2803                 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2804                 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2805                 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2806                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2807
2808        i40iw_insert_wqe_hdr(wqe, header);
2809
2810        i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
2811                        wqe, I40IW_CQP_WQE_SIZE * 8);
2812
2813        if (post_sq)
2814                i40iw_sc_cqp_post_sq(cqp);
2815        return 0;
2816}
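/*
 * Usage sketch (illustrative only, not driver code): CQP helpers such as
 * i40iw_sc_alloc_stag() share one pattern - reserve an SQ WQE, encode the
 * command, optionally ring the doorbell via post_sq, and match the
 * asynchronous completion back to the caller through @scratch.  With a
 * hypothetical request object, a caller might do:
 *
 *	struct i40iw_allocate_stag_info info = {
 *		.stag_idx	= stag_index,
 *		.pd_id		= pd_id,
 *		.total_len	= len,
 *		.remote_access	= true,
 *	};
 *
 *	if (!i40iw_sc_alloc_stag(dev, &info, (u64)(uintptr_t)req, true))
 *		wait_for_cqp_completion(req);  (hypothetical helper)
 */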
2817
2818/**
2819 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
2820 * @dev: sc device struct
2821 * @info: mr info
2822 * @scratch: u64 saved to be used during cqp completion
2823 * @post_sq: flag for cqp db to ring
2824 */
2825static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
2826                                struct i40iw_sc_dev *dev,
2827                                struct i40iw_reg_ns_stag_info *info,
2828                                u64 scratch,
2829                                bool post_sq)
2830{
2831        u64 *wqe;
2832        u64 temp;
2833        struct i40iw_sc_cqp *cqp;
2834        u64 header;
2835        u32 pble_obj_cnt;
2836        bool remote_access;
2837        u8 addr_type;
2838        enum i40iw_page_size page_size;
2839
2840        page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
2841        if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2842                                   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2843                remote_access = true;
2844        else
2845                remote_access = false;
2846
2847        pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
2848
2849        if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
2850                return I40IW_ERR_INVALID_PBLE_INDEX;
2851
2852        cqp = dev->cqp;
2853        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2854        if (!wqe)
2855                return I40IW_ERR_RING_FULL;
2856
2857        temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
2858        set_64bit_val(wqe, 0, temp);
2859
2860        set_64bit_val(wqe,
2861                      8,
2862                      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
2863                      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2864
2865        set_64bit_val(wqe,
2866                      16,
2867                      LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
2868                      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
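	/*
	 * A zero chunk_size describes a physically contiguous region, so
	 * its physical address goes directly into the WQE; otherwise the
	 * region is described by a leaf PBL and only the index of its
	 * first entry is passed to hardware.
	 */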
2869        if (!info->chunk_size) {
2870                set_64bit_val(wqe, 32, info->reg_addr_pa);
2871                set_64bit_val(wqe, 48, 0);
2872        } else {
2873                set_64bit_val(wqe, 32, 0);
2874                set_64bit_val(wqe, 48, info->first_pm_pbl_index);
2875        }
2876        set_64bit_val(wqe, 40, info->hmc_fcn_index);
2877        set_64bit_val(wqe, 56, 0);
2878
2879        addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2880        header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
2881                 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2882                 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
2883                 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
2884                 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2885                 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2886                 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
2887                 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
2888                 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
2889                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2890
2891        i40iw_insert_wqe_hdr(wqe, header);
2892
2893        i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
2894                        wqe, I40IW_CQP_WQE_SIZE * 8);
2895
2896        if (post_sq)
2897                i40iw_sc_cqp_post_sq(cqp);
2898        return 0;
2899}
2900
2901/**
2902 * i40iw_sc_mr_reg_shared - register a shared memory region
2903 * @dev: sc device struct
2904 * @info: info for shared memory registration
2905 * @scratch: u64 saved to be used during cqp completion
2906 * @post_sq: flag for cqp db to ring
2907 */
2908static enum i40iw_status_code i40iw_sc_mr_reg_shared(
2909                                        struct i40iw_sc_dev *dev,
2910                                        struct i40iw_register_shared_stag *info,
2911                                        u64 scratch,
2912                                        bool post_sq)
2913{
2914        u64 *wqe;
2915        struct i40iw_sc_cqp *cqp;
2916        u64 temp, va64, fbo, header;
2917        u32 va32;
2918        bool remote_access;
2919        u8 addr_type;
2920
2921        if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
2922                                   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
2923                remote_access = true;
2924        else
2925                remote_access = false;
2926        cqp = dev->cqp;
2927        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2928        if (!wqe)
2929                return I40IW_ERR_RING_FULL;
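	/* fbo is the first-byte offset of the VA within its 4K page */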
2930        va64 = (uintptr_t)(info->va);
2931        va32 = (u32)(va64 & 0x00000000FFFFFFFF);
2932        fbo = (u64)(va32 & (4096 - 1));
2933
2934        set_64bit_val(wqe,
2935                      0,
2936                      (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
2937
2938        set_64bit_val(wqe,
2939                      8,
2940                      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2941        temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
2942               LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
2943               LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
2944        set_64bit_val(wqe, 16, temp);
2945
2946        addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
2947        header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
2948                 LS_64(1, I40IW_CQPSQ_STAG_MR) |
2949                 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
2950                 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
2951                 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
2952                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2953
2954        i40iw_insert_wqe_hdr(wqe, header);
2955
2956        i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
2957                        wqe, I40IW_CQP_WQE_SIZE * 8);
2958
2959        if (post_sq)
2960                i40iw_sc_cqp_post_sq(cqp);
2961        return 0;
2962}
2963
2964/**
2965 * i40iw_sc_dealloc_stag - deallocate stag
2966 * @dev: sc device struct
2967 * @info: dealloc stag info
2968 * @scratch: u64 saved to be used during cqp completion
2969 * @post_sq: flag for cqp db to ring
2970 */
2971static enum i40iw_status_code i40iw_sc_dealloc_stag(
2972                                        struct i40iw_sc_dev *dev,
2973                                        struct i40iw_dealloc_stag_info *info,
2974                                        u64 scratch,
2975                                        bool post_sq)
2976{
2977        u64 header;
2978        u64 *wqe;
2979        struct i40iw_sc_cqp *cqp;
2980
2981        cqp = dev->cqp;
2982        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
2983        if (!wqe)
2984                return I40IW_ERR_RING_FULL;
2985        set_64bit_val(wqe,
2986                      8,
2987                      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
2988        set_64bit_val(wqe,
2989                      16,
2990                      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
2991
2992        header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
2993                 LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
2994                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
2995
2996        i40iw_insert_wqe_hdr(wqe, header);
2997
2998        i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
2999                        wqe, I40IW_CQP_WQE_SIZE * 8);
3000
3001        if (post_sq)
3002                i40iw_sc_cqp_post_sq(cqp);
3003        return 0;
3004}
3005
3006/**
3007 * i40iw_sc_query_stag - query hardware for stag
3008 * @dev: sc device struct
3009 * @scratch: u64 saved to be used during cqp completion
3010 * @stag_index: stag index for query
3011 * @post_sq: flag for cqp db to ring
3012 */
3013static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
3014                                                  u64 scratch,
3015                                                  u32 stag_index,
3016                                                  bool post_sq)
3017{
3018        u64 header;
3019        u64 *wqe;
3020        struct i40iw_sc_cqp *cqp;
3021
3022        cqp = dev->cqp;
3023        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3024        if (!wqe)
3025                return I40IW_ERR_RING_FULL;
3026        set_64bit_val(wqe,
3027                      16,
3028                      LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
3029
3030        header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
3031                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3032
3033        i40iw_insert_wqe_hdr(wqe, header);
3034
3035        i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
3036                        wqe, I40IW_CQP_WQE_SIZE * 8);
3037
3038        if (post_sq)
3039                i40iw_sc_cqp_post_sq(cqp);
3040        return 0;
3041}
3042
3043/**
3044 * i40iw_sc_mw_alloc - mw allocate
3045 * @dev: sc device struct
3046 * @scratch: u64 saved to be used during cqp completion
3047 * @mw_stag_index: stag index
3048 * @pd_id: pd id for this mw
3049 * @post_sq: flag for cqp db to ring
3050 */
3051static enum i40iw_status_code i40iw_sc_mw_alloc(
3052                                        struct i40iw_sc_dev *dev,
3053                                        u64 scratch,
3054                                        u32 mw_stag_index,
3055                                        u16 pd_id,
3056                                        bool post_sq)
3057{
3058        u64 header;
3059        struct i40iw_sc_cqp *cqp;
3060        u64 *wqe;
3061
3062        cqp = dev->cqp;
3063        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3064        if (!wqe)
3065                return I40IW_ERR_RING_FULL;
3066        set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
3067        set_64bit_val(wqe,
3068                      16,
3069                      LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
3070
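	/*
	 * Memory windows reuse the ALLOC_STAG opcode; unlike
	 * i40iw_sc_alloc_stag() above, the I40IW_CQPSQ_STAG_MR bit is left
	 * clear, which distinguishes the MW from an MR allocation.
	 */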
3071        header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
3072                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3073
3074        i40iw_insert_wqe_hdr(wqe, header);
3075
3076        i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
3077                        wqe, I40IW_CQP_WQE_SIZE * 8);
3078
3079        if (post_sq)
3080                i40iw_sc_cqp_post_sq(cqp);
3081        return 0;
3082}
3083
3084/**
3085 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
3086 * @qp: sc qp struct
3087 * @info: fast mr info
3088 * @post_sq: flag for cqp db to ring
3089 */
3090enum i40iw_status_code i40iw_sc_mr_fast_register(
3091                                struct i40iw_sc_qp *qp,
3092                                struct i40iw_fast_reg_stag_info *info,
3093                                bool post_sq)
3094{
3095        u64 temp, header;
3096        u64 *wqe;
3097        u32 wqe_idx;
3098        enum i40iw_page_size page_size;
3099
3100        page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
3101        wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
3102                                         0, info->wr_id);
3103        if (!wqe)
3104                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3105
3106        i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
3107                    __func__, info->wr_id, wqe_idx,
3108                    &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
3109        temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
3110        set_64bit_val(wqe, 0, temp);
3111
3112        temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
3113        set_64bit_val(wqe,
3114                      8,
3115                      LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
3116                      LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));
3117
3118        set_64bit_val(wqe,
3119                      16,
3120                      info->total_len |
3121                      LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));
3122
3123        header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
3124                 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
3125                 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
3126                 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
3127                 LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
3128                 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
3129                 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
3130                 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
3131                 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
3132                 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
3133                 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3134
3135        i40iw_insert_wqe_hdr(wqe, header);
3136
3137        i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
3138                        wqe, I40IW_QP_WQE_MIN_SIZE);
3139
3140        if (post_sq)
3141                i40iw_qp_post_wr(&qp->qp_uk);
3142        return 0;
3143}
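/*
 * Illustrative only: a verbs-layer caller would typically translate a
 * fast-register work request into i40iw_fast_reg_stag_info before calling
 * i40iw_sc_mr_fast_register().  All field values below are assumptions
 * made for the sketch (the low 8 bits of an iWARP stag being the consumer
 * key and the rest the index), not taken from this file:
 *
 *	struct i40iw_fast_reg_stag_info info = {};
 *
 *	info.wr_id = wr_id;
 *	info.access_rights = access_flags;
 *	info.stag_key = (u8)stag;
 *	info.stag_idx = stag >> 8;
 *	info.page_size = 0x1000;	(selects I40IW_PAGE_SIZE_4K)
 *	info.total_len = length;
 *	info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
 *	info.va = (void *)(uintptr_t)iova;
 *	ret = i40iw_sc_mr_fast_register(qp, &info, true);
 */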
3144
3145/**
3146 * i40iw_sc_send_lsmm - send last streaming mode message
3147 * @qp: sc qp struct
3148 * @lsmm_buf: buffer with lsmm message
3149 * @size: size of lsmm buffer
3150 * @stag: stag of lsmm buffer
3151 */
3152static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
3153                               void *lsmm_buf,
3154                               u32 size,
3155                               i40iw_stag stag)
3156{
3157        u64 *wqe;
3158        u64 header;
3159        struct i40iw_qp_uk *qp_uk;
3160
3161        qp_uk = &qp->qp_uk;
3162        wqe = qp_uk->sq_base->elem;
3163
3164        set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3165
3166        set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
3167
3168        set_64bit_val(wqe, 16, 0);
3169
3170        header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3171                 LS_64(1, I40IWQPSQ_STREAMMODE) |
3172                 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3173                 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3174
3175        i40iw_insert_wqe_hdr(wqe, header);
3176
3177        i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
3178                        wqe, I40IW_QP_WQE_MIN_SIZE);
3179}
3180
3181/**
3182 * i40iw_sc_send_lsmm_nostag - send lsmm without stag for a privileged qp
3183 * @qp: sc qp struct
3184 * @lsmm_buf: buffer with lsmm message
3185 * @size: size of lsmm buffer
3186 */
3187static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
3188                                      void *lsmm_buf,
3189                                      u32 size)
3190{
3191        u64 *wqe;
3192        u64 header;
3193        struct i40iw_qp_uk *qp_uk;
3194
3195        qp_uk = &qp->qp_uk;
3196        wqe = qp_uk->sq_base->elem;
3197
3198        set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
3199
3200        set_64bit_val(wqe, 8, size);
3201
3202        set_64bit_val(wqe, 16, 0);
3203
3204        header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3205                 LS_64(1, I40IWQPSQ_STREAMMODE) |
3206                 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
3207                 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3208
3209        i40iw_insert_wqe_hdr(wqe, header);
3210
3211        i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
3212                        wqe, I40IW_QP_WQE_MIN_SIZE);
3213}
3214
3215/**
3216 * i40iw_sc_send_rtt - send last read0 or write0
3217 * @qp: sc qp struct
3218 * @read: Do read0 or write0
3219 */
3220static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
3221{
3222        u64 *wqe;
3223        u64 header;
3224        struct i40iw_qp_uk *qp_uk;
3225
3226        qp_uk = &qp->qp_uk;
3227        wqe = qp_uk->sq_base->elem;
3228
3229        set_64bit_val(wqe, 0, 0);
3230        set_64bit_val(wqe, 8, 0);
3231        set_64bit_val(wqe, 16, 0);
3232        if (read) {
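		/*
		 * The read0 must carry a remote stag and offset on the wire
		 * even though no data moves, so placeholder values (0x1234,
		 * 0xabcd) are used.
		 */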
3233                header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
3234                         LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
3235                         LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3236                set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
3237        } else {
3238                header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
3239                         LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3240        }
3241
3242        i40iw_insert_wqe_hdr(wqe, header);
3243
3244        i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
3245                        wqe, I40IW_QP_WQE_MIN_SIZE);
3246}
3247
3248/**
3249 * i40iw_sc_post_wqe0 - send wqe with opcode
3250 * @qp: sc qp struct
3251 * @opcode: opcode to use for wqe0
3252 */
3253static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
3254{
3255        u64 *wqe;
3256        u64 header;
3257        struct i40iw_qp_uk *qp_uk;
3258
3259        qp_uk = &qp->qp_uk;
3260        wqe = qp_uk->sq_base->elem;
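	/* wqe0 is written in place at SQ index 0; the ring is not advanced */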
3261
3262        if (!wqe)
3263                return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
3264        switch (opcode) {
3265        case I40IWQP_OP_NOP:
3266                set_64bit_val(wqe, 0, 0);
3267                set_64bit_val(wqe, 8, 0);
3268                set_64bit_val(wqe, 16, 0);
3269                header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
3270                         LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
3271
3272                i40iw_insert_wqe_hdr(wqe, header);
3273                break;
3274        case I40IWQP_OP_RDMA_SEND:
3275                set_64bit_val(wqe, 0, 0);
3276                set_64bit_val(wqe, 8, 0);
3277                set_64bit_val(wqe, 16, 0);
3278                header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
3279                         LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
3280                         LS_64(1, I40IWQPSQ_STREAMMODE) |
3281                         LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
3282
3283                i40iw_insert_wqe_hdr(wqe, header);
3284                break;
3285        default:
3286                i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
3287                            __func__);
3288                break;
3289        }
3290        return 0;
3291}
3292
3293/**
3294 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
3295 * @dev: ptr to i40iw_dev struct
3296 * @hmc_fn_id: hmc function id
3297 */
3298enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
3299{
3300        struct i40iw_hmc_info *hmc_info;
3301        struct i40iw_dma_mem query_fpm_mem;
3302        struct i40iw_virt_mem virt_mem;
3303        struct i40iw_vfdev *vf_dev = NULL;
3304        u32 mem_size;
3305        enum i40iw_status_code ret_code = 0;
3306        bool poll_registers = true;
3307        u16 iw_vf_idx;
3308        u8 wait_type;
3309
3310        if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3311            (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3312                return I40IW_ERR_INVALID_HMCFN_ID;
3313
3314        i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
3315                    dev->hmc_fn_id);
3316        if (hmc_fn_id == dev->hmc_fn_id) {
3317                hmc_info = dev->hmc_info;
3318                query_fpm_mem.pa = dev->fpm_query_buf_pa;
3319                query_fpm_mem.va = dev->fpm_query_buf;
3320        } else {
3321                vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
3322                if (!vf_dev)
3323                        return I40IW_ERR_INVALID_VF_ID;
3324
3325                hmc_info = &vf_dev->hmc_info;
3326                iw_vf_idx = vf_dev->iw_vf_idx;
3327                i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
3328                            hmc_info, hmc_info->hmc_obj);
3329                if (!vf_dev->fpm_query_buf) {
3330                        if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
3331                                ret_code = i40iw_alloc_query_fpm_buf(dev,
3332                                                                     &dev->vf_fpm_query_buf[iw_vf_idx]);
3333                                if (ret_code)
3334                                        return ret_code;
3335                        }
3336                        vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
3337                        vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
3338                }
3339                query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
3340                query_fpm_mem.va = vf_dev->fpm_query_buf;
3341                /*
3342                 * Hardware specific: this call is made by the PF on
3343                 * behalf of the VF, and i40iw_sc_query_fpm_values()
3344                 * must poll the CCQ because the PF CCQ is already
3345                 * created.
3346                 */
3347                poll_registers = false;
3348        }
3349
3350        hmc_info->hmc_fn_id = hmc_fn_id;
3351
3352        if (hmc_fn_id != dev->hmc_fn_id) {
3353                ret_code =
3354                        i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3355        } else {
3356                wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3357                            (u8)I40IW_CQP_WAIT_POLL_CQ;
3358
3359                ret_code = i40iw_sc_query_fpm_values(
3360                                        dev->cqp,
3361                                        0,
3362                                        hmc_info->hmc_fn_id,
3363                                        &query_fpm_mem,
3364                                        true,
3365                                        wait_type);
3366        }
3367        if (ret_code)
3368                return ret_code;
3369
3370        /* parse the fpm_query_buf and fill hmc obj info */
3371        ret_code =
3372                i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
3373                                             hmc_info,
3374                                             &dev->hmc_fpm_misc);
3375        if (ret_code)
3376                return ret_code;
3377        i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
3378                        query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
3379
3380        if (hmc_fn_id != dev->hmc_fn_id) {
3381                i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
3382
3383                /* parse the fpm_commit_buf and fill hmc obj info */
3384                i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
3385                mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3386                           (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
3387                ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3388                if (ret_code)
3389                        return ret_code;
3390                hmc_info->sd_table.sd_entry = virt_mem.va;
3391        }
3392
3393        /* fill size of objects which are fixed */
3394        hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
3395        hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
3396        hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
3397        hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
3398        hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
3399
3400        return ret_code;
3401}
3402
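/*
 * Typical bring-up order (see i40iw_config_fpm_values() below): query the
 * FPM limits with i40iw_sc_init_iw_hmc(), trim the per-object counts to
 * fit the available SDs, then commit them with i40iw_sc_configure_iw_fpm().
 */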
3403/**
3404 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
3405 * populates fpm base address in hmc_info
3406 * @dev: ptr to i40iw_dev struct
3407 * @hmc_fn_id: hmc function id
3408 */
3409static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
3410                                                        u8 hmc_fn_id)
3411{
3412        struct i40iw_hmc_info *hmc_info;
3413        struct i40iw_hmc_obj_info *obj_info;
3414        u64 *buf;
3415        struct i40iw_dma_mem commit_fpm_mem;
3416        u32 i, j;
3417        enum i40iw_status_code ret_code = 0;
3418        bool poll_registers = true;
3419        u8 wait_type;
3420
3421        if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
3422            (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
3423                return I40IW_ERR_INVALID_HMCFN_ID;
3424
3425        if (hmc_fn_id == dev->hmc_fn_id) {
3426                hmc_info = dev->hmc_info;
3427        } else {
3428                hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
3429                poll_registers = false;
3430        }
3431        if (!hmc_info)
3432                return I40IW_ERR_BAD_PTR;
3433
3434        obj_info = hmc_info->hmc_obj;
3435        buf = dev->fpm_commit_buf;
3436
3437        /* copy cnt values in commit buf */
3438        for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
3439             i++, j += 8)
3440                set_64bit_val(buf, j, (u64)obj_info[i].cnt);
3441
3442        set_64bit_val(buf, 40, 0);   /* APBVT rsvd */
3443
3444        commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
3445        commit_fpm_mem.va = dev->fpm_commit_buf;
3446        wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
3447                        (u8)I40IW_CQP_WAIT_POLL_CQ;
3448        ret_code = i40iw_sc_commit_fpm_values(
3449                                        dev->cqp,
3450                                        0,
3451                                        hmc_info->hmc_fn_id,
3452                                        &commit_fpm_mem,
3453                                        true,
3454                                        wait_type);
3455
3456        /* parse the fpm_commit_buf and fill hmc obj info */
3457        if (!ret_code)
3458                ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
3459                                                         hmc_info->hmc_obj,
3460                                                         &hmc_info->sd_table.sd_cnt);
3461
3462        i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
3463                        commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
3464
3465        return ret_code;
3466}
3467
3468/**
3469 * cqp_sds_wqe_fill - fill cqp wqe for sd
3470 * @cqp: struct for cqp hw
3471 * @info: sd info for wqe
3472 * @scratch: u64 saved to be used during cqp completion
3473 */
3474static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
3475                                               struct i40iw_update_sds_info *info,
3476                                               u64 scratch)
3477{
3478        u64 data;
3479        u64 header;
3480        u64 *wqe;
3481        int mem_entries, wqe_entries;
3482        struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
3483
3484        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3485        if (!wqe)
3486                return I40IW_ERR_RING_FULL;
3487
3488        I40IW_CQP_INIT_WQE(wqe);
3489        wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
3490        mem_entries = info->cnt - wqe_entries;
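	/*
	 * Up to three SD entries are carried inline in the WQE; any
	 * remainder is copied into the sdbuf DMA area (16 bytes per entry)
	 * and referenced by physical address.
	 */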
3491
3492        header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
3493                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
3494                 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
3495
3496        if (mem_entries) {
3497                memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
3498                data = sdbuf->pa;
3499        } else {
3500                data = 0;
3501        }
3502        data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
3503
3504        set_64bit_val(wqe, 16, data);
3505
3506        switch (wqe_entries) {
3507        case 3:
3508                set_64bit_val(wqe, 48,
3509                              (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3510                                        LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3511
3512                set_64bit_val(wqe, 56, info->entry[2].data);
3513                /* fallthrough */
3514        case 2:
3515                set_64bit_val(wqe, 32,
3516                              (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
3517                                        LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
3518
3519                set_64bit_val(wqe, 40, info->entry[1].data);
3520                /* fallthrough */
3521        case 1:
3522                set_64bit_val(wqe, 0,
3523                              LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
3524
3525                set_64bit_val(wqe, 8, info->entry[0].data);
3526                break;
3527        default:
3528                break;
3529        }
3530
3531        i40iw_insert_wqe_hdr(wqe, header);
3532
3533        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
3534                        wqe, I40IW_CQP_WQE_SIZE * 8);
3535        return 0;
3536}
3537
3538/**
3539 * i40iw_update_pe_sds - cqp wqe for sd
3540 * @dev: ptr to i40iw_dev struct
3541 * @info: sd info for the sd entries
3542 * @scratch: u64 saved to be used during cqp completion
3543 */
3544static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
3545                                                  struct i40iw_update_sds_info *info,
3546                                                  u64 scratch)
3547{
3548        struct i40iw_sc_cqp *cqp = dev->cqp;
3549        enum i40iw_status_code ret_code;
3550
3551        ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
3552        if (!ret_code)
3553                i40iw_sc_cqp_post_sq(cqp);
3554
3555        return ret_code;
3556}
3557
3558/**
3559 * i40iw_update_sds_noccq - update sd before ccq created
3560 * @dev: sc device struct
3561 * @info: sd info for the sd entries
3562 */
3563enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
3564                                              struct i40iw_update_sds_info *info)
3565{
3566        u32 error, val, tail;
3567        struct i40iw_sc_cqp *cqp = dev->cqp;
3568        enum i40iw_status_code ret_code;
3569
3570        ret_code = cqp_sds_wqe_fill(cqp, info, 0);
3571        if (ret_code)
3572                return ret_code;
3573        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3574        if (error)
3575                return I40IW_ERR_CQP_COMPL_ERROR;
3576
3577        i40iw_sc_cqp_post_sq(cqp);
3578        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
3579
3580        return ret_code;
3581}
3582
3583/**
3584 * i40iw_sc_suspend_qp - suspend qp for param change
3585 * @cqp: struct for cqp hw
3586 * @qp: sc qp struct
3587 * @scratch: u64 saved to be used during cqp completion
3588 */
3589enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
3590                                           struct i40iw_sc_qp *qp,
3591                                           u64 scratch)
3592{
3593        u64 header;
3594        u64 *wqe;
3595
3596        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3597        if (!wqe)
3598                return I40IW_ERR_RING_FULL;
3599        header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
3600                 LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
3601                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3602
3603        i40iw_insert_wqe_hdr(wqe, header);
3604
3605        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
3606                        wqe, I40IW_CQP_WQE_SIZE * 8);
3607
3608        i40iw_sc_cqp_post_sq(cqp);
3609        return 0;
3610}
3611
3612/**
3613 * i40iw_sc_resume_qp - resume qp after suspend
3614 * @cqp: struct for cqp hw
3615 * @qp: sc qp struct
3616 * @scratch: u64 saved to be used during cqp completion
3617 */
3618enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
3619                                          struct i40iw_sc_qp *qp,
3620                                          u64 scratch)
3621{
3622        u64 header;
3623        u64 *wqe;
3624
3625        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3626        if (!wqe)
3627                return I40IW_ERR_RING_FULL;
3628        set_64bit_val(wqe,
3629                      16,
3630                        LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
3631
3632        header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
3633                 LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
3634                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3635
3636        i40iw_insert_wqe_hdr(wqe, header);
3637
3638        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
3639                        wqe, I40IW_CQP_WQE_SIZE * 8);
3640
3641        i40iw_sc_cqp_post_sq(cqp);
3642        return 0;
3643}
3644
3645/**
3646 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
3647 * @cqp: struct for cqp hw
3648 * @scratch: u64 saved to be used during cqp completion
3649 * @hmc_fn_id: hmc function id
3650 * @post_sq: flag for cqp db to ring
3651 * @poll_registers: flag to poll register for cqp completion
3652 */
3653enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
3654                                        struct i40iw_sc_cqp *cqp,
3655                                        u64 scratch,
3656                                        u8 hmc_fn_id,
3657                                        bool post_sq,
3658                                        bool poll_registers)
3659{
3660        u64 header;
3661        u64 *wqe;
3662        u32 tail, val, error;
3663        enum i40iw_status_code ret_code = 0;
3664
3665        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
3666        if (!wqe)
3667                return I40IW_ERR_RING_FULL;
3668        set_64bit_val(wqe,
3669                      16,
3670                      LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
3671
3672        header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
3673                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
3674
3675        i40iw_insert_wqe_hdr(wqe, header);
3676
3677        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
3678                        wqe, I40IW_CQP_WQE_SIZE * 8);
3679        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
3680        if (error) {
3681                ret_code = I40IW_ERR_CQP_COMPL_ERROR;
3682                return ret_code;
3683        }
3684        if (post_sq) {
3685                i40iw_sc_cqp_post_sq(cqp);
3686                if (poll_registers)
3687                        /* check for cqp sq tail update */
3688                        ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
3689                else
3690                        ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
3691                                                                 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
3692                                                                 NULL);
3693        }
3694
3695        return ret_code;
3696}
3697
3698/**
3699 * i40iw_ring_full - check if cqp ring is full
3700 * @cqp: struct for cqp hw
3701 */
3702static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
3703{
3704        return I40IW_RING_FULL_ERR(cqp->sq_ring);
3705}
3706
3707/**
3708 * i40iw_est_sd - returns approximate number of SDs for HMC
3709 * @dev: sc device struct
3710 * @hmc_info: hmc structure, size and count for HMC objects
3711 */
3712static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
3713{
3714        int i;
3715        u64 size = 0;
3716        u64 sd;
3717
3718        for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
3719                size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;
3720
3721        if (dev->is_pf)
3722                size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3723
3724        if (size & 0x1FFFFF)
3725                sd = (size >> 21) + 1; /* add 1 for remainder */
3726        else
3727                sd = size >> 21;
3728
3729        if (!dev->is_pf) {
3730                /* 2MB alignment for VF PBLE HMC */
3731                size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
3732                if (size & 0x1FFFFF)
3733                        sd += (size >> 21) + 1; /* add 1 for remainder */
3734                else
3735                        sd += size >> 21;
3736        }
3737
3738        return sd;
3739}
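/*
 * Worked example: one SD covers 2MB (1 << 21 bytes).  5MB of backing
 * objects gives size >> 21 = 2 with a 1MB remainder, so i40iw_est_sd()
 * reports 3 SDs.  For a VF, the PBLE objects are rounded up separately so
 * that their backing remains 2MB aligned.
 */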
3740
3741/**
3742 * i40iw_config_fpm_values - configure HMC objects
3743 * @dev: sc device struct
3744 * @qp_count: desired qp count
3745 */
3746enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
3747{
3748        struct i40iw_virt_mem virt_mem;
3749        u32 i, mem_size;
3750        u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
3751        u32 powerof2;
3752        u64 sd_needed;
3753        u32 loop_count = 0;
3754
3755        struct i40iw_hmc_info *hmc_info;
3756        struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
3757        enum i40iw_status_code ret_code = 0;
3758
3759        hmc_info = dev->hmc_info;
3760        hmc_fpm_misc = &dev->hmc_fpm_misc;
3761
3762        ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
3763        if (ret_code) {
3764                i40iw_debug(dev, I40IW_DEBUG_HMC,
3765                            "i40iw_sc_init_iw_hmc returned error_code = %d\n",
3766                            ret_code);
3767                return ret_code;
3768        }
3769
3770        for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
3771                hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
3772        sd_needed = i40iw_est_sd(dev, hmc_info);
3773        i40iw_debug(dev, I40IW_DEBUG_HMC,
3774                    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
3775                    __func__, sd_needed, hmc_info->first_sd_index);
3776        i40iw_debug(dev, I40IW_DEBUG_HMC,
3777                    "%s: sd count %d where max sd is %d\n",
3778                    __func__, hmc_info->sd_table.sd_cnt,
3779                    hmc_fpm_misc->max_sds);
3780
3781        qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
3782        qpwantedoriginal = qpwanted;
3783        mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
3784        pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
3785
3786        i40iw_debug(dev, I40IW_DEBUG_HMC,
3787                    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
3788                    qp_count, hmc_fpm_misc->max_sds,
3789                    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
3790                    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
3791                    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
3792                    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
3793
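	/*
	 * Iteratively shrink the wanted QP, MR and PBLE counts until the
	 * estimated SD requirement fits under max_sds, giving up after
	 * 2000 iterations.
	 */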
3794        do {
3795                ++loop_count;
3796                hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
3797                hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
3798                        min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
3799                hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
3800                hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
3801                                        qpwanted * hmc_fpm_misc->ht_multiplier;
3802                hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
3803                        hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
3804                hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
3805                hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
3806
3807                hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
3808                hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
3809                hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
3810                        hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
3811                hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
3812                        hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
3813                hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
3814                        ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
3815                hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
3816                hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
3817                hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
3818
3819                /* How much memory is needed for all the objects. */
3820                sd_needed = i40iw_est_sd(dev, hmc_info);
3821                if ((loop_count > 1000) ||
3822                    ((!(loop_count % 10)) &&
3823                    (qpwanted > qpwantedoriginal * 2 / 3))) {
3824                        if (qpwanted > FPM_MULTIPLIER) {
3825                                qpwanted -= FPM_MULTIPLIER;
3826                                powerof2 = 1;
3827                                while (powerof2 < qpwanted)
3828                                        powerof2 *= 2;
3829                                powerof2 /= 2;
3830                                qpwanted = powerof2;
3831                        } else {
3832                                qpwanted /= 2;
3833                        }
3834                }
3835                if (mrwanted > FPM_MULTIPLIER * 10)
3836                        mrwanted -= FPM_MULTIPLIER * 10;
3837                if (pblewanted > FPM_MULTIPLIER * 1000)
3838                        pblewanted -= FPM_MULTIPLIER * 1000;
3839        } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
3840
3841        sd_needed = i40iw_est_sd(dev, hmc_info);
3842
3843        i40iw_debug(dev, I40IW_DEBUG_HMC,
3844                    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
3845                    loop_count, sd_needed,
3846                    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
3847                    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
3848                    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
3849                    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
3850
3851        ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
3852        if (ret_code) {
3853                i40iw_debug(dev, I40IW_DEBUG_HMC,
3854                            "configure_iw_fpm returned error_code[x%08X]\n",
3855                            i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
3856                return ret_code;
3857        }
3858
3859        mem_size = sizeof(struct i40iw_hmc_sd_entry) *
3860                   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
3861        ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
3862        if (ret_code) {
3863                i40iw_debug(dev, I40IW_DEBUG_HMC,
3864                            "%s: failed to allocate memory for sd_entry buffer\n",
3865                            __func__);
3866                return ret_code;
3867        }
3868        hmc_info->sd_table.sd_entry = virt_mem.va;
3869
3870        return ret_code;
3871}
3872
3873/**
3874 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
3875 * @dev: rdma device
3876 * @pcmdinfo: cqp command info
3877 */
3878static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
3879                                                 struct cqp_commands_info *pcmdinfo)
3880{
3881        enum i40iw_status_code status;
3882        struct i40iw_dma_mem values_mem;
3883
3884        dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
3885        switch (pcmdinfo->cqp_cmd) {
3886        case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
3887                status = i40iw_sc_del_local_mac_ipaddr_entry(
3888                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
3889                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
3890                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
3891                                pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
3892                                pcmdinfo->post_sq);
3893                break;
3894        case OP_CEQ_DESTROY:
3895                status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
3896                                              pcmdinfo->in.u.ceq_destroy.scratch,
3897                                              pcmdinfo->post_sq);
3898                break;
3899        case OP_AEQ_DESTROY:
3900                status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
3901                                              pcmdinfo->in.u.aeq_destroy.scratch,
3902                                              pcmdinfo->post_sq);
3903
3904                break;
3905        case OP_DELETE_ARP_CACHE_ENTRY:
3906                status = i40iw_sc_del_arp_cache_entry(
3907                                pcmdinfo->in.u.del_arp_cache_entry.cqp,
3908                                pcmdinfo->in.u.del_arp_cache_entry.scratch,
3909                                pcmdinfo->in.u.del_arp_cache_entry.arp_index,
3910                                pcmdinfo->post_sq);
3911                break;
3912        case OP_MANAGE_APBVT_ENTRY:
3913                status = i40iw_sc_manage_apbvt_entry(
3914                                pcmdinfo->in.u.manage_apbvt_entry.cqp,
3915                                &pcmdinfo->in.u.manage_apbvt_entry.info,
3916                                pcmdinfo->in.u.manage_apbvt_entry.scratch,
3917                                pcmdinfo->post_sq);
3918                break;
3919        case OP_CEQ_CREATE:
3920                status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
3921                                             pcmdinfo->in.u.ceq_create.scratch,
3922                                             pcmdinfo->post_sq);
3923                break;
3924        case OP_AEQ_CREATE:
3925                status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
3926                                             pcmdinfo->in.u.aeq_create.scratch,
3927                                             pcmdinfo->post_sq);
3928                break;
3929        case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
3930                status = i40iw_sc_alloc_local_mac_ipaddr_entry(
3931                                pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
3932                                pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
3933                                pcmdinfo->post_sq);
3934                break;
3935        case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
3936                status = i40iw_sc_add_local_mac_ipaddr_entry(
3937                                pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
3938                                &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
3939                                pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
3940                                pcmdinfo->post_sq);
3941                break;
3942        case OP_MANAGE_QHASH_TABLE_ENTRY:
3943                status = i40iw_sc_manage_qhash_table_entry(
3944                                pcmdinfo->in.u.manage_qhash_table_entry.cqp,
3945                                &pcmdinfo->in.u.manage_qhash_table_entry.info,
3946                                pcmdinfo->in.u.manage_qhash_table_entry.scratch,
3947                                pcmdinfo->post_sq);
3948
3949                break;
3950        case OP_QP_MODIFY:
3951                status = i40iw_sc_qp_modify(
3952                                pcmdinfo->in.u.qp_modify.qp,
3953                                &pcmdinfo->in.u.qp_modify.info,
3954                                pcmdinfo->in.u.qp_modify.scratch,
3955                                pcmdinfo->post_sq);
3956
3957                break;
3958        case OP_QP_UPLOAD_CONTEXT:
3959                status = i40iw_sc_qp_upload_context(
3960                                pcmdinfo->in.u.qp_upload_context.dev,
3961                                &pcmdinfo->in.u.qp_upload_context.info,
3962                                pcmdinfo->in.u.qp_upload_context.scratch,
3963                                pcmdinfo->post_sq);
3964
3965                break;
3966        case OP_CQ_CREATE:
3967                status = i40iw_sc_cq_create(
3968                                pcmdinfo->in.u.cq_create.cq,
3969                                pcmdinfo->in.u.cq_create.scratch,
3970                                pcmdinfo->in.u.cq_create.check_overflow,
3971                                pcmdinfo->post_sq);
3972                break;
3973        case OP_CQ_DESTROY:
3974                status = i40iw_sc_cq_destroy(
3975                                pcmdinfo->in.u.cq_destroy.cq,
3976                                pcmdinfo->in.u.cq_destroy.scratch,
3977                                pcmdinfo->post_sq);
3978
3979                break;
3980        case OP_QP_CREATE:
3981                status = i40iw_sc_qp_create(
3982                                pcmdinfo->in.u.qp_create.qp,
3983                                &pcmdinfo->in.u.qp_create.info,
3984                                pcmdinfo->in.u.qp_create.scratch,
3985                                pcmdinfo->post_sq);
3986                break;
3987        case OP_QP_DESTROY:
3988                status = i40iw_sc_qp_destroy(
3989                                pcmdinfo->in.u.qp_destroy.qp,
3990                                pcmdinfo->in.u.qp_destroy.scratch,
3991                                pcmdinfo->in.u.qp_destroy.remove_hash_idx,
3992                                pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
3994                                pcmdinfo->post_sq);
3995
3996                break;
3997        case OP_ALLOC_STAG:
3998                status = i40iw_sc_alloc_stag(
3999                                pcmdinfo->in.u.alloc_stag.dev,
4000                                &pcmdinfo->in.u.alloc_stag.info,
4001                                pcmdinfo->in.u.alloc_stag.scratch,
4002                                pcmdinfo->post_sq);
4003                break;
4004        case OP_MR_REG_NON_SHARED:
4005                status = i40iw_sc_mr_reg_non_shared(
4006                                pcmdinfo->in.u.mr_reg_non_shared.dev,
4007                                &pcmdinfo->in.u.mr_reg_non_shared.info,
4008                                pcmdinfo->in.u.mr_reg_non_shared.scratch,
4009                                pcmdinfo->post_sq);
4010
4011                break;
4012        case OP_DEALLOC_STAG:
4013                status = i40iw_sc_dealloc_stag(
4014                                pcmdinfo->in.u.dealloc_stag.dev,
4015                                &pcmdinfo->in.u.dealloc_stag.info,
4016                                pcmdinfo->in.u.dealloc_stag.scratch,
4017                                pcmdinfo->post_sq);
4018
4019                break;
4020        case OP_MW_ALLOC:
4021                status = i40iw_sc_mw_alloc(
4022                                pcmdinfo->in.u.mw_alloc.dev,
4023                                pcmdinfo->in.u.mw_alloc.scratch,
4024                                pcmdinfo->in.u.mw_alloc.mw_stag_index,
4025                                pcmdinfo->in.u.mw_alloc.pd_id,
4026                                pcmdinfo->post_sq);
4027
4028                break;
4029        case OP_QP_FLUSH_WQES:
4030                status = i40iw_sc_qp_flush_wqes(
4031                                pcmdinfo->in.u.qp_flush_wqes.qp,
4032                                &pcmdinfo->in.u.qp_flush_wqes.info,
4033                                pcmdinfo->in.u.qp_flush_wqes.scratch,
4034                                pcmdinfo->post_sq);
4035                break;
4036        case OP_ADD_ARP_CACHE_ENTRY:
4037                status = i40iw_sc_add_arp_cache_entry(
4038                                pcmdinfo->in.u.add_arp_cache_entry.cqp,
4039                                &pcmdinfo->in.u.add_arp_cache_entry.info,
4040                                pcmdinfo->in.u.add_arp_cache_entry.scratch,
4041                                pcmdinfo->post_sq);
4042                break;
4043        case OP_MANAGE_PUSH_PAGE:
4044                status = i40iw_sc_manage_push_page(
4045                                pcmdinfo->in.u.manage_push_page.cqp,
4046                                &pcmdinfo->in.u.manage_push_page.info,
4047                                pcmdinfo->in.u.manage_push_page.scratch,
4048                                pcmdinfo->post_sq);
4049                break;
4050        case OP_UPDATE_PE_SDS:
4051                /* case I40IW_CQP_OP_UPDATE_PE_SDS */
4052                status = i40iw_update_pe_sds(
4053                                pcmdinfo->in.u.update_pe_sds.dev,
4054                                &pcmdinfo->in.u.update_pe_sds.info,
4055                                pcmdinfo->in.u.update_pe_sds.scratch);
4057
4058                break;
4059        case OP_MANAGE_HMC_PM_FUNC_TABLE:
4060                status = i40iw_sc_manage_hmc_pm_func_table(
4061                                pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
4062                                pcmdinfo->in.u.manage_hmc_pm.scratch,
4063                                (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
4064                                pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
4065                                true);
4066                break;
4067        case OP_SUSPEND:
4068                status = i40iw_sc_suspend_qp(
4069                                pcmdinfo->in.u.suspend_resume.cqp,
4070                                pcmdinfo->in.u.suspend_resume.qp,
4071                                pcmdinfo->in.u.suspend_resume.scratch);
4072                break;
4073        case OP_RESUME:
4074                status = i40iw_sc_resume_qp(
4075                                pcmdinfo->in.u.suspend_resume.cqp,
4076                                pcmdinfo->in.u.suspend_resume.qp,
4077                                pcmdinfo->in.u.suspend_resume.scratch);
4078                break;
4079        case OP_MANAGE_VF_PBLE_BP:
4080                status = i40iw_manage_vf_pble_bp(
4081                                pcmdinfo->in.u.manage_vf_pble_bp.cqp,
4082                                &pcmdinfo->in.u.manage_vf_pble_bp.info,
4083                                pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
4084                break;
4085        case OP_QUERY_FPM_VALUES:
4086                values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
4087                values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
4088                status = i40iw_sc_query_fpm_values(
4089                                pcmdinfo->in.u.query_fpm_values.cqp,
4090                                pcmdinfo->in.u.query_fpm_values.scratch,
4091                                pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
4092                                &values_mem, true, I40IW_CQP_WAIT_EVENT);
4093                break;
4094        case OP_COMMIT_FPM_VALUES:
4095                values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
4096                values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
4097                status = i40iw_sc_commit_fpm_values(
4098                                pcmdinfo->in.u.commit_fpm_values.cqp,
4099                                pcmdinfo->in.u.commit_fpm_values.scratch,
4100                                pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
4101                                &values_mem,
4102                                true,
4103                                I40IW_CQP_WAIT_EVENT);
4104                break;
4105        default:
4106                status = I40IW_NOT_SUPPORTED;
4107                break;
4108        }
4109
4110        return status;
4111}
4112
4113/**
4114 * i40iw_process_cqp_cmd - execute a cqp command, or queue it if the ring is busy
4115 * @dev: sc device struct
4116 * @pcmdinfo: cqp command info
4117 */
4118enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
4119                                             struct cqp_commands_info *pcmdinfo)
4120{
4121        enum i40iw_status_code status = 0;
4122        unsigned long flags;
4123
4124        spin_lock_irqsave(&dev->cqp_lock, flags);
4125        if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
4126                status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4127        else
4128                list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
4129        spin_unlock_irqrestore(&dev->cqp_lock, flags);
4130        return status;
4131}
4132
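/*
 * Usage sketch (illustrative only; qp and cqp_request are hypothetical
 * caller-side names): callers fill a cqp_commands_info and hand it to
 * i40iw_process_cqp_cmd() below, which either executes it right away
 * or queues it on cqp_cmd_head until i40iw_process_bh() drains it.
 *
 *	struct cqp_commands_info cqp_info = {};
 *	enum i40iw_status_code status;
 *
 *	cqp_info.cqp_cmd = OP_SUSPEND;
 *	cqp_info.post_sq = 1;
 *	cqp_info.in.u.suspend_resume.cqp = dev->cqp;
 *	cqp_info.in.u.suspend_resume.qp = qp;
 *	cqp_info.in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
 *	status = i40iw_process_cqp_cmd(dev, &cqp_info);
 */
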
4133/**
4134 * i40iw_process_bh - called from tasklet for cqp list
4135 * @dev: sc device struct
4136 */
4137enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
4138{
4139        enum i40iw_status_code status = 0;
4140        struct cqp_commands_info *pcmdinfo;
4141        unsigned long flags;
4142
4143        spin_lock_irqsave(&dev->cqp_lock, flags);
4144        while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
4145                pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
4146
4147                status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
4148                if (status)
4149                        break;
4150        }
4151        spin_unlock_irqrestore(&dev->cqp_lock, flags);
4152        return status;
4153}
4154
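/*
 * A minimal sketch of the intended driver wiring (the tasklet below is
 * hypothetical, not defined in this file): once CCQ completions free
 * ring space, a bottom half re-submits the queued commands.
 *
 *	static void i40iw_cqp_bh(unsigned long data)
 *	{
 *		struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)data;
 *
 *		i40iw_process_bh(dev);
 *	}
 */
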
4155/**
4156 * i40iw_iwarp_opcode - return the RDMAP opcode of an incoming packet
4157 * @info: aeq info for the packet
4158 * @pkt: packet for error
4159 */
4160static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
4161{
4162        __be16 *mpa;
4163        u32 opcode = 0xffffffff;
4164
4165        if (info->q2_data_written) {
4166                mpa = (__be16 *)pkt;
4167                opcode = ntohs(mpa[1]) & 0xf;
4168        }
4169        return opcode;
4170}
4171
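/*
 * Layout note with a worked example (byte value hypothetical): pkt
 * points at the MPA header, so mpa[1] spans the DDP control byte
 * (pkt[2]) and the RDMAP control byte (pkt[3]); the low nibble of the
 * RDMAP control byte is the opcode.  E.g. with pkt[3] = 0x41,
 * ntohs(mpa[1]) & 0xf = 0x1, an RDMA Read Request
 * (RDMA_READ_REQ_OPCODE).
 */
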
4172/**
4173 * i40iw_locate_mpa - return pointer to mpa in the pkt
4174 * @pkt: packet with data
4175 */
4176static u8 *i40iw_locate_mpa(u8 *pkt)
4177{
4178        /* skip over ethernet header */
4179        pkt += I40IW_MAC_HLEN;
4180
4181        /* Skip over IP and TCP headers */
4182        pkt += 4 * (pkt[0] & 0x0f);
4183        pkt += 4 * ((pkt[12] >> 4) & 0x0f);
4184        return pkt;
4185}
4186
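/*
 * Worked example of the arithmetic above (IPv4 assumed, field values
 * hypothetical): after skipping I40IW_MAC_HLEN bytes of Ethernet,
 * pkt[0] & 0x0f is the IPv4 IHL, so IHL = 5 advances 4 * 5 = 20 bytes;
 * pkt[12] >> 4 is then the TCP data offset, so an offset of 8 advances
 * 4 * 8 = 32 bytes, leaving pkt at the first byte of the MPA header.
 */
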
4187/**
4188 * i40iw_setup_termhdr - fill the termhdr for a terminate pkt
4189 * @qp: sc qp ptr for pkt
4190 * @hdr: term hdr
4191 * @opcode: flush opcode for termhdr
4192 * @layer_etype: error layer + error type
4193 * @err: error code in the header
4194 */
4195static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
4196                                struct i40iw_terminate_hdr *hdr,
4197                                enum i40iw_flush_opcode opcode,
4198                                u8 layer_etype,
4199                                u8 err)
4200{
4201        qp->flush_code = opcode;
4202        hdr->layer_etype = layer_etype;
4203        hdr->error_code = err;
4204}
4205
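/*
 * Encoding note for the callers below: layer_etype packs the 4-bit
 * terminate layer in the high nibble and the error type in the low
 * nibble, e.g. (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, matching the
 * Layer/EType field of the iWARP terminate message.
 */
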
4206/**
4207 * i40iw_bld_terminate_hdr - build terminate message header
4208 * @qp: qp associated with received terminate AE
4209 * @info: the struct containing AE information
4210 */
4211static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
4212                                   struct i40iw_aeqe_info *info)
4213{
4214        u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4215        u16 ddp_seg_len;
4216        int copy_len = 0;
4217        u8 is_tagged = 0;
4218        u32 opcode;
4219        struct i40iw_terminate_hdr *termhdr;
4220
4221        termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
4222        memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
4223
4224        if (info->q2_data_written) {
4225                /* Use data from offending packet to fill in ddp & rdma hdrs */
4226                pkt = i40iw_locate_mpa(pkt);
4227                ddp_seg_len = ntohs(*(__be16 *)pkt);
4228                if (ddp_seg_len) {
4229                        copy_len = 2;
4230                        termhdr->hdrct = DDP_LEN_FLAG;
4231                        if (pkt[2] & 0x80) {
4232                                is_tagged = 1;
4233                                if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
4234                                        copy_len += TERM_DDP_LEN_TAGGED;
4235                                        termhdr->hdrct |= DDP_HDR_FLAG;
4236                                }
4237                        } else {
4238                                if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
4239                                        copy_len += TERM_DDP_LEN_UNTAGGED;
4240                                        termhdr->hdrct |= DDP_HDR_FLAG;
4241                                }
4242
4243                                if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
4244                                        if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
4245                                                copy_len += TERM_RDMA_LEN;
4246                                                termhdr->hdrct |= RDMA_HDR_FLAG;
4247                                        }
4248                                }
4249                        }
4250                }
4251        }
4252
4253        opcode = i40iw_iwarp_opcode(info, pkt);
4254
4255        switch (info->ae_id) {
4256        case I40IW_AE_AMP_UNALLOCATED_STAG:
4257                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4258                if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
4259                        i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4260                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
4261                else
4262                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4263                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4264                break;
4265        case I40IW_AE_AMP_BOUNDS_VIOLATION:
4266                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4267                if (info->q2_data_written)
4268                        i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4269                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
4270                else
4271                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4272                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
4273                break;
4274        case I40IW_AE_AMP_BAD_PD:
4275                switch (opcode) {
4276                case I40IW_OP_TYPE_RDMA_WRITE:
4277                        i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
4278                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
4279                        break;
4280                case I40IW_OP_TYPE_SEND_INV:
4281                case I40IW_OP_TYPE_SEND_SOL_INV:
4282                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4283                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
4284                        break;
4285                default:
4286                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4287                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
4288                }
4289                break;
4290        case I40IW_AE_AMP_INVALID_STAG:
4291                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4292                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4293                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
4294                break;
4295        case I40IW_AE_AMP_BAD_QP:
4296                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4297                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4298                break;
4299        case I40IW_AE_AMP_BAD_STAG_KEY:
4300        case I40IW_AE_AMP_BAD_STAG_INDEX:
4301                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4302                switch (opcode) {
4303                case I40IW_OP_TYPE_SEND_INV:
4304                case I40IW_OP_TYPE_SEND_SOL_INV:
4305                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4306                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
4307                        break;
4308                default:
4309                        i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4310                                            (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
4311                }
4312                break;
4313        case I40IW_AE_AMP_RIGHTS_VIOLATION:
4314        case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
4315        case I40IW_AE_PRIV_OPERATION_DENIED:
4316                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4317                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4318                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
4319                break;
4320        case I40IW_AE_AMP_TO_WRAP:
4321                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4322                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
4323                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
4324                break;
4325        case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
4326                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4327                                    (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
4328                break;
4329        case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
4330                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4331                                    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
4332                break;
4333        case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
4334        case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
4335                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4336                                    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4337                break;
4338        case I40IW_AE_LCE_QP_CATASTROPHIC:
4339        case I40IW_AE_DDP_NO_L_BIT:
4340                i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4341                                    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
4342                break;
4343        case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
4344        case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
4345                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4346                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
4347                break;
4348        case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
4349                qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
4350                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
4351                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
4352                break;
4353        case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
4354                if (is_tagged)
4355                        i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4356                                            (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
4357                else
4358                        i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4359                                            (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
4360                break;
4361        case I40IW_AE_DDP_UBE_INVALID_MO:
4362                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4363                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
4364                break;
4365        case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
4366                i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
4367                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
4368                break;
4369        case I40IW_AE_DDP_UBE_INVALID_QN:
4370                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4371                                    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
4372                break;
4373        case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
4374                i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
4375                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
4376                break;
4377        case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
4378                i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
4379                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
4380                break;
4381        default:
4382                i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
4383                                    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
4384                break;
4385        }
4386
4387        if (copy_len)
4388                memcpy(termhdr + 1, pkt, copy_len);
4389
4390        return sizeof(struct i40iw_terminate_hdr) + copy_len;
4391}
4392
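/*
 * Resulting q2 buffer layout (a summary of what the function above
 * builds): the i40iw_terminate_hdr sits at the start of q2_buf, and up
 * to copy_len bytes of the offending DDP length/DDP/RDMA headers are
 * copied directly after it at termhdr + 1.  The return value is the
 * total terminate payload length later passed to i40iw_term_modify_qp().
 */
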
4393/**
4394 * i40iw_terminate_send_fin() - Send fin for terminate message
4395 * @qp: qp associated with received terminate AE
4396 */
4397void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
4398{
4399        /* Send the fin only */
4400        i40iw_term_modify_qp(qp,
4401                             I40IW_QP_STATE_TERMINATE,
4402                             I40IWQP_TERM_SEND_FIN_ONLY,
4403                             0);
4404}
4405
4406/**
4407 * i40iw_terminate_connection() - handle a bad AE and send terminate to remote QP
4408 * @qp: qp associated with received terminate AE
4409 * @info: the struct containing AE information
4410 */
4411void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4412{
4413        u8 termlen = 0;
4414
4415        if (qp->term_flags & I40IW_TERM_SENT)
4416                return;         /* Sanity check */
4417
4418        /* Eventtype can change from bld_terminate_hdr */
4419        qp->eventtype = TERM_EVENT_QP_FATAL;
4420        termlen = i40iw_bld_terminate_hdr(qp, info);
4421        i40iw_terminate_start_timer(qp);
4422        qp->term_flags |= I40IW_TERM_SENT;
4423        i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
4424                             I40IWQP_TERM_SEND_TERM_ONLY, termlen);
4425}
4426
4427/**
4428 * i40iw_terminate_received - handle terminate received AE
4429 * @qp: qp associated with received terminate AE
4430 * @info: the struct containing AE information
4431 */
4432void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
4433{
4434        u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
4435        __be32 *mpa;
4436        u8 ddp_ctl;
4437        u8 rdma_ctl;
4438        u16 aeq_id = 0;
4439        struct i40iw_terminate_hdr *termhdr;
4440
4441        mpa = (__be32 *)i40iw_locate_mpa(pkt);
4442        if (info->q2_data_written) {
4443                /* did not validate the frame - do it now */
4444                ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
4445                rdma_ctl = ntohl(mpa[0]) & 0xff;
4446                if ((ddp_ctl & 0xc0) != 0x40)
4447                        aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
4448                else if ((ddp_ctl & 0x03) != 1)
4449                        aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
4450                else if (ntohl(mpa[2]) != 2)
4451                        aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
4452                else if (ntohl(mpa[3]) != 1)
4453                        aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
4454                else if (ntohl(mpa[4]) != 0)
4455                        aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
4456                else if ((rdma_ctl & 0xc0) != 0x40)
4457                        aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
4458
4459                info->ae_id = aeq_id;
4460                if (info->ae_id) {
4461                        /* Bad terminate recvd - send back a terminate */
4462                        i40iw_terminate_connection(qp, info);
4463                        return;
4464                }
4465        }
4466
4467        qp->term_flags |= I40IW_TERM_RCVD;
4468        qp->eventtype = TERM_EVENT_QP_FATAL;
4469        termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
4470        if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
4471            termhdr->layer_etype == RDMAP_REMOTE_OP) {
4472                i40iw_terminate_done(qp, 0);
4473        } else {
4474                i40iw_terminate_start_timer(qp);
4475                i40iw_terminate_send_fin(qp);
4476        }
4477}
4478
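/*
 * Validation summary (restating the checks above): a well-formed
 * terminate is an untagged DDP segment with DDP version 1, queue
 * number (mpa[2]) == 2, MSN (mpa[3]) == 1, MO (mpa[4]) == 0 and RDMAP
 * version 1 in the top bits of the RDMAP control byte; anything else
 * is answered with a locally generated terminate.
 */
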
4479/**
4480 * i40iw_sc_vsi_init - Init the vsi structure
4481 * @vsi: pointer to the vsi structure
4482 * @info: parameters to initialize vsi
4483 */
4484void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
4485{
4486        int i;
4487
4488        vsi->dev = info->dev;
4489        vsi->back_vsi = info->back_vsi;
4490        vsi->mss = info->params->mss;
4491        i40iw_fill_qos_list(info->params->qs_handle_list);
4492
4493        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
4494                vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
4495                i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
4496                            vsi->qos[i].qs_handle);
4497                spin_lock_init(&vsi->qos[i].lock);
4498                INIT_LIST_HEAD(&vsi->qos[i].qplist);
4499        }
4500}
4501
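/*
 * Minimal initialization sketch (iwdev and l2params are hypothetical
 * caller-side names, not defined in this file):
 *
 *	struct i40iw_vsi_init_info vsi_info = {};
 *
 *	vsi_info.dev = &iwdev->sc_dev;
 *	vsi_info.back_vsi = (void *)iwdev;
 *	vsi_info.params = &l2params;
 *	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);
 */
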
4502/**
4503 * i40iw_hw_stats_init - Initialize HW stats table
4504 * @stats: pestat struct
4505 * @fcn_idx: PCI fn id
4506 * @is_pf: Is it a PF?
4507 *
4508 * Populate the HW stats table with the register offset address for each
4509 * counter; the periodic stats timer is started separately.
4510 */
4511void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
4512{
4513        u32 stats_reg_offset;
4514        u32 stats_index;
4515        struct i40iw_dev_hw_stats_offsets *stats_table =
4516                &stats->hw_stats_offsets;
4517        struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4518
4519        if (is_pf) {
4520                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4521                                I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
4522                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4523                                I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
4524                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4525                                I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
4526                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4527                                I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
4528                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4529                                I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
4530                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4531                                I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
4532                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4533                                I40E_GLPES_PFTCPRTXSEG(fcn_idx);
4534                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4535                                I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
4536                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4537                                I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
4538
4539                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4540                                I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
4541                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4542                                I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
4543                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4544                                I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
4545                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4546                                I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
4547                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4548                                I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
4549                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4550                                I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
4551                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4552                                I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
4553                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4554                                I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
4555                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4556                                I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
4557                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4558                                I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
4559                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4560                                I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
4561                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4562                                I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
4563                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4564                                I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
4565                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4566                                I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
4567                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
4568                                I40E_GLPES_PFIP6TXMCPKTSLO(fcn_idx);
4569                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4570                                I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
4571                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4572                                I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
4573                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4574                                I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
4575                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4576                                I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
4577                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4578                                I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
4579                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4580                                I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
4581                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4582                                I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
4583                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4584                                I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
4585                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4586                                I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
4587                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4588                                I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
4589                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4590                                I40E_GLPES_PFRDMAVINVLO(fcn_idx);
4591        } else {
4592                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
4593                                I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
4594                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
4595                                I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
4596                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
4597                                I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
4598                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
4599                                I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
4600                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
4601                                I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
4602                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
4603                                I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
4604                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
4605                                I40E_GLPES_VFTCPRTXSEG(fcn_idx);
4606                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
4607                                I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
4608                stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
4609                                I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
4610
4611                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
4612                                I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
4613                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
4614                                I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
4615                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
4616                                I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
4617                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
4618                                I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
4619                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
4620                                I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
4621                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
4622                                I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
4623                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
4624                                I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
4625                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
4626                                I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
4627                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
4628                                I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
4629                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
4630                                I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
4631                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
4632                                I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
4633                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
4634                                I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
4635                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
4636                                I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
4637                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
4638                                I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
4639                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS] =
4640                                I40E_GLPES_VFIP6TXMCPKTSLO(fcn_idx);
4641                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
4642                                I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
4643                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
4644                                I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
4645                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
4646                                I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
4647                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
4648                                I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
4649                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
4650                                I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
4651                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
4652                                I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
4653                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
4654                                I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
4655                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
4656                                I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
4657                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
4658                                I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
4659                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
4660                                I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
4661                stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
4662                                I40E_GLPES_VFRDMAVINVLO(fcn_idx);
4663        }
4664
4665        for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4666             stats_index++) {
4667                stats_reg_offset = stats_table->stats_offset_64[stats_index];
4668                last_rd_stats->stats_value_64[stats_index] =
4669                        readq(stats->hw->hw_addr + stats_reg_offset);
4670        }
4671
4672        for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4673             stats_index++) {
4674                stats_reg_offset = stats_table->stats_offset_32[stats_index];
4675                last_rd_stats->stats_value_32[stats_index] =
4676                        i40iw_rd32(stats->hw, stats_reg_offset);
4677        }
4678}
4679
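/*
 * Note on the two loops above: the initial readq()/i40iw_rd32() pass
 * snapshots the current hardware counts into last_read_hw_stats, so
 * the first i40iw_hw_stats_read_32/64() after init reports a delta of
 * zero rather than the pre-existing register contents.
 */
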
4680/**
4681 * i40iw_hw_stats_read_32 - Read a 32-bit HW stats counter, accommodating roll-over
4682 * @stats: pestat struct
4683 * @index: index in HW stats table which contains offset reg-addr
4684 * @value: hw stats value
4685 */
4686void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
4687                            enum i40iw_hw_stats_index_32b index,
4688                            u64 *value)
4689{
4690        struct i40iw_dev_hw_stats_offsets *stats_table =
4691                &stats->hw_stats_offsets;
4692        struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4693        struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4694        u64 new_stats_value = 0;
4695        u32 stats_reg_offset = stats_table->stats_offset_32[index];
4696
4697        new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
4698        /*roll-over case */
4699        if (new_stats_value < last_rd_stats->stats_value_32[index])
4700                hw_stats->stats_value_32[index] += new_stats_value;
4701        else
4702                hw_stats->stats_value_32[index] +=
4703                        new_stats_value - last_rd_stats->stats_value_32[index];
4704        last_rd_stats->stats_value_32[index] = new_stats_value;
4705        *value = hw_stats->stats_value_32[index];
4706}
4707
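/*
 * Roll-over arithmetic, worked example (values hypothetical): with
 * last = 0xfffffff0 and a fresh read of new = 0x10, new < last flags a
 * 32-bit wrap.  The true delta would be 0x20 (0x10 up to the wrap plus
 * 0x10 after it), but only new itself is accumulated, so the pre-wrap
 * portion is dropped.  In the common case the plain delta new - last
 * is added.
 */
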
4708/**
4709 * i40iw_hw_stats_read_64 - Read a 64-bit HW stats counter, accommodating roll-over
4710 * @stats: pestat struct
4711 * @index: index in HW stats table which contains offset reg-addr
4712 * @value: hw stats value
4713 */
4714void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
4715                            enum i40iw_hw_stats_index_64b index,
4716                            u64 *value)
4717{
4718        struct i40iw_dev_hw_stats_offsets *stats_table =
4719                &stats->hw_stats_offsets;
4720        struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
4721        struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
4722        u64 new_stats_value = 0;
4723        u32 stats_reg_offset = stats_table->stats_offset_64[index];
4724
4725        new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
4726        /*roll-over case */
4727        if (new_stats_value < last_rd_stats->stats_value_64[index])
4728                hw_stats->stats_value_64[index] += new_stats_value;
4729        else
4730                hw_stats->stats_value_64[index] +=
4731                        new_stats_value - last_rd_stats->stats_value_64[index];
4732        last_rd_stats->stats_value_64[index] = new_stats_value;
4733        *value = hw_stats->stats_value_64[index];
4734}
4735
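/*
 * The 64-bit counters are read with a single readq() at the ...LO
 * offset stored in stats_offset_64, relying on the paired HI half
 * sitting directly above the LO register so one 64-bit read captures
 * both.  Roll-over is handled as in the 32-bit path, although a 64-bit
 * wrap is unlikely in practice.
 */
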
4736/**
4737 * i40iw_hw_stats_read_all - read all HW stat counters
4738 * @stats: pestat struct
4739 * @stats_values: hw stats structure
4740 *
4741 * Read all the HW stat counters, populating the hw_stats structure
4742 * of the passed-in vsi's pestat as well as the copy in stats_values.
4743 */
4744void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
4745                             struct i40iw_dev_hw_stats *stats_values)
4746{
4747        u32 stats_index;
4748        unsigned long flags;
4749
4750        spin_lock_irqsave(&stats->lock, flags);
4751
4752        for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4753             stats_index++)
4754                i40iw_hw_stats_read_32(stats, stats_index,
4755                                       &stats_values->stats_value_32[stats_index]);
4756        for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4757             stats_index++)
4758                i40iw_hw_stats_read_64(stats, stats_index,
4759                                       &stats_values->stats_value_64[stats_index]);
4760        spin_unlock_irqrestore(&stats->lock, flags);
4761}
4762
4763/**
4764 * i40iw_hw_stats_refresh_all - Update all HW stats structs
4765 * @stats: pestat struct
4766 *
4767 * Read all the HW stat counters to refresh the values in the hw_stats
4768 * structure of the passed-in vsi's pestat.
4769 */
4770void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
4771{
4772        u64 stats_value;
4773        u32 stats_index;
4774        unsigned long flags;
4775
4776        spin_lock_irqsave(&stats->lock, flags);
4777
4778        for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
4779             stats_index++)
4780                i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
4781        for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
4782             stats_index++)
4783                i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
4784        spin_unlock_irqrestore(&stats->lock, flags);
4785}
4786
4787/**
4788 * i40iw_get_fcn_id - Return the function id
4789 * @dev: pointer to the device
4790 */
4791static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
4792{
4793        u8 fcn_id = I40IW_INVALID_FCN_ID;
4794        u8 i;
4795
4796        for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
4797                if (!dev->fcn_id_array[i]) {
4798                        fcn_id = i;
4799                        dev->fcn_id_array[i] = true;
4800                        break;
4801                }
4802        return fcn_id;
4803}
4804
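/*
 * Allocation note: only slots in [I40IW_FIRST_NON_PF_STAT,
 * I40IW_MAX_STATS_COUNT) are handed out here, and a claimed slot is
 * returned only when i40iw_vsi_stats_free() clears
 * fcn_id_array[fcn_id] again.
 */
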
4805/**
4806 * i40iw_vsi_stats_init - Initialize the vsi statistics
4807 * @vsi: pointer to the vsi structure
4808 * @info: The info structure used for initialization
4809 */
4810enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
4811{
4812        u8 fcn_id = info->fcn_id;
4813
4814        if (info->alloc_fcn_id)
4815                fcn_id = i40iw_get_fcn_id(vsi->dev);
4816
4817        if (fcn_id == I40IW_INVALID_FCN_ID)
4818                return I40IW_ERR_NOT_READY;
4819
4820        vsi->pestat = info->pestat;
4821        vsi->pestat->hw = vsi->dev->hw;
4822
4823        if (info->stats_initialize) {
4824                i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
4825                spin_lock_init(&vsi->pestat->lock);
4826                i40iw_hw_stats_start_timer(vsi);
4827        }
4828        vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
4829        vsi->fcn_id = fcn_id;
4830        return I40IW_SUCCESS;
4831}
4832
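/*
 * Usage sketch (field values hypothetical; iwdev is a caller-side
 * name): a caller that does not own a fixed function id lets the
 * helper allocate one.
 *
 *	struct i40iw_vsi_stats_info stats_info = {};
 *
 *	stats_info.pestat = &iwdev->vsi_pestat;
 *	stats_info.fcn_id = 0;
 *	stats_info.alloc_fcn_id = true;
 *	stats_info.stats_initialize = true;
 *	status = i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
 */
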
4833/**
4834 * i40iw_vsi_stats_free - Free the vsi stats
4835 * @vsi: pointer to the vsi structure
4836 */
4837void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
4838{
4839        u8 fcn_id = vsi->fcn_id;
4840
4841        if ((vsi->stats_fcn_id_alloc) && (fcn_id != I40IW_INVALID_FCN_ID))
4842                vsi->dev->fcn_id_array[fcn_id] = false;
4843        i40iw_hw_stats_stop_timer(vsi);
4844}
4845
4846static struct i40iw_cqp_ops iw_cqp_ops = {
4847        .cqp_init = i40iw_sc_cqp_init,
4848        .cqp_create = i40iw_sc_cqp_create,
4849        .cqp_post_sq = i40iw_sc_cqp_post_sq,
4850        .cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
4851        .cqp_destroy = i40iw_sc_cqp_destroy,
4852        .poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
4853};
4854
4855static struct i40iw_ccq_ops iw_ccq_ops = {
4856        .ccq_init = i40iw_sc_ccq_init,
4857        .ccq_create = i40iw_sc_ccq_create,
4858        .ccq_destroy = i40iw_sc_ccq_destroy,
4859        .ccq_create_done = i40iw_sc_ccq_create_done,
4860        .ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
4861        .ccq_arm = i40iw_sc_ccq_arm
4862};
4863
4864static struct i40iw_ceq_ops iw_ceq_ops = {
4865        .ceq_init = i40iw_sc_ceq_init,
4866        .ceq_create = i40iw_sc_ceq_create,
4867        .cceq_create_done = i40iw_sc_cceq_create_done,
4868        .cceq_destroy_done = i40iw_sc_cceq_destroy_done,
4869        .cceq_create = i40iw_sc_cceq_create,
4870        .ceq_destroy = i40iw_sc_ceq_destroy,
4871        .process_ceq = i40iw_sc_process_ceq
4872};
4873
4874static struct i40iw_aeq_ops iw_aeq_ops = {
4875        .aeq_init = i40iw_sc_aeq_init,
4876        .aeq_create = i40iw_sc_aeq_create,
4877        .aeq_destroy = i40iw_sc_aeq_destroy,
4878        .get_next_aeqe = i40iw_sc_get_next_aeqe,
4879        .repost_aeq_entries = i40iw_sc_repost_aeq_entries,
4880        .aeq_create_done = i40iw_sc_aeq_create_done,
4881        .aeq_destroy_done = i40iw_sc_aeq_destroy_done
4882};
4883
4884/* iwarp pd ops */
4885static struct i40iw_pd_ops iw_pd_ops = {
4886        .pd_init = i40iw_sc_pd_init,
4887};
4888
4889static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
4890        .qp_init = i40iw_sc_qp_init,
4891        .qp_create = i40iw_sc_qp_create,
4892        .qp_modify = i40iw_sc_qp_modify,
4893        .qp_destroy = i40iw_sc_qp_destroy,
4894        .qp_flush_wqes = i40iw_sc_qp_flush_wqes,
4895        .qp_upload_context = i40iw_sc_qp_upload_context,
4896        .qp_setctx = i40iw_sc_qp_setctx,
4897        .qp_send_lsmm = i40iw_sc_send_lsmm,
4898        .qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
4899        .qp_send_rtt = i40iw_sc_send_rtt,
4900        .qp_post_wqe0 = i40iw_sc_post_wqe0,
4901        .iw_mr_fast_register = i40iw_sc_mr_fast_register
4902};
4903
4904static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
4905        .cq_init = i40iw_sc_cq_init,
4906        .cq_create = i40iw_sc_cq_create,
4907        .cq_destroy = i40iw_sc_cq_destroy,
4908        .cq_modify = i40iw_sc_cq_modify,
4909};
4910
4911static struct i40iw_mr_ops iw_mr_ops = {
4912        .alloc_stag = i40iw_sc_alloc_stag,
4913        .mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
4914        .mr_reg_shared = i40iw_sc_mr_reg_shared,
4915        .dealloc_stag = i40iw_sc_dealloc_stag,
4916        .query_stag = i40iw_sc_query_stag,
4917        .mw_alloc = i40iw_sc_mw_alloc
4918};
4919
4920static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
4921        .manage_push_page = i40iw_sc_manage_push_page,
4922        .manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
4923        .set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
4924        .commit_fpm_values = i40iw_sc_commit_fpm_values,
4925        .query_fpm_values = i40iw_sc_query_fpm_values,
4926        .static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
4927        .add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
4928        .del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
4929        .query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
4930        .manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
4931        .manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
4932        .alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
4933        .add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
4934        .del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
4935        .cqp_nop = i40iw_sc_cqp_nop,
4936        .commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
4937        .query_fpm_values_done = i40iw_sc_query_fpm_values_done,
4938        .manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
4939        .update_suspend_qp = i40iw_sc_suspend_qp,
4940        .update_resume_qp = i40iw_sc_resume_qp
4941};
4942
4943static struct i40iw_hmc_ops iw_hmc_ops = {
4944        .init_iw_hmc = i40iw_sc_init_iw_hmc,
4945        .parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
4946        .configure_iw_fpm = i40iw_sc_configure_iw_fpm,
4947        .parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
4948        .create_hmc_object = i40iw_sc_create_hmc_obj,
4949        .del_hmc_object = i40iw_sc_del_hmc_obj
4950};
4951
4952/**
4953 * i40iw_device_init - Initialize IWARP device
4954 * @dev: IWARP device pointer
4955 * @info: IWARP init info
4956 */
4957enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
4958                                         struct i40iw_device_init_info *info)
4959{
4960        u32 val;
4961        u32 vchnl_ver = 0;
4962        u16 hmc_fcn = 0;
4963        enum i40iw_status_code ret_code = 0;
4964        u8 db_size;
4965
4966        spin_lock_init(&dev->cqp_lock);
4967        INIT_LIST_HEAD(&dev->cqp_cmd_head);             /* for the cqp commands backlog. */
4968
4969        i40iw_device_init_uk(&dev->dev_uk);
4970
4971        dev->debug_mask = info->debug_mask;
4972
4973        dev->hmc_fn_id = info->hmc_fn_id;
4974        dev->exception_lan_queue = info->exception_lan_queue;
4975        dev->is_pf = info->is_pf;
4976
4977        dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
4978        dev->fpm_query_buf = info->fpm_query_buf;
4979
4980        dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
4981        dev->fpm_commit_buf = info->fpm_commit_buf;
4982
4983        dev->hw = info->hw;
4984        dev->hw->hw_addr = info->bar0;
4985
4986        if (dev->is_pf) {
4987                val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
4988                dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
4989
4990                val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
4991                db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
4992                if ((db_size != I40IW_PE_DB_SIZE_4M) &&
4993                    (db_size != I40IW_PE_DB_SIZE_8M)) {
4994                        i40iw_debug(dev, I40IW_DEBUG_DEV,
4995                                    "%s: PE doorbell is not enabled in CSR val 0x%x\n",
4996                                    __func__, val);
4997                        ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
4998                        return ret_code;
4999                }
5000                dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
5001                dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
5002        } else {
5003                dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
5004        }
5005
5006        dev->cqp_ops = &iw_cqp_ops;
5007        dev->ccq_ops = &iw_ccq_ops;
5008        dev->ceq_ops = &iw_ceq_ops;
5009        dev->aeq_ops = &iw_aeq_ops;
5010        dev->cqp_misc_ops = &iw_cqp_misc_ops;
5011        dev->iw_pd_ops = &iw_pd_ops;
5012        dev->iw_priv_qp_ops = &iw_priv_qp_ops;
5013        dev->iw_priv_cq_ops = &iw_priv_cq_ops;
5014        dev->mr_ops = &iw_mr_ops;
5015        dev->hmc_ops = &iw_hmc_ops;
5016        dev->vchnl_if.vchnl_send = info->vchnl_send;
5017        if (dev->vchnl_if.vchnl_send)
5018                dev->vchnl_up = true;
5019        else
5020                dev->vchnl_up = false;
5021        if (!dev->is_pf) {
5022                dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
5023                ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
5024                if (!ret_code) {
5025                        i40iw_debug(dev, I40IW_DEBUG_DEV,
5026                                    "%s: Get Channel version rc = 0x%0x, version is %u\n",
5027                                    __func__, ret_code, vchnl_ver);
5028                        ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
5029                        if (!ret_code) {
5030                                i40iw_debug(dev, I40IW_DEBUG_DEV,
5031                                            "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
5032                                            __func__, ret_code, hmc_fcn);
5033                                dev->hmc_fn_id = (u8)hmc_fcn;
5034                        }
5035                }
5036        }
5037        dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
5038
5039        return ret_code;
5040}
5041
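/*
 * Initialization sketch (all right-hand-side names are hypothetical;
 * only fields consumed above are shown):
 *
 *	struct i40iw_device_init_info init_info = {};
 *
 *	init_info.is_pf = true;
 *	init_info.hmc_fn_id = pf_fcn_id;
 *	init_info.hw = &iwdev->hw;
 *	init_info.bar0 = bar0_va;
 *	init_info.fpm_query_buf = query_va;
 *	init_info.fpm_query_buf_pa = query_pa;
 *	init_info.fpm_commit_buf = commit_va;
 *	init_info.fpm_commit_buf_pa = commit_pa;
 *	init_info.exception_lan_queue = 1;
 *	init_info.vchnl_send = vchnl_send_cb;
 *	status = i40iw_device_init(&iwdev->sc_dev, &init_info);
 */
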