linux/drivers/infiniband/hw/i40iw/i40iw_utils.c
/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*       copyright notice, this list of conditions and the following
*       disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*       copyright notice, this list of conditions and the following
*       disclaimer in the documentation and/or other materials
*       provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include "i40iw.h"

/**
 * i40iw_arp_table - manage arp table
 * @iwdev: iwarp device
 * @ip_addr: ip address for device
 * @ipv4: flag indicating if address is ipv4
 * @mac_addr: mac address ptr
 * @action: add, resolve or delete
 */
int i40iw_arp_table(struct i40iw_device *iwdev,
                    u32 *ip_addr,
                    bool ipv4,
                    u8 *mac_addr,
                    u32 action)
{
        int arp_index;
        int err;
        u32 ip[4];

        if (ipv4) {
                memset(ip, 0, sizeof(ip));
                ip[0] = *ip_addr;
        } else {
                memcpy(ip, ip_addr, sizeof(ip));
        }

        for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
                if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
                        break;
        switch (action) {
        case I40IW_ARP_ADD:
                if (arp_index != iwdev->arp_table_size)
                        return -1;

                arp_index = 0;
                err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
                                           iwdev->arp_table_size,
                                           (u32 *)&arp_index,
                                           &iwdev->next_arp_index);

                if (err)
                        return err;

                memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
                ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
                break;
        case I40IW_ARP_RESOLVE:
                if (arp_index == iwdev->arp_table_size)
                        return -1;
                break;
        case I40IW_ARP_DELETE:
                if (arp_index == iwdev->arp_table_size)
                        return -1;
                memset(iwdev->arp_table[arp_index].ip_addr, 0,
                       sizeof(iwdev->arp_table[arp_index].ip_addr));
                eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
                i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
                break;
        default:
                return -1;
        }
        return arp_index;
}
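
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * adding and then resolving an IPv4 entry.  The address is passed in host
 * byte order, as the notifier handlers below do with ntohl().
 *
 *	u32 ip = ntohl(in_aton("192.168.1.10"));
 *	u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	int idx;
 *
 *	idx = i40iw_arp_table(iwdev, &ip, true, mac, I40IW_ARP_ADD);
 *	if (idx >= 0)
 *		idx = i40iw_arp_table(iwdev, &ip, true, NULL, I40IW_ARP_RESOLVE);
 *
 * A negative return means the action failed (e.g. adding a duplicate
 * entry or resolving a missing one); otherwise it is the table index.
 */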

/**
 * i40iw_wr32 - write 32 bits to hw register
 * @hw: hardware information including registers
 * @reg: register offset
 * @value: value to write to register
 */
inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
{
        writel(value, hw->hw_addr + reg);
}

/**
 * i40iw_rd32 - read a 32 bit hw register
 * @hw: hardware information including registers
 * @reg: register offset
 *
 * Return value of register content
 */
inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
{
        return readl(hw->hw_addr + reg);
}

/**
 * i40iw_inetaddr_event - system notifier for ipv4 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: in ifaddr ptr
 */
int i40iw_inetaddr_event(struct notifier_block *notifier,
                         unsigned long event,
                         void *ptr)
{
        struct in_ifaddr *ifa = ptr;
        struct net_device *event_netdev = ifa->ifa_dev->dev;
        struct net_device *netdev;
        struct net_device *upper_dev;
        struct i40iw_device *iwdev;
        struct i40iw_handler *hdl;
        u32 local_ipaddr;
        u32 action = I40IW_ARP_ADD;

        hdl = i40iw_find_netdev(event_netdev);
        if (!hdl)
                return NOTIFY_DONE;

        iwdev = &hdl->device;
        if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
                return NOTIFY_DONE;

        netdev = iwdev->ldev->netdev;
        upper_dev = netdev_master_upper_dev_get(netdev);
        if (netdev != event_netdev)
                return NOTIFY_DONE;

        if (upper_dev) {
                struct in_device *in;

                rcu_read_lock();
                in = __in_dev_get_rcu(upper_dev);

                /* the master may have no in_device; treat as no address */
                if (!in || !in->ifa_list)
                        local_ipaddr = 0;
                else
                        local_ipaddr = ntohl(in->ifa_list->ifa_address);

                rcu_read_unlock();
        } else {
                local_ipaddr = ntohl(ifa->ifa_address);
        }
        switch (event) {
        case NETDEV_DOWN:
                action = I40IW_ARP_DELETE;
                /* Fall through */
        case NETDEV_UP:
                /* Fall through */
        case NETDEV_CHANGEADDR:

                /* Just skip if no need to handle ARP cache */
                if (!local_ipaddr)
                        break;

                i40iw_manage_arp_cache(iwdev,
                                       netdev->dev_addr,
                                       &local_ipaddr,
                                       true,
                                       action);
                i40iw_if_notify(iwdev, netdev, &local_ipaddr, true,
                                action == I40IW_ARP_ADD);
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}
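
/*
 * Note on the upper-device handling above: when the i40iw netdev is a
 * lower device (e.g. enslaved to a bond or under a VLAN), IPv4 addresses
 * live on the master returned by netdev_master_upper_dev_get(), so the
 * handler reads the address from there to keep the ARP cache in line
 * with what peers actually see.
 */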

/**
 * i40iw_inet6addr_event - system notifier for ipv6 addr events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: in6 ifaddr ptr
 */
int i40iw_inet6addr_event(struct notifier_block *notifier,
                          unsigned long event,
                          void *ptr)
{
        struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
        struct net_device *event_netdev = ifa->idev->dev;
        struct net_device *netdev;
        struct i40iw_device *iwdev;
        struct i40iw_handler *hdl;
        u32 local_ipaddr6[4];
        u32 action = I40IW_ARP_ADD;

        hdl = i40iw_find_netdev(event_netdev);
        if (!hdl)
                return NOTIFY_DONE;

        iwdev = &hdl->device;
        if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
                return NOTIFY_DONE;

        netdev = iwdev->ldev->netdev;
        if (netdev != event_netdev)
                return NOTIFY_DONE;

        i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
        switch (event) {
        case NETDEV_DOWN:
                action = I40IW_ARP_DELETE;
                /* Fall through */
        case NETDEV_UP:
                /* Fall through */
        case NETDEV_CHANGEADDR:
                i40iw_manage_arp_cache(iwdev,
                                       netdev->dev_addr,
                                       local_ipaddr6,
                                       false,
                                       action);
                i40iw_if_notify(iwdev, netdev, local_ipaddr6, false,
                                action == I40IW_ARP_ADD);
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

/**
 * i40iw_net_event - system notifier for netevents
 * @notifier: not used
 * @event: event for notifier
 * @ptr: neighbour ptr
 */
int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
{
        struct neighbour *neigh = ptr;
        struct i40iw_device *iwdev;
        struct i40iw_handler *iwhdl;
        __be32 *p;
        u32 local_ipaddr[4];

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                iwhdl = i40iw_find_netdev(neigh->dev);
                if (!iwhdl)
                        return NOTIFY_DONE;
                iwdev = &iwhdl->device;
                if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
                        return NOTIFY_DONE;
                p = (__be32 *)neigh->primary_key;
                i40iw_copy_ip_ntohl(local_ipaddr, p);
                if (neigh->nud_state & NUD_VALID) {
                        i40iw_manage_arp_cache(iwdev,
                                               neigh->ha,
                                               local_ipaddr,
                                               false,
                                               I40IW_ARP_ADD);

                } else {
                        i40iw_manage_arp_cache(iwdev,
                                               neigh->ha,
                                               local_ipaddr,
                                               false,
                                               I40IW_ARP_DELETE);
                }
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

/**
 * i40iw_netdevice_event - system notifier for netdev events
 * @notifier: not used
 * @event: event for notifier
 * @ptr: netdev notifier info ptr
 */
int i40iw_netdevice_event(struct notifier_block *notifier,
                          unsigned long event,
                          void *ptr)
{
        struct net_device *event_netdev;
        struct net_device *netdev;
        struct i40iw_device *iwdev;
        struct i40iw_handler *hdl;

        event_netdev = netdev_notifier_info_to_dev(ptr);

        hdl = i40iw_find_netdev(event_netdev);
        if (!hdl)
                return NOTIFY_DONE;

        iwdev = &hdl->device;
        if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing)
                return NOTIFY_DONE;

        netdev = iwdev->ldev->netdev;
        if (netdev != event_netdev)
                return NOTIFY_DONE;

        iwdev->iw_status = 1;

        switch (event) {
        case NETDEV_DOWN:
                iwdev->iw_status = 0;
                /* Fall through */
        case NETDEV_UP:
                i40iw_port_ibevent(iwdev);
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

/**
 * i40iw_get_cqp_request - get cqp struct
 * @cqp: device cqp ptr
 * @wait: cqp to be used in wait mode
 */
struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
{
        struct i40iw_cqp_request *cqp_request = NULL;
        unsigned long flags;

        spin_lock_irqsave(&cqp->req_lock, flags);
        if (!list_empty(&cqp->cqp_avail_reqs)) {
                cqp_request = list_entry(cqp->cqp_avail_reqs.next,
                                         struct i40iw_cqp_request, list);
                list_del_init(&cqp_request->list);
        }
        spin_unlock_irqrestore(&cqp->req_lock, flags);
        if (!cqp_request) {
                cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
                if (cqp_request) {
                        cqp_request->dynamic = true;
                        INIT_LIST_HEAD(&cqp_request->list);
                        init_waitqueue_head(&cqp_request->waitq);
                }
        }
        if (!cqp_request) {
                i40iw_pr_err("CQP Request Fail: No Memory");
                return NULL;
        }

        if (wait) {
                atomic_set(&cqp_request->refcount, 2);
                cqp_request->waiting = true;
        } else {
                atomic_set(&cqp_request->refcount, 1);
        }
        return cqp_request;
}
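
/*
 * The refcount of 2 for waited requests is deliberate: one reference is
 * dropped by the CQP completion path and one by the waiter in
 * i40iw_wait_event(), so the request is recycled only after both sides
 * are done with it.  Fire-and-forget requests start at 1 and are freed
 * by the completion path alone.
 */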

/**
 * i40iw_free_cqp_request - free cqp request
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
{
        struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);
        unsigned long flags;

        if (cqp_request->dynamic) {
                kfree(cqp_request);
        } else {
                cqp_request->request_done = false;
                cqp_request->callback_fcn = NULL;
                cqp_request->waiting = false;

                spin_lock_irqsave(&cqp->req_lock, flags);
                list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
                spin_unlock_irqrestore(&cqp->req_lock, flags);
        }
        wake_up(&iwdev->close_wq);
}

/**
 * i40iw_put_cqp_request - dec ref count and free if 0
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
                           struct i40iw_cqp_request *cqp_request)
{
        if (atomic_dec_and_test(&cqp_request->refcount))
                i40iw_free_cqp_request(cqp, cqp_request);
}

/**
 * i40iw_free_pending_cqp_request - free pending cqp request objs
 * @cqp: cqp ptr
 * @cqp_request: to be put back in cqp list
 */
static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp,
                                           struct i40iw_cqp_request *cqp_request)
{
        struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp);

        if (cqp_request->waiting) {
                cqp_request->compl_info.error = true;
                cqp_request->request_done = true;
                wake_up(&cqp_request->waitq);
        }
        i40iw_put_cqp_request(cqp, cqp_request);
        wait_event_timeout(iwdev->close_wq,
                           !atomic_read(&cqp_request->refcount),
                           1000);
}

/**
 * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions
 * @iwdev: iwarp device
 */
void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev)
{
        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
        struct i40iw_cqp *cqp = &iwdev->cqp;
        struct i40iw_cqp_request *cqp_request = NULL;
        struct cqp_commands_info *pcmdinfo = NULL;
        u32 i, pending_work, wqe_idx;

        pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring);
        wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring);
        for (i = 0; i < pending_work; i++) {
                cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx];
                if (cqp_request)
                        i40iw_free_pending_cqp_request(cqp, cqp_request);
                wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring);
        }

        while (!list_empty(&dev->cqp_cmd_head)) {
                pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
                cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info);
                if (cqp_request)
                        i40iw_free_pending_cqp_request(cqp, cqp_request);
        }
}

/**
 * i40iw_free_qp - callback after destroy qp completes
 * @cqp_request: cqp request for destroy qp
 * @num: not used
 */
static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
{
        struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
        struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
        struct i40iw_device *iwdev;
        u32 qp_num = iwqp->ibqp.qp_num;

        iwdev = iwqp->iwdev;

        i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
        i40iw_free_qp_resources(iwdev, iwqp, qp_num);
        i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_wait_event - wait for completion
 * @iwdev: iwarp device
 * @cqp_request: cqp request to wait
 */
static int i40iw_wait_event(struct i40iw_device *iwdev,
                            struct i40iw_cqp_request *cqp_request)
{
        struct cqp_commands_info *info = &cqp_request->info;
        struct i40iw_cqp *iwcqp = &iwdev->cqp;
        struct i40iw_cqp_timeout cqp_timeout;
        bool cqp_error = false;
        int err_code = 0;

        memset(&cqp_timeout, 0, sizeof(cqp_timeout));
        cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS];
        do {
                if (wait_event_timeout(cqp_request->waitq,
                                       cqp_request->request_done, CQP_COMPL_WAIT_TIME))
                        break;

                i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev);

                if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
                        continue;

                i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd);
                err_code = -ETIME;
                if (!iwdev->reset) {
                        iwdev->reset = true;
                        i40iw_request_reset(iwdev);
                }
                goto done;
        } while (1);
        cqp_error = cqp_request->compl_info.error;
        if (cqp_error) {
                i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
                             info->cqp_cmd, cqp_request->compl_info.maj_err_code,
                             cqp_request->compl_info.min_err_code);
                err_code = -EPROTO;
                goto done;
        }
done:
        i40iw_put_cqp_request(iwcqp, cqp_request);
        return err_code;
}

/**
 * i40iw_handle_cqp_op - process cqp command
 * @iwdev: iwarp device
 * @cqp_request: cqp request to process
 */
enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
                                           struct i40iw_cqp_request
                                           *cqp_request)
{
        struct i40iw_sc_dev *dev = &iwdev->sc_dev;
        enum i40iw_status_code status;
        struct cqp_commands_info *info = &cqp_request->info;
        int err_code = 0;

        if (iwdev->reset) {
                i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
                return I40IW_ERR_CQP_COMPL_ERROR;
        }

        status = i40iw_process_cqp_cmd(dev, info);
        if (status) {
                i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
                i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
                return status;
        }
        if (cqp_request->waiting)
                err_code = i40iw_wait_event(iwdev, cqp_request);
        if (err_code)
                status = I40IW_ERR_CQP_COMPL_ERROR;
        return status;
}
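
/*
 * Every CQP command in this file follows the same pattern; a minimal
 * sketch (hypothetical OP_FOO command, field names as used below):
 *
 *	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
 *	if (!cqp_request)
 *		return I40IW_ERR_NO_MEMORY;
 *	cqp_info = &cqp_request->info;
 *	cqp_info->cqp_cmd = OP_FOO;
 *	cqp_info->post_sq = 1;
 *	cqp_info->in.u.foo.scratch = (uintptr_t)cqp_request;
 *	status = i40iw_handle_cqp_op(iwdev, cqp_request);
 *
 * i40iw_handle_cqp_op() consumes the request on every path: it frees it
 * on submission failure and otherwise hands the reference(s) to the
 * completion/wait logic, so callers never free it themselves.
 */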

/**
 * i40iw_add_devusecount - add dev refcount
 * @iwdev: dev for refcount
 */
void i40iw_add_devusecount(struct i40iw_device *iwdev)
{
        atomic64_inc(&iwdev->use_count);
}

/**
 * i40iw_rem_devusecount - decrement refcount for dev
 * @iwdev: device
 */
void i40iw_rem_devusecount(struct i40iw_device *iwdev)
{
        if (!atomic64_dec_and_test(&iwdev->use_count))
                return;
        wake_up(&iwdev->close_wq);
}

/**
 * i40iw_add_pdusecount - add pd refcount
 * @iwpd: pd for refcount
 */
void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
{
        atomic_inc(&iwpd->usecount);
}

/**
 * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
 * @iwpd: pd for refcount
 * @iwdev: iwarp device
 */
void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
{
        if (!atomic_dec_and_test(&iwpd->usecount))
                return;
        i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
}

/**
 * i40iw_add_ref - add refcount for qp
 * @ibqp: iwarp qp
 */
void i40iw_add_ref(struct ib_qp *ibqp)
{
        struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;

        atomic_inc(&iwqp->refcount);
}

/**
 * i40iw_rem_ref - rem refcount for qp and free if 0
 * @ibqp: iwarp qp
 */
void i40iw_rem_ref(struct ib_qp *ibqp)
{
        struct i40iw_qp *iwqp;
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_device *iwdev;
        u32 qp_num;
        unsigned long flags;

        iwqp = to_iwqp(ibqp);
        iwdev = iwqp->iwdev;
        spin_lock_irqsave(&iwdev->qptable_lock, flags);
        if (!atomic_dec_and_test(&iwqp->refcount)) {
                spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
                return;
        }

        qp_num = iwqp->ibqp.qp_num;
        iwdev->qp_table[qp_num] = NULL;
        spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
        if (!cqp_request)
                return;

        cqp_request->callback_fcn = i40iw_free_qp;
        cqp_request->param = (void *)&iwqp->sc_qp;
        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = OP_QP_DESTROY;
        cqp_info->post_sq = 1;
        cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
        cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
        cqp_info->in.u.qp_destroy.remove_hash_idx = true;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (!status)
                return;

        i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
        i40iw_free_qp_resources(iwdev, iwqp, qp_num);
        i40iw_rem_devusecount(iwdev);
}

/**
 * i40iw_get_qp - get qp address
 * @device: iwarp device
 * @qpn: qp number
 */
struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
{
        struct i40iw_device *iwdev = to_iwdev(device);

        if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
                return NULL;

        return &iwdev->qp_table[qpn]->ibqp;
}

/**
 * i40iw_debug_buf - print debug msg and buffer if mask set
 * @dev: hardware control device structure
 * @mask: mask to compare if to print debug buffer
 * @desc: description of buffer being printed
 * @buf: points to buffer addr
 * @size: size of buffer to print
 */
void i40iw_debug_buf(struct i40iw_sc_dev *dev,
                     enum i40iw_debug_flag mask,
                     char *desc,
                     u64 *buf,
                     u32 size)
{
        u32 i;

        if (!(dev->debug_mask & mask))
                return;
        i40iw_debug(dev, mask, "%s\n", desc);
        i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
                    (unsigned long long)virt_to_phys(buf));

        for (i = 0; i < size; i += 8)
                i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
}

/**
 * i40iw_get_hw_addr - return hw addr
 * @par: opaque pointer to the shared device (struct i40iw_sc_dev)
 */
u8 __iomem *i40iw_get_hw_addr(void *par)
{
        struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;

        return dev->hw->hw_addr;
}

/**
 * i40iw_remove_head - return head entry and remove from list
 * @list: list for entry
 */
void *i40iw_remove_head(struct list_head *list)
{
        struct list_head *entry;

        if (list_empty(list))
                return NULL;

        entry = (void *)list->next;
        list_del(entry);
        return (void *)entry;
}

/**
 * i40iw_allocate_dma_mem - Memory alloc helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 */
enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
                                              struct i40iw_dma_mem *mem,
                                              u64 size,
                                              u32 alignment)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

        if (!mem)
                return I40IW_ERR_PARAM;
        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&pcidev->dev, mem->size,
                                     (dma_addr_t *)&mem->pa, GFP_KERNEL);
        if (!mem->va)
                return I40IW_ERR_NO_MEMORY;
        return 0;
}
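
/*
 * A minimal allocate/free sketch (hypothetical caller): DMA buffers are
 * always released with the matching helper below so size/va/pa stay
 * consistent.
 *
 *	struct i40iw_dma_mem mem;
 *
 *	if (!i40iw_allocate_dma_mem(hw, &mem, 4096, 256)) {
 *		... use mem.va (CPU address) and mem.pa (bus address) ...
 *		i40iw_free_dma_mem(hw, &mem);
 *	}
 */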

/**
 * i40iw_free_dma_mem - Memory free helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 */
void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;

        if (!mem || !mem->va)
                return;

        dma_free_coherent(&pcidev->dev, mem->size,
                          mem->va, (dma_addr_t)mem->pa);
        mem->va = NULL;
}

/**
 * i40iw_allocate_virt_mem - virtual memory alloc helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 */
enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
                                               struct i40iw_virt_mem *mem,
                                               u32 size)
{
        if (!mem)
                return I40IW_ERR_PARAM;

        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (!mem->va)
                return I40IW_ERR_NO_MEMORY;
        return 0;
}

/**
 * i40iw_free_virt_mem - virtual memory free helper fn
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 */
enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
                                           struct i40iw_virt_mem *mem)
{
        if (!mem)
                return I40IW_ERR_PARAM;
        /*
         * mem->va points to the parent of mem, so both mem and mem->va
         * can not be touched once mem->va is freed
         */
        kfree(mem->va);
        return 0;
}

/**
 * i40iw_cqp_sds_cmd - create cqp command for sd
 * @dev: hardware control device structure
 * @sdinfo: information for sd cqp
 */
enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
                                         struct i40iw_update_sds_info *sdinfo)
{
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
        if (!cqp_request)
                return I40IW_ERR_NO_MEMORY;
        cqp_info = &cqp_request->info;
        memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
               sizeof(cqp_info->in.u.update_pe_sds.info));
        cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
        cqp_info->post_sq = 1;
        cqp_info->in.u.update_pe_sds.dev = dev;
        cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP Update SD's fail");
        return status;
}

/**
 * i40iw_qp_suspend_resume - cqp command for suspend/resume
 * @dev: hardware control device structure
 * @qp: hardware control qp
 * @suspend: flag if suspend or resume
 */
void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend)
{
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
        struct i40iw_cqp_request *cqp_request;
        struct i40iw_sc_cqp *cqp = dev->cqp;
        struct cqp_commands_info *cqp_info;
        enum i40iw_status_code status;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
        if (!cqp_request)
                return;

        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = (suspend) ? OP_SUSPEND : OP_RESUME;
        cqp_info->in.u.suspend_resume.cqp = cqp;
        cqp_info->in.u.suspend_resume.qp = qp;
        cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
}

/**
 * i40iw_term_modify_qp - modify qp for term message
 * @qp: hardware control qp
 * @next_state: qp's next state
 * @term: terminate code
 * @term_len: length
 */
void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
{
        struct i40iw_qp *iwqp;

        iwqp = (struct i40iw_qp *)qp->back_qp;
        i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
}

/**
 * i40iw_terminate_done - after terminate is completed
 * @qp: hardware control qp
 * @timeout_occurred: indicates if terminate timer expired
 */
void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
{
        struct i40iw_qp *iwqp;
        u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
        u8 hte = 0;
        bool first_time;
        unsigned long flags;

        iwqp = (struct i40iw_qp *)qp->back_qp;
        spin_lock_irqsave(&iwqp->lock, flags);
        if (iwqp->hte_added) {
                iwqp->hte_added = 0;
                hte = 1;
        }
        first_time = !(qp->term_flags & I40IW_TERM_DONE);
        qp->term_flags |= I40IW_TERM_DONE;
        spin_unlock_irqrestore(&iwqp->lock, flags);
        if (first_time) {
                if (!timeout_occurred)
                        i40iw_terminate_del_timer(qp);
                else
                        next_iwarp_state = I40IW_QP_STATE_CLOSING;

                i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
                i40iw_cm_disconn(iwqp);
        }
}

/**
 * i40iw_terminate_timeout - timeout happened
 * @t: points to terminate timer of iwarp qp
 */
static void i40iw_terminate_timeout(struct timer_list *t)
{
        struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer);
        struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;

        i40iw_terminate_done(qp, 1);
        i40iw_rem_ref(&iwqp->ibqp);
}

/**
 * i40iw_terminate_start_timer - start terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
{
        struct i40iw_qp *iwqp;

        iwqp = (struct i40iw_qp *)qp->back_qp;
        i40iw_add_ref(&iwqp->ibqp);
        timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0);
        iwqp->terminate_timer.expires = jiffies + HZ;
        add_timer(&iwqp->terminate_timer);
}

/**
 * i40iw_terminate_del_timer - delete terminate timeout
 * @qp: hardware control qp
 */
void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
{
        struct i40iw_qp *iwqp;

        iwqp = (struct i40iw_qp *)qp->back_qp;
        if (del_timer(&iwqp->terminate_timer))
                i40iw_rem_ref(&iwqp->ibqp);
}
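
/*
 * The terminate timer holds a QP reference for its whole lifetime:
 * i40iw_terminate_start_timer() takes one before arming the timer, and
 * it is dropped either by i40iw_terminate_timeout() when the timer fires
 * or by i40iw_terminate_del_timer() when del_timer() confirms the timer
 * was still pending.  This keeps the QP from being freed under an armed
 * timer.
 */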

/**
 * i40iw_cqp_generic_worker - generic worker for cqp
 * @work: work pointer
 */
static void i40iw_cqp_generic_worker(struct work_struct *work)
{
        struct i40iw_virtchnl_work_info *work_info =
            &((struct virtchnl_work *)work)->work_info;

        if (work_info->worker_vf_dev)
                work_info->callback_fcn(work_info->worker_vf_dev);
}

/**
 * i40iw_cqp_spawn_worker - spawn worker thread
 * @dev: device struct pointer
 * @work_info: work request info
 * @iw_vf_idx: virtual function index
 */
void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
                            struct i40iw_virtchnl_work_info *work_info,
                            u32 iw_vf_idx)
{
        struct virtchnl_work *work;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        work = &iwdev->virtchnl_w[iw_vf_idx];
        memcpy(&work->work_info, work_info, sizeof(*work_info));
        INIT_WORK(&work->work, i40iw_cqp_generic_worker);
        queue_work(iwdev->virtchnl_wq, &work->work);
}

/**
 * i40iw_cqp_manage_hmc_fcn_worker - worker to process hmc fcn completion
 * @work: work pointer for hmc info
 */
static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
{
        struct i40iw_cqp_request *cqp_request =
            ((struct virtchnl_work *)work)->cqp_request;
        struct i40iw_ccq_cqe_info ccq_cqe_info;
        struct i40iw_hmc_fcn_info *hmcfcninfo =
                        &cqp_request->info.in.u.manage_hmc_pm.info;
        struct i40iw_device *iwdev =
            (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;

        ccq_cqe_info.cqp = NULL;
        ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
        ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
        ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
        ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
        ccq_cqe_info.scratch = 0;
        ccq_cqe_info.error = cqp_request->compl_info.error;
        hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
                                 hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
        i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}

/**
 * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
 * @cqp_request: cqp request info struct for hmc fun
 * @unused: unused param of callback
 */
static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
                                              u32 unused)
{
        struct virtchnl_work *work;
        struct i40iw_hmc_fcn_info *hmcfcninfo =
            &cqp_request->info.in.u.manage_hmc_pm.info;
        struct i40iw_device *iwdev =
            (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
            back_dev;

        if (hmcfcninfo && hmcfcninfo->callback_fcn) {
                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
                atomic_inc(&cqp_request->refcount);
                work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
                work->cqp_request = cqp_request;
                INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
                queue_work(iwdev->virtchnl_wq, &work->work);
                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
        } else {
                i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
        }
}

/**
 * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
 * @dev: hardware control device structure
 * @hmcfcninfo: info for hmc
 */
enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
                                                    struct i40iw_hmc_fcn_info *hmcfcninfo)
{
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
        if (!cqp_request)
                return I40IW_ERR_NO_MEMORY;
        cqp_info = &cqp_request->info;
        cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
        cqp_request->param = hmcfcninfo;
        memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
               sizeof(*hmcfcninfo));
        cqp_info->in.u.manage_hmc_pm.dev = dev;
        cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
        cqp_info->post_sq = 1;
        cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP Manage HMC fail");
        return status;
}

/**
 * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
 * @dev: hardware control device structure
 * @values_mem: buffer for fpm
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
                                                      struct i40iw_dma_mem *values_mem,
                                                      u8 hmc_fn_id)
{
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
        if (!cqp_request)
                return I40IW_ERR_NO_MEMORY;
        cqp_info = &cqp_request->info;
        cqp_request->param = NULL;
        cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
        cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
        cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
        cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
        cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
        cqp_info->post_sq = 1;
        cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP Query FPM fail");
        return status;
}

/**
 * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
 * @dev: hardware control device structure
 * @values_mem: buffer with fpm values
 * @hmc_fn_id: function id for fpm
 */
enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
                                                       struct i40iw_dma_mem *values_mem,
                                                       u8 hmc_fn_id)
{
        enum i40iw_status_code status;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
        if (!cqp_request)
                return I40IW_ERR_NO_MEMORY;
        cqp_info = &cqp_request->info;
        cqp_request->param = NULL;
        cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
        cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
        cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
        cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
        cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
        cqp_info->post_sq = 1;
        cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP Commit FPM fail");
        return status;
}

/**
 * i40iw_vf_wait_vchnl_resp - wait for channel msg
 * @dev: function's device struct
 */
enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
{
        struct i40iw_device *iwdev = dev->back_dev;
        int timeout_ret;

        i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
                    __func__, __LINE__, dev, iwdev);

        atomic_set(&iwdev->vchnl_msgs, 2);
        timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
                                         (atomic_read(&iwdev->vchnl_msgs) == 1),
                                         I40IW_VCHNL_EVENT_TIMEOUT);
        atomic_dec(&iwdev->vchnl_msgs);
        if (!timeout_ret) {
                i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
                atomic_set(&iwdev->vchnl_msgs, 0);
                dev->vchnl_up = false;
                return I40IW_ERR_TIMEOUT;
        }
        wake_up(&dev->vf_reqs);
        return 0;
}
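
/*
 * vchnl_msgs acts as a small handshake counter: the waiter above arms it
 * at 2, the response path is expected to decrement it to 1 (which
 * satisfies the wait condition), and the final decrement here returns it
 * to 0 so the next request starts from a clean state.
 */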

/**
 * i40iw_cqp_cq_create_cmd - create a cq for the cqp
 * @dev: device pointer
 * @cq: pointer to created cq
 */
enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev,
                                               struct i40iw_sc_cq *cq)
{
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
        struct i40iw_cqp *iwcqp = &iwdev->cqp;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        enum i40iw_status_code status;

        cqp_request = i40iw_get_cqp_request(iwcqp, true);
        if (!cqp_request)
                return I40IW_ERR_NO_MEMORY;

        cqp_info = &cqp_request->info;
        cqp_info->cqp_cmd = OP_CQ_CREATE;
        cqp_info->post_sq = 1;
        cqp_info->in.u.cq_create.cq = cq;
        cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP Create CQ fail");

        return status;
}

/**
 * i40iw_cqp_qp_create_cmd - create a qp for the cqp
 * @dev: device pointer
 * @qp: pointer to created qp
 */
enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
                                               struct i40iw_sc_qp *qp)
{
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
        struct i40iw_cqp *iwcqp = &iwdev->cqp;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        struct i40iw_create_qp_info *qp_info;
        enum i40iw_status_code status;

        cqp_request = i40iw_get_cqp_request(iwcqp, true);
        if (!cqp_request)
                return I40IW_ERR_NO_MEMORY;

        cqp_info = &cqp_request->info;
        qp_info = &cqp_request->info.in.u.qp_create.info;

        memset(qp_info, 0, sizeof(*qp_info));

        qp_info->cq_num_valid = true;
        qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;

        cqp_info->cqp_cmd = OP_QP_CREATE;
        cqp_info->post_sq = 1;
        cqp_info->in.u.qp_create.qp = qp;
        cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP-OP QP create fail");
        return status;
}

/**
 * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
 * @dev: device pointer
 * @cq: pointer to cq
 */
void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
{
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        i40iw_cq_wq_destroy(iwdev, cq);
}

/**
 * i40iw_cqp_qp_destroy_cmd - destroy a qp via cqp
 * @dev: device pointer
 * @qp: pointer to qp
 */
void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
        struct i40iw_cqp *iwcqp = &iwdev->cqp;
        struct i40iw_cqp_request *cqp_request;
        struct cqp_commands_info *cqp_info;
        enum i40iw_status_code status;

        cqp_request = i40iw_get_cqp_request(iwcqp, true);
        if (!cqp_request)
                return;

        cqp_info = &cqp_request->info;
        memset(cqp_info, 0, sizeof(*cqp_info));

        cqp_info->cqp_cmd = OP_QP_DESTROY;
        cqp_info->post_sq = 1;
        cqp_info->in.u.qp_destroy.qp = qp;
        cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
        cqp_info->in.u.qp_destroy.remove_hash_idx = true;
        status = i40iw_handle_cqp_op(iwdev, cqp_request);
        if (status)
                i40iw_pr_err("CQP QP_DESTROY fail");
}

/**
 * i40iw_ieq_mpa_crc_ae - generate AE for crc error
 * @dev: hardware control device structure
 * @qp: hardware control qp
 */
void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
{
        struct i40iw_gen_ae_info info;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
        info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
        info.ae_source = I40IW_AE_SOURCE_RQ;
        i40iw_gen_ae(iwdev, qp, &info, false);
}

/**
 * i40iw_init_hash_desc - initialize hash for crc calculation
 * @desc: address of returned shash descriptor
 */
enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
{
        struct crypto_shash *tfm;
        struct shash_desc *tdesc;

        tfm = crypto_alloc_shash("crc32c", 0, 0);
        if (IS_ERR(tfm))
                return I40IW_ERR_MPA_CRC;

        tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
                        GFP_KERNEL);
        if (!tdesc) {
                crypto_free_shash(tfm);
                return I40IW_ERR_MPA_CRC;
        }
        tdesc->tfm = tfm;
        *desc = tdesc;

        return 0;
}

/**
 * i40iw_free_hash_desc - free hash desc
 * @desc: to be freed
 */
void i40iw_free_hash_desc(struct shash_desc *desc)
{
        if (desc) {
                crypto_free_shash(desc->tfm);
                kfree(desc);
        }
}

/**
 * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
 * @dev: hardware control device structure
 * @mem: buffer ptr for fpm to be allocated
 * @return: memory allocation status
 */
enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
                                                 struct i40iw_dma_mem *mem)
{
        enum i40iw_status_code status;
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;

        status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
                                       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
        return status;
}

/**
 * i40iw_ieq_check_mpacrc - check if mpa crc is OK
 * @desc: desc for hash
 * @addr: address of buffer for crc
 * @length: length of buffer
 * @value: value to be compared
 */
enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
                                              void *addr,
                                              u32 length,
                                              u32 value)
{
        u32 crc = 0;
        int ret;
        enum i40iw_status_code ret_code = 0;

        crypto_shash_init(desc);
        ret = crypto_shash_update(desc, addr, length);
        if (!ret)
                crypto_shash_final(desc, (u8 *)&crc);
        if (crc != value) {
                i40iw_pr_err("mpa crc check fail\n");
                ret_code = I40IW_ERR_MPA_CRC;
        }
        return ret_code;
}
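
/*
 * A minimal CRC-check sketch (hypothetical caller): the descriptor from
 * i40iw_init_hash_desc() is reused across checks and released once with
 * i40iw_free_hash_desc().
 *
 *	struct shash_desc *desc = NULL;
 *
 *	if (!i40iw_init_hash_desc(&desc)) {
 *		if (i40iw_ieq_check_mpacrc(desc, pdu, pdu_len, rcvd_crc))
 *			... bad CRC, e.g. i40iw_ieq_mpa_crc_ae(dev, qp) ...
 *		i40iw_free_hash_desc(desc);
 *	}
 */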

/**
 * i40iw_ieq_get_qp - get qp based on quad in puda buffer
 * @dev: hardware control device structure
 * @buf: receive puda buffer on exception q
 */
struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
                                     struct i40iw_puda_buf *buf)
{
        struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
        struct i40iw_qp *iwqp;
        struct i40iw_cm_node *cm_node;
        u32 loc_addr[4], rem_addr[4];
        u16 loc_port, rem_port;
        struct ipv6hdr *ip6h;
        struct iphdr *iph = (struct iphdr *)buf->iph;
        struct tcphdr *tcph = (struct tcphdr *)buf->tcph;

        if (iph->version == 4) {
                memset(loc_addr, 0, sizeof(loc_addr));
                loc_addr[0] = ntohl(iph->daddr);
                memset(rem_addr, 0, sizeof(rem_addr));
                rem_addr[0] = ntohl(iph->saddr);
        } else {
                ip6h = (struct ipv6hdr *)buf->iph;
                i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
                i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
        }
        loc_port = ntohs(tcph->dest);
        rem_port = ntohs(tcph->source);

        cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
                                  loc_addr, false, true);
        if (!cm_node)
                return NULL;
        iwqp = cm_node->iwqp;
        return &iwqp->sc_qp;
}

/**
 * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
 * @buf: puda to update
 * @length: length of buffer
 * @seqnum: seq number for tcp
 */
void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
{
        struct tcphdr *tcph;
        struct iphdr *iph;
        u16 iphlen;
        u16 packetsize;
        u8 *addr = (u8 *)buf->mem.va;

        iphlen = (buf->ipv4) ? 20 : 40;
        iph = (struct iphdr *)(addr + buf->maclen);
        tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
        packetsize = length + buf->tcphlen + iphlen;

        iph->tot_len = htons(packetsize);
        tcph->seq = htonl(seqnum);
}

/**
 * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
 * @info: puda completion info to fill in
 * @buf: puda buffer
 */
enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
                                                 struct i40iw_puda_buf *buf)
{
        struct iphdr *iph;
        struct ipv6hdr *ip6h;
        struct tcphdr *tcph;
        u16 iphlen;
        u16 pkt_len;
        u8 *mem = (u8 *)buf->mem.va;
        struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;

        if (ethh->h_proto == htons(ETH_P_8021Q)) {
                info->vlan_valid = true;
                buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
        }
        buf->maclen = (info->vlan_valid) ? 18 : 14;
        iphlen = (info->l3proto) ? 40 : 20;
        buf->ipv4 = !info->l3proto;
        buf->iph = mem + buf->maclen;
        iph = (struct iphdr *)buf->iph;

        buf->tcph = buf->iph + iphlen;
        tcph = (struct tcphdr *)buf->tcph;

        if (buf->ipv4) {
                pkt_len = ntohs(iph->tot_len);
        } else {
                ip6h = (struct ipv6hdr *)buf->iph;
                pkt_len = ntohs(ip6h->payload_len) + iphlen;
        }

        buf->totallen = pkt_len + buf->maclen;

        if (info->payload_len < buf->totallen) {
                i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
                             info->payload_len, buf->totallen);
                return I40IW_ERR_INVALID_SIZE;
        }

        buf->tcphlen = (tcph->doff) << 2;
        buf->datalen = pkt_len - iphlen - buf->tcphlen;
        buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
        buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
        buf->seqnum = ntohl(tcph->seq);
        return 0;
}
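
/*
 * Resulting buffer layout, as parsed above (offsets in bytes):
 *
 *	0 .............. maclen ............ maclen+iphlen ........ hdrlen
 *	| ethernet (14, |   IPv4 (20) or   |   TCP (doff * 4)     | data
 *	|  18 w/ VLAN)  |   IPv6 (40)      |                      |
 *
 * datalen is what remains of the IP payload after the TCP header.
 */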

/**
 * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
 * @t: points to stats timer of the vsi pestat
 */
static void i40iw_hw_stats_timeout(struct timer_list *t)
{
        struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t,
                                                       stats_timer);
        struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi;
        struct i40iw_sc_dev *pf_dev = sc_vsi->dev;
        struct i40iw_vsi_pestat *vf_devstat = NULL;
        u16 iw_vf_idx;
        unsigned long flags;

        /*PF*/
        i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats);

        for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
                spin_lock_irqsave(&pf_devstat->lock, flags);
                if (pf_dev->vf_dev[iw_vf_idx]) {
                        if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
                                vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat;
                                i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats);
                        }
                }
                spin_unlock_irqrestore(&pf_devstat->lock, flags);
        }

        mod_timer(&pf_devstat->stats_timer,
                  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}

/**
 * i40iw_hw_stats_start_timer - Start periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi)
{
        struct i40iw_vsi_pestat *devstat = vsi->pestat;

        timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0);
        mod_timer(&devstat->stats_timer,
                  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
}
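
/*
 * The stats timer is self-rearming: i40iw_hw_stats_timeout() calls
 * mod_timer() on itself every STATS_TIMER_DELAY ms, so the only way to
 * stop the cycle is i40iw_hw_stats_stop_timer() below, which uses
 * del_timer_sync() to also wait out a handler running concurrently.
 */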

/**
 * i40iw_hw_stats_stop_timer - Delete periodic stats timer
 * @vsi: pointer to the vsi structure
 */
void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi)
{
        struct i40iw_vsi_pestat *devstat = vsi->pestat;

        del_timer_sync(&devstat->stats_timer);
}