linux/drivers/infiniband/hw/irdma/utils.c
   1// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
   2/* Copyright (c) 2015 - 2021 Intel Corporation */
   3#include "main.h"
   4
   5/**
   6 * irdma_arp_table - manage arp table
   7 * @rf: RDMA PCI function
   8 * @ip_addr: ip address for device
   9 * @ipv4: IPv4 flag
  10 * @mac_addr: mac address ptr
  11 * @action: add, resolve or delete
  12 */
  13int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
  14                    u8 *mac_addr, u32 action)
  15{
  16        unsigned long flags;
  17        int arp_index;
  18        u32 ip[4] = {};
  19
  20        if (ipv4)
  21                ip[0] = *ip_addr;
  22        else
  23                memcpy(ip, ip_addr, sizeof(ip));
  24
  25        spin_lock_irqsave(&rf->arp_lock, flags);
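             /* Scan the table for an existing entry; arp_index == arp_table_size means no match */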
  26        for (arp_index = 0; (u32)arp_index < rf->arp_table_size; arp_index++) {
  27                if (!memcmp(rf->arp_table[arp_index].ip_addr, ip, sizeof(ip)))
  28                        break;
  29        }
  30
  31        switch (action) {
  32        case IRDMA_ARP_ADD:
  33                if (arp_index != rf->arp_table_size) {
  34                        arp_index = -1;
  35                        break;
  36                }
  37
  38                arp_index = 0;
  39                if (irdma_alloc_rsrc(rf, rf->allocated_arps, rf->arp_table_size,
  40                                     (u32 *)&arp_index, &rf->next_arp_index)) {
  41                        arp_index = -1;
  42                        break;
  43                }
  44
  45                memcpy(rf->arp_table[arp_index].ip_addr, ip,
  46                       sizeof(rf->arp_table[arp_index].ip_addr));
  47                ether_addr_copy(rf->arp_table[arp_index].mac_addr, mac_addr);
  48                break;
  49        case IRDMA_ARP_RESOLVE:
  50                if (arp_index == rf->arp_table_size)
  51                        arp_index = -1;
  52                break;
  53        case IRDMA_ARP_DELETE:
  54                if (arp_index == rf->arp_table_size) {
  55                        arp_index = -1;
  56                        break;
  57                }
  58
  59                memset(rf->arp_table[arp_index].ip_addr, 0,
  60                       sizeof(rf->arp_table[arp_index].ip_addr));
  61                eth_zero_addr(rf->arp_table[arp_index].mac_addr);
  62                irdma_free_rsrc(rf, rf->allocated_arps, arp_index);
  63                break;
  64        default:
  65                arp_index = -1;
  66                break;
  67        }
  68
  69        spin_unlock_irqrestore(&rf->arp_lock, flags);
  70        return arp_index;
  71}
  72
  73/**
  74 * irdma_add_arp - add a new arp entry if needed
  75 * @rf: RDMA function
  76 * @ip: IP address
  77 * @ipv4: IPv4 flag
  78 * @mac: MAC address
  79 */
  80int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac)
  81{
  82        int arpidx;
  83
  84        arpidx = irdma_arp_table(rf, &ip[0], ipv4, NULL, IRDMA_ARP_RESOLVE);
  85        if (arpidx >= 0) {
  86                if (ether_addr_equal(rf->arp_table[arpidx].mac_addr, mac))
  87                        return arpidx;
  88
  89                irdma_manage_arp_cache(rf, rf->arp_table[arpidx].mac_addr, ip,
  90                                       ipv4, IRDMA_ARP_DELETE);
  91        }
  92
  93        irdma_manage_arp_cache(rf, mac, ip, ipv4, IRDMA_ARP_ADD);
  94
  95        return irdma_arp_table(rf, ip, ipv4, NULL, IRDMA_ARP_RESOLVE);
  96}
  97
  98/**
  99 * wr32 - write 32 bits to hw register
 100 * @hw: hardware information including registers
 101 * @reg: register offset
 102 * @val: value to write to register
 103 */
 104inline void wr32(struct irdma_hw *hw, u32 reg, u32 val)
 105{
 106        writel(val, hw->hw_addr + reg);
 107}
 108
 109/**
 110 * rd32 - read a 32 bit hw register
 111 * @hw: hardware information including registers
 112 * @reg: register offset
 113 *
 114 * Return value of register content
 115 */
 116inline u32 rd32(struct irdma_hw *hw, u32 reg)
 117{
 118        return readl(hw->hw_addr + reg);
 119}
 120
 121/**
 122 * rd64 - read a 64 bit hw register
 123 * @hw: hardware information including registers
 124 * @reg: register offset
 125 *
 126 * Return value of register content
 127 */
 128inline u64 rd64(struct irdma_hw *hw, u32 reg)
 129{
 130        return readq(hw->hw_addr + reg);
 131}
 132
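     /**
      * irdma_gid_change_event - report a GID table change to the IB core
      * @ibdev: associated ib device
      *
      * Dispatch an IB_EVENT_GID_CHANGE event for port 1.
      */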
 133static void irdma_gid_change_event(struct ib_device *ibdev)
 134{
 135        struct ib_event ib_event;
 136
 137        ib_event.event = IB_EVENT_GID_CHANGE;
 138        ib_event.device = ibdev;
 139        ib_event.element.port_num = 1;
 140        ib_dispatch_event(&ib_event);
 141}
 142
 143/**
 144 * irdma_inetaddr_event - system notifier for ipv4 addr events
 145 * @notifier: not used
 146 * @event: event for notifier
  147 * @ptr: interface address
 148 */
 149int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
 150                         void *ptr)
 151{
 152        struct in_ifaddr *ifa = ptr;
 153        struct net_device *netdev = ifa->ifa_dev->dev;
 154        struct irdma_device *iwdev;
 155        struct ib_device *ibdev;
 156        u32 local_ipaddr;
 157
 158        ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
 159        if (!ibdev)
 160                return NOTIFY_DONE;
 161
 162        iwdev = to_iwdev(ibdev);
 163        local_ipaddr = ntohl(ifa->ifa_address);
 164        ibdev_dbg(&iwdev->ibdev,
 165                  "DEV: netdev %p event %lu local_ip=%pI4 MAC=%pM\n", netdev,
 166                  event, &local_ipaddr, netdev->dev_addr);
 167        switch (event) {
 168        case NETDEV_DOWN:
 169                irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
 170                                       &local_ipaddr, true, IRDMA_ARP_DELETE);
 171                irdma_if_notify(iwdev, netdev, &local_ipaddr, true, false);
 172                irdma_gid_change_event(&iwdev->ibdev);
 173                break;
 174        case NETDEV_UP:
 175        case NETDEV_CHANGEADDR:
 176                irdma_add_arp(iwdev->rf, &local_ipaddr, true, netdev->dev_addr);
 177                irdma_if_notify(iwdev, netdev, &local_ipaddr, true, true);
 178                irdma_gid_change_event(&iwdev->ibdev);
 179                break;
 180        default:
 181                break;
 182        }
 183
 184        ib_device_put(ibdev);
 185
 186        return NOTIFY_DONE;
 187}
 188
 189/**
 190 * irdma_inet6addr_event - system notifier for ipv6 addr events
 191 * @notifier: not used
 192 * @event: event for notifier
  193 * @ptr: interface address
 194 */
 195int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
 196                          void *ptr)
 197{
 198        struct inet6_ifaddr *ifa = ptr;
 199        struct net_device *netdev = ifa->idev->dev;
 200        struct irdma_device *iwdev;
 201        struct ib_device *ibdev;
 202        u32 local_ipaddr6[4];
 203
 204        ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
 205        if (!ibdev)
 206                return NOTIFY_DONE;
 207
 208        iwdev = to_iwdev(ibdev);
 209        irdma_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
 210        ibdev_dbg(&iwdev->ibdev,
 211                  "DEV: netdev %p event %lu local_ip=%pI6 MAC=%pM\n", netdev,
 212                  event, local_ipaddr6, netdev->dev_addr);
 213        switch (event) {
 214        case NETDEV_DOWN:
 215                irdma_manage_arp_cache(iwdev->rf, netdev->dev_addr,
 216                                       local_ipaddr6, false, IRDMA_ARP_DELETE);
 217                irdma_if_notify(iwdev, netdev, local_ipaddr6, false, false);
 218                irdma_gid_change_event(&iwdev->ibdev);
 219                break;
 220        case NETDEV_UP:
 221        case NETDEV_CHANGEADDR:
 222                irdma_add_arp(iwdev->rf, local_ipaddr6, false,
 223                              netdev->dev_addr);
 224                irdma_if_notify(iwdev, netdev, local_ipaddr6, false, true);
 225                irdma_gid_change_event(&iwdev->ibdev);
 226                break;
 227        default:
 228                break;
 229        }
 230
 231        ib_device_put(ibdev);
 232
 233        return NOTIFY_DONE;
 234}
 235
 236/**
 237 * irdma_net_event - system notifier for net events
 238 * @notifier: not used
 239 * @event: event for notifier
 240 * @ptr: neighbor
 241 */
 242int irdma_net_event(struct notifier_block *notifier, unsigned long event,
 243                    void *ptr)
 244{
 245        struct neighbour *neigh = ptr;
 246        struct irdma_device *iwdev;
 247        struct ib_device *ibdev;
 248        __be32 *p;
 249        u32 local_ipaddr[4] = {};
 250        bool ipv4 = true;
 251
 252        ibdev = ib_device_get_by_netdev((struct net_device *)neigh->dev,
 253                                        RDMA_DRIVER_IRDMA);
 254        if (!ibdev)
 255                return NOTIFY_DONE;
 256
 257        iwdev = to_iwdev(ibdev);
 258
 259        switch (event) {
 260        case NETEVENT_NEIGH_UPDATE:
 261                p = (__be32 *)neigh->primary_key;
 262                if (neigh->tbl->family == AF_INET6) {
 263                        ipv4 = false;
 264                        irdma_copy_ip_ntohl(local_ipaddr, p);
 265                } else {
 266                        local_ipaddr[0] = ntohl(*p);
 267                }
 268
 269                ibdev_dbg(&iwdev->ibdev,
 270                          "DEV: netdev %p state %d local_ip=%pI4 MAC=%pM\n",
 271                          iwdev->netdev, neigh->nud_state, local_ipaddr,
 272                          neigh->ha);
 273
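                     /* Neighbor is reachable: refresh the ARP entry; otherwise purge any stale entry */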
 274                if (neigh->nud_state & NUD_VALID)
 275                        irdma_add_arp(iwdev->rf, local_ipaddr, ipv4, neigh->ha);
 276
 277                else
 278                        irdma_manage_arp_cache(iwdev->rf, neigh->ha,
 279                                               local_ipaddr, ipv4,
 280                                               IRDMA_ARP_DELETE);
 281                break;
 282        default:
 283                break;
 284        }
 285
 286        ib_device_put(ibdev);
 287
 288        return NOTIFY_DONE;
 289}
 290
 291/**
 292 * irdma_netdevice_event - system notifier for netdev events
 293 * @notifier: not used
 294 * @event: event for notifier
 295 * @ptr: netdev
 296 */
 297int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
 298                          void *ptr)
 299{
 300        struct irdma_device *iwdev;
 301        struct ib_device *ibdev;
 302        struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
 303
 304        ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_IRDMA);
 305        if (!ibdev)
 306                return NOTIFY_DONE;
 307
 308        iwdev = to_iwdev(ibdev);
 309        iwdev->iw_status = 1;
 310        switch (event) {
 311        case NETDEV_DOWN:
 312                iwdev->iw_status = 0;
 313                fallthrough;
 314        case NETDEV_UP:
 315                irdma_port_ibevent(iwdev);
 316                break;
 317        default:
 318                break;
 319        }
 320        ib_device_put(ibdev);
 321
 322        return NOTIFY_DONE;
 323}
 324
 325/**
 326 * irdma_add_ipv6_addr - add ipv6 address to the hw arp table
 327 * @iwdev: irdma device
 328 */
 329static void irdma_add_ipv6_addr(struct irdma_device *iwdev)
 330{
 331        struct net_device *ip_dev;
 332        struct inet6_dev *idev;
 333        struct inet6_ifaddr *ifp, *tmp;
 334        u32 local_ipaddr6[4];
 335
 336        rcu_read_lock();
 337        for_each_netdev_rcu (&init_net, ip_dev) {
 338                if (((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF &&
 339                      rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev) ||
 340                      ip_dev == iwdev->netdev) &&
 341                      (READ_ONCE(ip_dev->flags) & IFF_UP)) {
 342                        idev = __in6_dev_get(ip_dev);
 343                        if (!idev) {
 344                                ibdev_err(&iwdev->ibdev, "ipv6 inet device not found\n");
 345                                break;
 346                        }
 347                        list_for_each_entry_safe (ifp, tmp, &idev->addr_list,
 348                                                  if_list) {
 349                                ibdev_dbg(&iwdev->ibdev,
 350                                          "INIT: IP=%pI6, vlan_id=%d, MAC=%pM\n",
 351                                          &ifp->addr,
 352                                          rdma_vlan_dev_vlan_id(ip_dev),
 353                                          ip_dev->dev_addr);
 354
 355                                irdma_copy_ip_ntohl(local_ipaddr6,
 356                                                    ifp->addr.in6_u.u6_addr32);
 357                                irdma_manage_arp_cache(iwdev->rf,
 358                                                       ip_dev->dev_addr,
 359                                                       local_ipaddr6, false,
 360                                                       IRDMA_ARP_ADD);
 361                        }
 362                }
 363        }
 364        rcu_read_unlock();
 365}
 366
 367/**
 368 * irdma_add_ipv4_addr - add ipv4 address to the hw arp table
 369 * @iwdev: irdma device
 370 */
 371static void irdma_add_ipv4_addr(struct irdma_device *iwdev)
 372{
 373        struct net_device *dev;
 374        struct in_device *idev;
 375        u32 ip_addr;
 376
 377        rcu_read_lock();
 378        for_each_netdev_rcu (&init_net, dev) {
 379                if (((rdma_vlan_dev_vlan_id(dev) < 0xFFFF &&
 380                      rdma_vlan_dev_real_dev(dev) == iwdev->netdev) ||
 381                      dev == iwdev->netdev) && (READ_ONCE(dev->flags) & IFF_UP)) {
 382                        const struct in_ifaddr *ifa;
 383
 384                        idev = __in_dev_get_rcu(dev);
 385                        if (!idev)
 386                                continue;
 387
 388                        in_dev_for_each_ifa_rcu(ifa, idev) {
 389                                ibdev_dbg(&iwdev->ibdev, "CM: IP=%pI4, vlan_id=%d, MAC=%pM\n",
 390                                          &ifa->ifa_address, rdma_vlan_dev_vlan_id(dev),
 391                                          dev->dev_addr);
 392
 393                                ip_addr = ntohl(ifa->ifa_address);
 394                                irdma_manage_arp_cache(iwdev->rf, dev->dev_addr,
 395                                                       &ip_addr, true,
 396                                                       IRDMA_ARP_ADD);
 397                        }
 398                }
 399        }
 400        rcu_read_unlock();
 401}
 402
 403/**
 404 * irdma_add_ip - add ip addresses
 405 * @iwdev: irdma device
 406 *
 407 * Add ipv4/ipv6 addresses to the arp cache
 408 */
 409void irdma_add_ip(struct irdma_device *iwdev)
 410{
 411        irdma_add_ipv4_addr(iwdev);
 412        irdma_add_ipv6_addr(iwdev);
 413}
 414
 415/**
 416 * irdma_alloc_and_get_cqp_request - get cqp struct
 417 * @cqp: device cqp ptr
  418 * @wait: true if the cqp request is to be used in wait mode
 419 */
 420struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
 421                                                          bool wait)
 422{
 423        struct irdma_cqp_request *cqp_request = NULL;
 424        unsigned long flags;
 425
 426        spin_lock_irqsave(&cqp->req_lock, flags);
 427        if (!list_empty(&cqp->cqp_avail_reqs)) {
 428                cqp_request = list_first_entry(&cqp->cqp_avail_reqs,
 429                                               struct irdma_cqp_request, list);
 430                list_del_init(&cqp_request->list);
 431        }
 432        spin_unlock_irqrestore(&cqp->req_lock, flags);
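             /* No pre-allocated request available; fall back to a dynamic allocation */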
 433        if (!cqp_request) {
 434                cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
 435                if (cqp_request) {
 436                        cqp_request->dynamic = true;
 437                        if (wait)
 438                                init_waitqueue_head(&cqp_request->waitq);
 439                }
 440        }
 441        if (!cqp_request) {
 442                ibdev_dbg(to_ibdev(cqp->sc_cqp.dev), "ERR: CQP Request Fail: No Memory");
 443                return NULL;
 444        }
 445
 446        cqp_request->waiting = wait;
 447        refcount_set(&cqp_request->refcnt, 1);
 448        memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
 449
 450        return cqp_request;
 451}
 452
 453/**
 454 * irdma_get_cqp_request - increase refcount for cqp_request
 455 * @cqp_request: pointer to cqp_request instance
 456 */
 457static inline void irdma_get_cqp_request(struct irdma_cqp_request *cqp_request)
 458{
 459        refcount_inc(&cqp_request->refcnt);
 460}
 461
 462/**
 463 * irdma_free_cqp_request - free cqp request
 464 * @cqp: cqp ptr
 465 * @cqp_request: to be put back in cqp list
 466 */
 467void irdma_free_cqp_request(struct irdma_cqp *cqp,
 468                            struct irdma_cqp_request *cqp_request)
 469{
 470        unsigned long flags;
 471
 472        if (cqp_request->dynamic) {
 473                kfree(cqp_request);
 474        } else {
 475                cqp_request->request_done = false;
 476                cqp_request->callback_fcn = NULL;
 477                cqp_request->waiting = false;
 478
 479                spin_lock_irqsave(&cqp->req_lock, flags);
 480                list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
 481                spin_unlock_irqrestore(&cqp->req_lock, flags);
 482        }
 483        wake_up(&cqp->remove_wq);
 484}
 485
 486/**
 487 * irdma_put_cqp_request - dec ref count and free if 0
 488 * @cqp: cqp ptr
 489 * @cqp_request: to be put back in cqp list
 490 */
 491void irdma_put_cqp_request(struct irdma_cqp *cqp,
 492                           struct irdma_cqp_request *cqp_request)
 493{
 494        if (refcount_dec_and_test(&cqp_request->refcnt))
 495                irdma_free_cqp_request(cqp, cqp_request);
 496}
 497
 498/**
  499 * irdma_free_pending_cqp_request - free pending cqp request objs
 500 * @cqp: cqp ptr
 501 * @cqp_request: to be put back in cqp list
 502 */
 503static void
 504irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
 505                               struct irdma_cqp_request *cqp_request)
 506{
 507        if (cqp_request->waiting) {
 508                cqp_request->compl_info.error = true;
 509                cqp_request->request_done = true;
 510                wake_up(&cqp_request->waitq);
 511        }
 512        wait_event_timeout(cqp->remove_wq,
 513                           refcount_read(&cqp_request->refcnt) == 1, 1000);
 514        irdma_put_cqp_request(cqp, cqp_request);
 515}
 516
 517/**
 518 * irdma_cleanup_pending_cqp_op - clean-up cqp with no
 519 * completions
 520 * @rf: RDMA PCI function
 521 */
 522void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
 523{
 524        struct irdma_sc_dev *dev = &rf->sc_dev;
 525        struct irdma_cqp *cqp = &rf->cqp;
 526        struct irdma_cqp_request *cqp_request = NULL;
 527        struct cqp_cmds_info *pcmdinfo = NULL;
 528        u32 i, pending_work, wqe_idx;
 529
 530        pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
 531        wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
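             /* Walk the in-flight CQP SQ entries from the tail and fail each pending request */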
 532        for (i = 0; i < pending_work; i++) {
 533                cqp_request = (struct irdma_cqp_request *)(unsigned long)
 534                                      cqp->scratch_array[wqe_idx];
 535                if (cqp_request)
 536                        irdma_free_pending_cqp_request(cqp, cqp_request);
 537                wqe_idx = (wqe_idx + 1) % IRDMA_RING_SIZE(cqp->sc_cqp.sq_ring);
 538        }
 539
 540        while (!list_empty(&dev->cqp_cmd_head)) {
 541                pcmdinfo = irdma_remove_cqp_head(dev);
 542                cqp_request =
 543                        container_of(pcmdinfo, struct irdma_cqp_request, info);
 544                if (cqp_request)
 545                        irdma_free_pending_cqp_request(cqp, cqp_request);
 546        }
 547}
 548
 549/**
 550 * irdma_wait_event - wait for completion
 551 * @rf: RDMA PCI function
  552 * @cqp_request: cqp request to wait on
 553 */
 554static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf,
 555                                               struct irdma_cqp_request *cqp_request)
 556{
 557        struct irdma_cqp_timeout cqp_timeout = {};
 558        bool cqp_error = false;
 559        enum irdma_status_code err_code = 0;
 560
 561        cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS];
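             /*
              * Poll the CCQ while waiting for the request to complete. If the
              * CQP makes no forward progress for CQP_TIMEOUT_THRESHOLD polls,
              * request a function reset and fail with a timeout.
              */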
 562        do {
 563                irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
 564                if (wait_event_timeout(cqp_request->waitq,
 565                                       cqp_request->request_done,
 566                                       msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
 567                        break;
 568
 569                irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
 570
 571                if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
 572                        continue;
 573
 574                if (!rf->reset) {
 575                        rf->reset = true;
 576                        rf->gen_ops.request_reset(rf);
 577                }
 578                return IRDMA_ERR_TIMEOUT;
 579        } while (1);
 580
 581        cqp_error = cqp_request->compl_info.error;
 582        if (cqp_error) {
 583                err_code = IRDMA_ERR_CQP_COMPL_ERROR;
 584                if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
 585                    cqp_request->compl_info.min_err_code == 0x8029) {
 586                        if (!rf->reset) {
 587                                rf->reset = true;
 588                                rf->gen_ops.request_reset(rf);
 589                        }
 590                }
 591        }
 592
 593        return err_code;
 594}
 595
 596static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
 597        [IRDMA_OP_CEQ_DESTROY] = "Destroy CEQ Cmd",
 598        [IRDMA_OP_AEQ_DESTROY] = "Destroy AEQ Cmd",
 599        [IRDMA_OP_DELETE_ARP_CACHE_ENTRY] = "Delete ARP Cache Cmd",
 600        [IRDMA_OP_MANAGE_APBVT_ENTRY] = "Manage APBV Table Entry Cmd",
 601        [IRDMA_OP_CEQ_CREATE] = "CEQ Create Cmd",
  602        [IRDMA_OP_AEQ_CREATE] = "AEQ Create Cmd",
 603        [IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY] = "Manage Quad Hash Table Entry Cmd",
 604        [IRDMA_OP_QP_MODIFY] = "Modify QP Cmd",
 605        [IRDMA_OP_QP_UPLOAD_CONTEXT] = "Upload Context Cmd",
 606        [IRDMA_OP_CQ_CREATE] = "Create CQ Cmd",
 607        [IRDMA_OP_CQ_DESTROY] = "Destroy CQ Cmd",
 608        [IRDMA_OP_QP_CREATE] = "Create QP Cmd",
 609        [IRDMA_OP_QP_DESTROY] = "Destroy QP Cmd",
 610        [IRDMA_OP_ALLOC_STAG] = "Allocate STag Cmd",
 611        [IRDMA_OP_MR_REG_NON_SHARED] = "Register Non-Shared MR Cmd",
 612        [IRDMA_OP_DEALLOC_STAG] = "Deallocate STag Cmd",
 613        [IRDMA_OP_MW_ALLOC] = "Allocate Memory Window Cmd",
 614        [IRDMA_OP_QP_FLUSH_WQES] = "Flush QP Cmd",
 615        [IRDMA_OP_ADD_ARP_CACHE_ENTRY] = "Add ARP Cache Cmd",
 616        [IRDMA_OP_MANAGE_PUSH_PAGE] = "Manage Push Page Cmd",
 617        [IRDMA_OP_UPDATE_PE_SDS] = "Update PE SDs Cmd",
 618        [IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
 619        [IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
 620        [IRDMA_OP_RESUME] = "Resume QP Cmd",
 621        [IRDMA_OP_MANAGE_VF_PBLE_BP] = "Manage VF PBLE Backing Pages Cmd",
 622        [IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
 623        [IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
 624        [IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
 625        [IRDMA_OP_AH_MODIFY] = "Modify Address Handle Cmd",
 626        [IRDMA_OP_AH_DESTROY] = "Destroy Address Handle Cmd",
 627        [IRDMA_OP_MC_CREATE] = "Create Multicast Group Cmd",
 628        [IRDMA_OP_MC_DESTROY] = "Destroy Multicast Group Cmd",
 629        [IRDMA_OP_MC_MODIFY] = "Modify Multicast Group Cmd",
 630        [IRDMA_OP_STATS_ALLOCATE] = "Add Statistics Instance Cmd",
 631        [IRDMA_OP_STATS_FREE] = "Free Statistics Instance Cmd",
 632        [IRDMA_OP_STATS_GATHER] = "Gather Statistics Cmd",
 633        [IRDMA_OP_WS_ADD_NODE] = "Add Work Scheduler Node Cmd",
 634        [IRDMA_OP_WS_MODIFY_NODE] = "Modify Work Scheduler Node Cmd",
 635        [IRDMA_OP_WS_DELETE_NODE] = "Delete Work Scheduler Node Cmd",
 636        [IRDMA_OP_SET_UP_MAP] = "Set UP-UP Mapping Cmd",
 637        [IRDMA_OP_GEN_AE] = "Generate AE Cmd",
 638        [IRDMA_OP_QUERY_RDMA_FEATURES] = "RDMA Get Features Cmd",
 639        [IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY] = "Allocate Local MAC Entry Cmd",
 640        [IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
 641        [IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
 642        [IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
 643};
 644
 645static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
 646        {0xffff, 0x8006, "Flush No Wqe Pending"},
 647        {0xffff, 0x8007, "Modify QP Bad Close"},
 648        {0xffff, 0x8009, "LLP Closed"},
 649        {0xffff, 0x800a, "Reset Not Sent"}
 650};
 651
 652/**
 653 * irdma_cqp_crit_err - check if CQP error is critical
 654 * @dev: pointer to dev structure
 655 * @cqp_cmd: code for last CQP operation
 656 * @maj_err_code: major error code
  657 * @min_err_code: minor error code
 658 */
 659bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
 660                        u16 maj_err_code, u16 min_err_code)
 661{
 662        int i;
 663
 664        for (i = 0; i < ARRAY_SIZE(irdma_noncrit_err_list); ++i) {
 665                if (maj_err_code == irdma_noncrit_err_list[i].maj &&
 666                    min_err_code == irdma_noncrit_err_list[i].min) {
 667                        ibdev_dbg(to_ibdev(dev),
 668                                  "CQP: [%s Error][%s] maj=0x%x min=0x%x\n",
 669                                  irdma_noncrit_err_list[i].desc,
 670                                  irdma_cqp_cmd_names[cqp_cmd], maj_err_code,
 671                                  min_err_code);
 672                        return false;
 673                }
 674        }
 675        return true;
 676}
 677
 678/**
 679 * irdma_handle_cqp_op - process cqp command
 680 * @rf: RDMA PCI function
 681 * @cqp_request: cqp request to process
 682 */
 683enum irdma_status_code irdma_handle_cqp_op(struct irdma_pci_f *rf,
 684                                           struct irdma_cqp_request *cqp_request)
 685{
 686        struct irdma_sc_dev *dev = &rf->sc_dev;
 687        struct cqp_cmds_info *info = &cqp_request->info;
 688        enum irdma_status_code status;
 689        bool put_cqp_request = true;
 690
 691        if (rf->reset)
 692                return IRDMA_ERR_NOT_READY;
 693
 694        irdma_get_cqp_request(cqp_request);
 695        status = irdma_process_cqp_cmd(dev, info);
 696        if (status)
 697                goto err;
 698
 699        if (cqp_request->waiting) {
 700                put_cqp_request = false;
 701                status = irdma_wait_event(rf, cqp_request);
 702                if (status)
 703                        goto err;
 704        }
 705
 706        return 0;
 707
 708err:
 709        if (irdma_cqp_crit_err(dev, info->cqp_cmd,
 710                               cqp_request->compl_info.maj_err_code,
 711                               cqp_request->compl_info.min_err_code))
 712                ibdev_err(&rf->iwdev->ibdev,
 713                          "[%s Error][op_code=%d] status=%d waiting=%d completion_err=%d maj=0x%x min=0x%x\n",
 714                          irdma_cqp_cmd_names[info->cqp_cmd], info->cqp_cmd, status, cqp_request->waiting,
 715                          cqp_request->compl_info.error, cqp_request->compl_info.maj_err_code,
 716                          cqp_request->compl_info.min_err_code);
 717
 718        if (put_cqp_request)
 719                irdma_put_cqp_request(&rf->cqp, cqp_request);
 720
 721        return status;
 722}
 723
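     /**
      * irdma_qp_add_ref - take a reference on a qp
      * @ibqp: ib qp pointer
      */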
 724void irdma_qp_add_ref(struct ib_qp *ibqp)
 725{
 726        struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;
 727
 728        refcount_inc(&iwqp->refcnt);
 729}
 730
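     /**
      * irdma_qp_rem_ref - drop a qp reference
      * @ibqp: ib qp pointer
      *
      * On the last reference, clear the qp table entry and complete
      * free_qp so the destroy path can proceed.
      */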
 731void irdma_qp_rem_ref(struct ib_qp *ibqp)
 732{
 733        struct irdma_qp *iwqp = to_iwqp(ibqp);
 734        struct irdma_device *iwdev = iwqp->iwdev;
 735        u32 qp_num;
 736        unsigned long flags;
 737
 738        spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
 739        if (!refcount_dec_and_test(&iwqp->refcnt)) {
 740                spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
 741                return;
 742        }
 743
 744        qp_num = iwqp->ibqp.qp_num;
 745        iwdev->rf->qp_table[qp_num] = NULL;
 746        spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
 747        complete(&iwqp->free_qp);
 748}
 749
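     /**
      * to_ibdev - get the ib device from a hardware control device
      * @dev: hardware control device structure
      */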
 750struct ib_device *to_ibdev(struct irdma_sc_dev *dev)
 751{
 752        return &(container_of(dev, struct irdma_pci_f, sc_dev))->iwdev->ibdev;
 753}
 754
 755/**
 756 * irdma_get_qp - get qp address
 757 * @device: iwarp device
 758 * @qpn: qp number
 759 */
 760struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
 761{
 762        struct irdma_device *iwdev = to_iwdev(device);
 763
 764        if (qpn < IW_FIRST_QPN || qpn >= iwdev->rf->max_qp)
 765                return NULL;
 766
 767        return &iwdev->rf->qp_table[qpn]->ibqp;
 768}
 769
 770/**
 771 * irdma_get_hw_addr - return hw addr
 772 * @par: points to shared dev
 773 */
 774u8 __iomem *irdma_get_hw_addr(void *par)
 775{
 776        struct irdma_sc_dev *dev = par;
 777
 778        return dev->hw->hw_addr;
 779}
 780
 781/**
 782 * irdma_remove_cqp_head - return head entry and remove
 783 * @dev: device
 784 */
 785void *irdma_remove_cqp_head(struct irdma_sc_dev *dev)
 786{
 787        struct list_head *entry;
 788        struct list_head *list = &dev->cqp_cmd_head;
 789
 790        if (list_empty(list))
 791                return NULL;
 792
 793        entry = list->next;
 794        list_del(entry);
 795
 796        return entry;
 797}
 798
 799/**
 800 * irdma_cqp_sds_cmd - create cqp command for sd
 801 * @dev: hardware control device structure
 802 * @sdinfo: information for sd cqp
 803 *
 804 */
 805enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
 806                                         struct irdma_update_sds_info *sdinfo)
 807{
 808        struct irdma_cqp_request *cqp_request;
 809        struct cqp_cmds_info *cqp_info;
 810        struct irdma_pci_f *rf = dev_to_rf(dev);
 811        enum irdma_status_code status;
 812
 813        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 814        if (!cqp_request)
 815                return IRDMA_ERR_NO_MEMORY;
 816
 817        cqp_info = &cqp_request->info;
 818        memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
 819               sizeof(cqp_info->in.u.update_pe_sds.info));
 820        cqp_info->cqp_cmd = IRDMA_OP_UPDATE_PE_SDS;
 821        cqp_info->post_sq = 1;
 822        cqp_info->in.u.update_pe_sds.dev = dev;
 823        cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
 824
 825        status = irdma_handle_cqp_op(rf, cqp_request);
 826        irdma_put_cqp_request(&rf->cqp, cqp_request);
 827
 828        return status;
 829}
 830
 831/**
 832 * irdma_cqp_qp_suspend_resume - cqp command for suspend/resume
 833 * @qp: hardware control qp
 834 * @op: suspend or resume
 835 */
 836enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp,
 837                                                   u8 op)
 838{
 839        struct irdma_sc_dev *dev = qp->dev;
 840        struct irdma_cqp_request *cqp_request;
 841        struct irdma_sc_cqp *cqp = dev->cqp;
 842        struct cqp_cmds_info *cqp_info;
 843        struct irdma_pci_f *rf = dev_to_rf(dev);
 844        enum irdma_status_code status;
 845
 846        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
 847        if (!cqp_request)
 848                return IRDMA_ERR_NO_MEMORY;
 849
 850        cqp_info = &cqp_request->info;
 851        cqp_info->cqp_cmd = op;
 852        cqp_info->in.u.suspend_resume.cqp = cqp;
 853        cqp_info->in.u.suspend_resume.qp = qp;
 854        cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request;
 855
 856        status = irdma_handle_cqp_op(rf, cqp_request);
 857        irdma_put_cqp_request(&rf->cqp, cqp_request);
 858
 859        return status;
 860}
 861
 862/**
 863 * irdma_term_modify_qp - modify qp for term message
 864 * @qp: hardware control qp
 865 * @next_state: qp's next state
 866 * @term: terminate code
  867 * @term_len: length of terminate message
 868 */
 869void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
 870                          u8 term_len)
 871{
 872        struct irdma_qp *iwqp;
 873
 874        iwqp = qp->qp_uk.back_qp;
 875        irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
 876};
 877
 878/**
 879 * irdma_terminate_done - after terminate is completed
 880 * @qp: hardware control qp
 881 * @timeout_occurred: indicates if terminate timer expired
 882 */
 883void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
 884{
 885        struct irdma_qp *iwqp;
 886        u8 hte = 0;
 887        bool first_time;
 888        unsigned long flags;
 889
 890        iwqp = qp->qp_uk.back_qp;
 891        spin_lock_irqsave(&iwqp->lock, flags);
 892        if (iwqp->hte_added) {
 893                iwqp->hte_added = 0;
 894                hte = 1;
 895        }
 896        first_time = !(qp->term_flags & IRDMA_TERM_DONE);
 897        qp->term_flags |= IRDMA_TERM_DONE;
 898        spin_unlock_irqrestore(&iwqp->lock, flags);
 899        if (first_time) {
 900                if (!timeout_occurred)
 901                        irdma_terminate_del_timer(qp);
 902
 903                irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
 904                irdma_cm_disconn(iwqp);
 905        }
 906}
 907
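     /**
      * irdma_terminate_timeout - timeout handler for the terminate timer
      * @t: expired timer
      *
      * Complete the terminate sequence and drop the qp reference taken
      * when the timer was started.
      */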
 908static void irdma_terminate_timeout(struct timer_list *t)
 909{
 910        struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
 911        struct irdma_sc_qp *qp = &iwqp->sc_qp;
 912
 913        irdma_terminate_done(qp, 1);
 914        irdma_qp_rem_ref(&iwqp->ibqp);
 915}
 916
 917/**
 918 * irdma_terminate_start_timer - start terminate timeout
 919 * @qp: hardware control qp
 920 */
 921void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
 922{
 923        struct irdma_qp *iwqp;
 924
 925        iwqp = qp->qp_uk.back_qp;
 926        irdma_qp_add_ref(&iwqp->ibqp);
 927        timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
 928        iwqp->terminate_timer.expires = jiffies + HZ;
 929
 930        add_timer(&iwqp->terminate_timer);
 931}
 932
 933/**
 934 * irdma_terminate_del_timer - delete terminate timeout
 935 * @qp: hardware control qp
 936 */
 937void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
 938{
 939        struct irdma_qp *iwqp;
 940        int ret;
 941
 942        iwqp = qp->qp_uk.back_qp;
 943        ret = del_timer(&iwqp->terminate_timer);
 944        if (ret)
 945                irdma_qp_rem_ref(&iwqp->ibqp);
 946}
 947
 948/**
 949 * irdma_cqp_query_fpm_val_cmd - send cqp command for fpm
 950 * @dev: function device struct
 951 * @val_mem: buffer for fpm
 952 * @hmc_fn_id: function id for fpm
 953 */
 954enum irdma_status_code
 955irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
 956                            struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
 957{
 958        struct irdma_cqp_request *cqp_request;
 959        struct cqp_cmds_info *cqp_info;
 960        struct irdma_pci_f *rf = dev_to_rf(dev);
 961        enum irdma_status_code status;
 962
 963        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 964        if (!cqp_request)
 965                return IRDMA_ERR_NO_MEMORY;
 966
 967        cqp_info = &cqp_request->info;
 968        cqp_request->param = NULL;
 969        cqp_info->in.u.query_fpm_val.cqp = dev->cqp;
 970        cqp_info->in.u.query_fpm_val.fpm_val_pa = val_mem->pa;
 971        cqp_info->in.u.query_fpm_val.fpm_val_va = val_mem->va;
 972        cqp_info->in.u.query_fpm_val.hmc_fn_id = hmc_fn_id;
 973        cqp_info->cqp_cmd = IRDMA_OP_QUERY_FPM_VAL;
 974        cqp_info->post_sq = 1;
 975        cqp_info->in.u.query_fpm_val.scratch = (uintptr_t)cqp_request;
 976
 977        status = irdma_handle_cqp_op(rf, cqp_request);
 978        irdma_put_cqp_request(&rf->cqp, cqp_request);
 979
 980        return status;
 981}
 982
 983/**
 984 * irdma_cqp_commit_fpm_val_cmd - commit fpm values in hw
 985 * @dev: hardware control device structure
 986 * @val_mem: buffer with fpm values
 987 * @hmc_fn_id: function id for fpm
 988 */
 989enum irdma_status_code
 990irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
 991                             struct irdma_dma_mem *val_mem, u8 hmc_fn_id)
 992{
 993        struct irdma_cqp_request *cqp_request;
 994        struct cqp_cmds_info *cqp_info;
 995        struct irdma_pci_f *rf = dev_to_rf(dev);
 996        enum irdma_status_code status;
 997
 998        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
 999        if (!cqp_request)
1000                return IRDMA_ERR_NO_MEMORY;
1001
1002        cqp_info = &cqp_request->info;
1003        cqp_request->param = NULL;
1004        cqp_info->in.u.commit_fpm_val.cqp = dev->cqp;
1005        cqp_info->in.u.commit_fpm_val.fpm_val_pa = val_mem->pa;
1006        cqp_info->in.u.commit_fpm_val.fpm_val_va = val_mem->va;
1007        cqp_info->in.u.commit_fpm_val.hmc_fn_id = hmc_fn_id;
1008        cqp_info->cqp_cmd = IRDMA_OP_COMMIT_FPM_VAL;
1009        cqp_info->post_sq = 1;
1010        cqp_info->in.u.commit_fpm_val.scratch = (uintptr_t)cqp_request;
1011
1012        status = irdma_handle_cqp_op(rf, cqp_request);
1013        irdma_put_cqp_request(&rf->cqp, cqp_request);
1014
1015        return status;
1016}
1017
1018/**
1019 * irdma_cqp_cq_create_cmd - create a cq for the cqp
1020 * @dev: device pointer
1021 * @cq: pointer to created cq
1022 */
1023enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
1024                                               struct irdma_sc_cq *cq)
1025{
1026        struct irdma_pci_f *rf = dev_to_rf(dev);
1027        struct irdma_cqp *iwcqp = &rf->cqp;
1028        struct irdma_cqp_request *cqp_request;
1029        struct cqp_cmds_info *cqp_info;
1030        enum irdma_status_code status;
1031
1032        cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1033        if (!cqp_request)
1034                return IRDMA_ERR_NO_MEMORY;
1035
1036        cqp_info = &cqp_request->info;
1037        cqp_info->cqp_cmd = IRDMA_OP_CQ_CREATE;
1038        cqp_info->post_sq = 1;
1039        cqp_info->in.u.cq_create.cq = cq;
1040        cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
1041
1042        status = irdma_handle_cqp_op(rf, cqp_request);
1043        irdma_put_cqp_request(iwcqp, cqp_request);
1044
1045        return status;
1046}
1047
1048/**
1049 * irdma_cqp_qp_create_cmd - create a qp for the cqp
1050 * @dev: device pointer
1051 * @qp: pointer to created qp
1052 */
1053enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev,
1054                                               struct irdma_sc_qp *qp)
1055{
1056        struct irdma_pci_f *rf = dev_to_rf(dev);
1057        struct irdma_cqp *iwcqp = &rf->cqp;
1058        struct irdma_cqp_request *cqp_request;
1059        struct cqp_cmds_info *cqp_info;
1060        struct irdma_create_qp_info *qp_info;
1061        enum irdma_status_code status;
1062
1063        cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1064        if (!cqp_request)
1065                return IRDMA_ERR_NO_MEMORY;
1066
1067        cqp_info = &cqp_request->info;
1068        qp_info = &cqp_request->info.in.u.qp_create.info;
1069        memset(qp_info, 0, sizeof(*qp_info));
1070        qp_info->cq_num_valid = true;
1071        qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
1072        cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
1073        cqp_info->post_sq = 1;
1074        cqp_info->in.u.qp_create.qp = qp;
1075        cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
1076
1077        status = irdma_handle_cqp_op(rf, cqp_request);
1078        irdma_put_cqp_request(iwcqp, cqp_request);
1079
1080        return status;
1081}
1082
1083/**
1084 * irdma_dealloc_push_page - free a push page for qp
1085 * @rf: RDMA PCI function
1086 * @qp: hardware control qp
1087 */
1088static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
1089                                    struct irdma_sc_qp *qp)
1090{
1091        struct irdma_cqp_request *cqp_request;
1092        struct cqp_cmds_info *cqp_info;
1093        enum irdma_status_code status;
1094
1095        if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
1096                return;
1097
1098        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
1099        if (!cqp_request)
1100                return;
1101
1102        cqp_info = &cqp_request->info;
1103        cqp_info->cqp_cmd = IRDMA_OP_MANAGE_PUSH_PAGE;
1104        cqp_info->post_sq = 1;
1105        cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
1106        cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
1107        cqp_info->in.u.manage_push_page.info.free_page = 1;
1108        cqp_info->in.u.manage_push_page.info.push_page_type = 0;
1109        cqp_info->in.u.manage_push_page.cqp = &rf->cqp.sc_cqp;
1110        cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
1111        status = irdma_handle_cqp_op(rf, cqp_request);
1112        if (!status)
1113                qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
1114        irdma_put_cqp_request(&rf->cqp, cqp_request);
1115}
1116
1117/**
1118 * irdma_free_qp_rsrc - free up memory resources for qp
1119 * @iwqp: qp ptr (user or kernel)
1120 */
1121void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
1122{
1123        struct irdma_device *iwdev = iwqp->iwdev;
1124        struct irdma_pci_f *rf = iwdev->rf;
1125        u32 qp_num = iwqp->ibqp.qp_num;
1126
1127        irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
1128        irdma_dealloc_push_page(rf, &iwqp->sc_qp);
1129        if (iwqp->sc_qp.vsi) {
1130                irdma_qp_rem_qos(&iwqp->sc_qp);
1131                iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
1132                                           iwqp->sc_qp.user_pri);
1133        }
1134
1135        if (qp_num > 2)
1136                irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
1137        dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
1138                          iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
1139        iwqp->q2_ctx_mem.va = NULL;
1140        dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
1141                          iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
1142        iwqp->kqp.dma_mem.va = NULL;
1143        kfree(iwqp->kqp.sq_wrid_mem);
1144        kfree(iwqp->kqp.rq_wrid_mem);
1145}
1146
1147/**
1148 * irdma_cq_wq_destroy - send cq destroy cqp
1149 * @rf: RDMA PCI function
1150 * @cq: hardware control cq
1151 */
1152void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
1153{
1154        struct irdma_cqp_request *cqp_request;
1155        struct cqp_cmds_info *cqp_info;
1156
1157        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1158        if (!cqp_request)
1159                return;
1160
1161        cqp_info = &cqp_request->info;
1162        cqp_info->cqp_cmd = IRDMA_OP_CQ_DESTROY;
1163        cqp_info->post_sq = 1;
1164        cqp_info->in.u.cq_destroy.cq = cq;
1165        cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
1166
1167        irdma_handle_cqp_op(rf, cqp_request);
1168        irdma_put_cqp_request(&rf->cqp, cqp_request);
1169}
1170
1171/**
 1172 * irdma_hw_modify_qp_callback - handle state for modify QP requests that don't wait
1173 * @cqp_request: modify QP completion
1174 */
1175static void irdma_hw_modify_qp_callback(struct irdma_cqp_request *cqp_request)
1176{
1177        struct cqp_cmds_info *cqp_info;
1178        struct irdma_qp *iwqp;
1179
1180        cqp_info = &cqp_request->info;
1181        iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
1182        atomic_dec(&iwqp->hw_mod_qp_pend);
1183        wake_up(&iwqp->mod_qp_waitq);
1184}
1185
1186/**
1187 * irdma_hw_modify_qp - setup cqp for modify qp
1188 * @iwdev: RDMA device
1189 * @iwqp: qp ptr (user or kernel)
1190 * @info: info for modify qp
1191 * @wait: flag to wait or not for modify qp completion
1192 */
1193enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev,
1194                                          struct irdma_qp *iwqp,
1195                                          struct irdma_modify_qp_info *info,
1196                                          bool wait)
1197{
1198        enum irdma_status_code status;
1199        struct irdma_pci_f *rf = iwdev->rf;
1200        struct irdma_cqp_request *cqp_request;
1201        struct cqp_cmds_info *cqp_info;
1202        struct irdma_modify_qp_info *m_info;
1203
1204        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
1205        if (!cqp_request)
1206                return IRDMA_ERR_NO_MEMORY;
1207
1208        if (!wait) {
1209                cqp_request->callback_fcn = irdma_hw_modify_qp_callback;
1210                atomic_inc(&iwqp->hw_mod_qp_pend);
1211        }
1212        cqp_info = &cqp_request->info;
1213        m_info = &cqp_info->in.u.qp_modify.info;
1214        memcpy(m_info, info, sizeof(*m_info));
1215        cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1216        cqp_info->post_sq = 1;
1217        cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1218        cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1219        status = irdma_handle_cqp_op(rf, cqp_request);
1220        irdma_put_cqp_request(&rf->cqp, cqp_request);
1221        if (status) {
1222                if (rdma_protocol_roce(&iwdev->ibdev, 1))
1223                        return status;
1224
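                     /*
                      * Modify failed on an iWARP QP: tear down the connection
                      * and move the QP to ERROR, via a generated AE when not
                      * waiting or a second modify with reset_tcp_conn set.
                      */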
1225                switch (m_info->next_iwarp_state) {
1226                        struct irdma_gen_ae_info ae_info;
1227
1228                case IRDMA_QP_STATE_RTS:
1229                case IRDMA_QP_STATE_IDLE:
1230                case IRDMA_QP_STATE_TERMINATE:
1231                case IRDMA_QP_STATE_CLOSING:
1232                        if (info->curr_iwarp_state == IRDMA_QP_STATE_IDLE)
1233                                irdma_send_reset(iwqp->cm_node);
1234                        else
1235                                iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
1236                        if (!wait) {
1237                                ae_info.ae_code = IRDMA_AE_BAD_CLOSE;
1238                                ae_info.ae_src = 0;
1239                                irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
1240                        } else {
1241                                cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp,
1242                                                                              wait);
1243                                if (!cqp_request)
1244                                        return IRDMA_ERR_NO_MEMORY;
1245
1246                                cqp_info = &cqp_request->info;
1247                                m_info = &cqp_info->in.u.qp_modify.info;
1248                                memcpy(m_info, info, sizeof(*m_info));
1249                                cqp_info->cqp_cmd = IRDMA_OP_QP_MODIFY;
1250                                cqp_info->post_sq = 1;
1251                                cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1252                                cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
1253                                m_info->next_iwarp_state = IRDMA_QP_STATE_ERROR;
1254                                m_info->reset_tcp_conn = true;
1255                                irdma_handle_cqp_op(rf, cqp_request);
1256                                irdma_put_cqp_request(&rf->cqp, cqp_request);
1257                        }
1258                        break;
1259                case IRDMA_QP_STATE_ERROR:
1260                default:
1261                        break;
1262                }
1263        }
1264
1265        return status;
1266}
1267
1268/**
 1269 * irdma_cqp_cq_destroy_cmd - destroy a cq via the cqp
1270 * @dev: device pointer
1271 * @cq: pointer to cq
1272 */
1273void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
1274{
1275        struct irdma_pci_f *rf = dev_to_rf(dev);
1276
1277        irdma_cq_wq_destroy(rf, cq);
1278}
1279
1280/**
 1281 * irdma_cqp_qp_destroy_cmd - destroy a qp via the cqp
1282 * @dev: device pointer
1283 * @qp: pointer to qp
1284 */
1285enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1286{
1287        struct irdma_pci_f *rf = dev_to_rf(dev);
1288        struct irdma_cqp *iwcqp = &rf->cqp;
1289        struct irdma_cqp_request *cqp_request;
1290        struct cqp_cmds_info *cqp_info;
1291        enum irdma_status_code status;
1292
1293        cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, true);
1294        if (!cqp_request)
1295                return IRDMA_ERR_NO_MEMORY;
1296
1297        cqp_info = &cqp_request->info;
1298        memset(cqp_info, 0, sizeof(*cqp_info));
1299        cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
1300        cqp_info->post_sq = 1;
1301        cqp_info->in.u.qp_destroy.qp = qp;
1302        cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
1303        cqp_info->in.u.qp_destroy.remove_hash_idx = true;
1304
1305        status = irdma_handle_cqp_op(rf, cqp_request);
1306        irdma_put_cqp_request(&rf->cqp, cqp_request);
1307
1308        return status;
1309}
1310
1311/**
1312 * irdma_ieq_mpa_crc_ae - generate AE for crc error
1313 * @dev: hardware control device structure
1314 * @qp: hardware control qp
1315 */
1316void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
1317{
1318        struct irdma_gen_ae_info info = {};
1319        struct irdma_pci_f *rf = dev_to_rf(dev);
1320
1321        ibdev_dbg(&rf->iwdev->ibdev, "AEQ: Generate MPA CRC AE\n");
1322        info.ae_code = IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR;
1323        info.ae_src = IRDMA_AE_SOURCE_RQ;
1324        irdma_gen_ae(rf, qp, &info, false);
1325}
1326
1327/**
1328 * irdma_init_hash_desc - initialize hash for crc calculation
 1329 * @desc: address of the hash descriptor to be allocated and initialized
1330 */
1331enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc)
1332{
1333        struct crypto_shash *tfm;
1334        struct shash_desc *tdesc;
1335
1336        tfm = crypto_alloc_shash("crc32c", 0, 0);
1337        if (IS_ERR(tfm))
1338                return IRDMA_ERR_MPA_CRC;
1339
1340        tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
1341                        GFP_KERNEL);
1342        if (!tdesc) {
1343                crypto_free_shash(tfm);
1344                return IRDMA_ERR_MPA_CRC;
1345        }
1346
1347        tdesc->tfm = tfm;
1348        *desc = tdesc;
1349
1350        return 0;
1351}
1352
1353/**
1354 * irdma_free_hash_desc - free hash desc
1355 * @desc: to be freed
1356 */
1357void irdma_free_hash_desc(struct shash_desc *desc)
1358{
1359        if (desc) {
1360                crypto_free_shash(desc->tfm);
1361                kfree(desc);
1362        }
1363}
1364
1365/**
1366 * irdma_ieq_check_mpacrc - check if mpa crc is OK
1367 * @desc: desc for hash
1368 * @addr: address of buffer for crc
1369 * @len: length of buffer
1370 * @val: value to be compared
1371 */
1372enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc,
1373                                              void *addr, u32 len, u32 val)
1374{
1375        u32 crc = 0;
1376        int ret;
1377        enum irdma_status_code ret_code = 0;
1378
1379        crypto_shash_init(desc);
1380        ret = crypto_shash_update(desc, addr, len);
1381        if (!ret)
1382                crypto_shash_final(desc, (u8 *)&crc);
1383        if (crc != val)
1384                ret_code = IRDMA_ERR_MPA_CRC;
1385
1386        return ret_code;
1387}
1388
1389/**
1390 * irdma_ieq_get_qp - get qp based on quad in puda buffer
1391 * @dev: hardware control device structure
1392 * @buf: receive puda buffer on exception q
1393 */
1394struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
1395                                     struct irdma_puda_buf *buf)
1396{
1397        struct irdma_qp *iwqp;
1398        struct irdma_cm_node *cm_node;
1399        struct irdma_device *iwdev = buf->vsi->back_vsi;
1400        u32 loc_addr[4] = {};
1401        u32 rem_addr[4] = {};
1402        u16 loc_port, rem_port;
1403        struct ipv6hdr *ip6h;
1404        struct iphdr *iph = (struct iphdr *)buf->iph;
1405        struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1406
1407        if (iph->version == 4) {
1408                loc_addr[0] = ntohl(iph->daddr);
1409                rem_addr[0] = ntohl(iph->saddr);
1410        } else {
1411                ip6h = (struct ipv6hdr *)buf->iph;
1412                irdma_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
1413                irdma_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
1414        }
1415        loc_port = ntohs(tcph->dest);
1416        rem_port = ntohs(tcph->source);
1417        cm_node = irdma_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
1418                                  loc_addr, buf->vlan_valid ? buf->vlan_id : 0xFFFF);
1419        if (!cm_node)
1420                return NULL;
1421
1422        iwqp = cm_node->iwqp;
1423        irdma_rem_ref_cm_node(cm_node);
1424
1425        return &iwqp->sc_qp;
1426}
1427
1428/**
 1429 * irdma_send_ieq_ack - ACKs for duplicate or OOO partial FPDUs
1430 * @qp: qp ptr
1431 */
1432void irdma_send_ieq_ack(struct irdma_sc_qp *qp)
1433{
1434        struct irdma_cm_node *cm_node = ((struct irdma_qp *)qp->qp_uk.back_qp)->cm_node;
1435        struct irdma_puda_buf *buf = qp->pfpdu.lastrcv_buf;
1436        struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
1437
1438        cm_node->tcp_cntxt.rcv_nxt = qp->pfpdu.nextseqnum;
1439        cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
1440
1441        irdma_send_ack(cm_node);
1442}
1443
1444/**
1445 * irdma_puda_ieq_get_ah_info - get AH info from IEQ buffer
1446 * @qp: qp pointer
1447 * @ah_info: AH info pointer
1448 */
1449void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
1450                                struct irdma_ah_info *ah_info)
1451{
1452        struct irdma_puda_buf *buf = qp->pfpdu.ah_buf;
1453        struct iphdr *iph;
1454        struct ipv6hdr *ip6h;
1455
1456        memset(ah_info, 0, sizeof(*ah_info));
1457        ah_info->do_lpbk = true;
1458        ah_info->vlan_tag = buf->vlan_id;
1459        ah_info->insert_vlan_tag = buf->vlan_valid;
1460        ah_info->ipv4_valid = buf->ipv4;
1461        ah_info->vsi = qp->vsi;
1462
1463        if (buf->smac_valid)
1464                ether_addr_copy(ah_info->mac_addr, buf->smac);
1465
1466        if (buf->ipv4) {
1467                ah_info->ipv4_valid = true;
1468                iph = (struct iphdr *)buf->iph;
1469                ah_info->hop_ttl = iph->ttl;
1470                ah_info->tc_tos = iph->tos;
1471                ah_info->dest_ip_addr[0] = ntohl(iph->daddr);
1472                ah_info->src_ip_addr[0] = ntohl(iph->saddr);
1473        } else {
1474                ip6h = (struct ipv6hdr *)buf->iph;
1475                ah_info->hop_ttl = ip6h->hop_limit;
1476                ah_info->tc_tos = ip6h->priority;
1477                irdma_copy_ip_ntohl(ah_info->dest_ip_addr,
1478                                    ip6h->daddr.in6_u.u6_addr32);
1479                irdma_copy_ip_ntohl(ah_info->src_ip_addr,
1480                                    ip6h->saddr.in6_u.u6_addr32);
1481        }
1482
1483        ah_info->dst_arpindex = irdma_arp_table(dev_to_rf(qp->dev),
1484                                                ah_info->dest_ip_addr,
1485                                                ah_info->ipv4_valid,
1486                                                NULL, IRDMA_ARP_RESOLVE);
1487}
1488
1489/**
1490 * irdma_gen1_ieq_update_tcpip_info - update tcpip in the buffer
1491 * @buf: puda to update
1492 * @len: length of buffer
1493 * @seqnum: seq number for tcp
1494 */
1495static void irdma_gen1_ieq_update_tcpip_info(struct irdma_puda_buf *buf,
1496                                             u16 len, u32 seqnum)
1497{
1498        struct tcphdr *tcph;
1499        struct iphdr *iph;
1500        u16 iphlen;
1501        u16 pktsize;
1502        u8 *addr = buf->mem.va;
1503
1504        iphlen = (buf->ipv4) ? 20 : 40;
1505        iph = (struct iphdr *)(addr + buf->maclen);
1506        tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
1507        pktsize = len + buf->tcphlen + iphlen;
1508        iph->tot_len = htons(pktsize);
1509        tcph->seq = htonl(seqnum);
1510}
1511
1512/**
1513 * irdma_ieq_update_tcpip_info - update tcpip in the buffer
1514 * @buf: puda to update
1515 * @len: length of buffer
1516 * @seqnum: seq number for tcp
1517 */
1518void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
1519                                 u32 seqnum)
1520{
1521        struct tcphdr *tcph;
1522        u8 *addr;
1523
1524        if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1525                return irdma_gen1_ieq_update_tcpip_info(buf, len, seqnum);
1526
1527        addr = buf->mem.va;
1528        tcph = (struct tcphdr *)addr;
1529        tcph->seq = htonl(seqnum);
1530}
1531
1532/**
1533 * irdma_gen1_puda_get_tcpip_info - get tcpip info from puda
1534 * buffer
1535 * @info: to get information
1536 * @buf: puda buffer
1537 */
1538static enum irdma_status_code
1539irdma_gen1_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1540                               struct irdma_puda_buf *buf)
1541{
1542        struct iphdr *iph;
1543        struct ipv6hdr *ip6h;
1544        struct tcphdr *tcph;
1545        u16 iphlen;
1546        u16 pkt_len;
1547        u8 *mem = buf->mem.va;
1548        struct ethhdr *ethh = buf->mem.va;
1549
1550        if (ethh->h_proto == htons(ETH_P_8021Q)) {
1551                info->vlan_valid = true;
1552                buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) &
1553                               VLAN_VID_MASK;
1554        }
1555
1556        buf->maclen = (info->vlan_valid) ? 18 : 14;
1557        iphlen = (info->l3proto) ? 40 : 20;
1558        buf->ipv4 = (info->l3proto) ? false : true;
1559        buf->iph = mem + buf->maclen;
1560        iph = (struct iphdr *)buf->iph;
1561        buf->tcph = buf->iph + iphlen;
1562        tcph = (struct tcphdr *)buf->tcph;
1563
1564        if (buf->ipv4) {
1565                pkt_len = ntohs(iph->tot_len);
1566        } else {
1567                ip6h = (struct ipv6hdr *)buf->iph;
1568                pkt_len = ntohs(ip6h->payload_len) + iphlen;
1569        }
1570
1571        buf->totallen = pkt_len + buf->maclen;
1572
1573        if (info->payload_len < buf->totallen) {
1574                ibdev_dbg(to_ibdev(buf->vsi->dev),
1575                          "ERR: payload_len = 0x%x totallen expected 0x%x\n",
1576                          info->payload_len, buf->totallen);
1577                return IRDMA_ERR_INVALID_SIZE;
1578        }
1579
1580        buf->tcphlen = tcph->doff << 2;
1581        buf->datalen = pkt_len - iphlen - buf->tcphlen;
1582        buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1583        buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
1584        buf->seqnum = ntohl(tcph->seq);
1585
1586        return 0;
1587}
1588
1589/**
1590 * irdma_puda_get_tcpip_info - get tcpip info from puda buffer
1591 * @info: to get information
1592 * @buf: puda buffer
1593 */
1594enum irdma_status_code
1595irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
1596                          struct irdma_puda_buf *buf)
1597{
1598        struct tcphdr *tcph;
1599        u32 pkt_len;
1600        u8 *mem;
1601
1602        if (buf->vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1603                return irdma_gen1_puda_get_tcpip_info(info, buf);
1604
1605        mem = buf->mem.va;
1606        buf->vlan_valid = info->vlan_valid;
1607        if (info->vlan_valid)
1608                buf->vlan_id = info->vlan;
1609
1610        buf->ipv4 = info->ipv4;
1611        if (buf->ipv4)
1612                buf->iph = mem + IRDMA_IPV4_PAD;
1613        else
1614                buf->iph = mem;
1615
1616        buf->tcph = mem + IRDMA_TCP_OFFSET;
1617        tcph = (struct tcphdr *)buf->tcph;
1618        pkt_len = info->payload_len;
1619        buf->totallen = pkt_len;
1620        buf->tcphlen = tcph->doff << 2;
1621        buf->datalen = pkt_len - IRDMA_TCP_OFFSET - buf->tcphlen;
1622        buf->data = buf->datalen ? buf->tcph + buf->tcphlen : NULL;
1623        buf->hdrlen = IRDMA_TCP_OFFSET + buf->tcphlen;
1624        buf->seqnum = ntohl(tcph->seq);
1625
1626        if (info->smac_valid) {
1627                ether_addr_copy(buf->smac, info->smac);
1628                buf->smac_valid = true;
1629        }
1630
1631        return 0;
1632}
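
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): both
 * parsers above leave the puda buffer with the same invariant, namely that
 * the payload is whatever remains after the parsed headers.
 */
static bool irdma_example_puda_buf_consistent(const struct irdma_puda_buf *buf)
{
	/* hdrlen covers MAC/IP/TCP (gen1) or the fixed TCP offset (gen2) */
	return buf->totallen >= buf->hdrlen &&
	       buf->datalen == buf->totallen - buf->hdrlen;
}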
1633
1634/**
1635 * irdma_hw_stats_timeout - Stats timer-handler which updates all HW stats
1636 * @t: timer_list pointer
1637 */
1638static void irdma_hw_stats_timeout(struct timer_list *t)
1639{
1640        struct irdma_vsi_pestat *pf_devstat =
1641                from_timer(pf_devstat, t, stats_timer);
1642        struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
1643
1644        if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
1645                irdma_cqp_gather_stats_gen1(sc_vsi->dev, sc_vsi->pestat);
1646        else
1647                irdma_cqp_gather_stats_cmd(sc_vsi->dev, sc_vsi->pestat, false);
1648
1649        mod_timer(&pf_devstat->stats_timer,
1650                  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1651}
1652
1653/**
1654 * irdma_hw_stats_start_timer - Start periodic stats timer
1655 * @vsi: vsi structure pointer
1656 */
1657void irdma_hw_stats_start_timer(struct irdma_sc_vsi *vsi)
1658{
1659        struct irdma_vsi_pestat *devstat = vsi->pestat;
1660
1661        timer_setup(&devstat->stats_timer, irdma_hw_stats_timeout, 0);
1662        mod_timer(&devstat->stats_timer,
1663                  jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
1664}
1665
1666/**
1667 * irdma_hw_stats_stop_timer - Delete periodic stats timer
1668 * @vsi: pointer to vsi structure
1669 */
1670void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi)
1671{
1672        struct irdma_vsi_pestat *devstat = vsi->pestat;
1673
1674        del_timer_sync(&devstat->stats_timer);
1675}
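
/*
 * Illustrative usage sketch (hypothetical wrapper): the stats timer is armed
 * once per VSI and re-arms itself from irdma_hw_stats_timeout();
 * del_timer_sync() in the stop path guarantees the handler is no longer
 * running on return.
 */
static void irdma_example_vsi_stats_lifetime(struct irdma_sc_vsi *vsi)
{
	irdma_hw_stats_start_timer(vsi);
	/* ... VSI carries traffic; stats refresh every STATS_TIMER_DELAY ms ... */
	irdma_hw_stats_stop_timer(vsi);
}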
1676
1677/**
1678 * irdma_process_stats - Check for counter wrap and update stats
1679 * @pestat: stats structure pointer
1680 */
1681static inline void irdma_process_stats(struct irdma_vsi_pestat *pestat)
1682{
1683        sc_vsi_update_stats(pestat->vsi);
1684}
1685
1686/**
1687 * irdma_cqp_gather_stats_gen1 - Gather stats
1688 * @dev: pointer to device structure
1689 * @pestat: statistics structure
1690 */
1691void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
1692                                 struct irdma_vsi_pestat *pestat)
1693{
1694        struct irdma_gather_stats *gather_stats =
1695                pestat->gather_info.gather_stats_va;
1696        u32 stats_inst_offset_32;
1697        u32 stats_inst_offset_64;
1698
1699        stats_inst_offset_32 = (pestat->gather_info.use_stats_inst) ?
1700                                       pestat->gather_info.stats_inst_index :
1701                                       pestat->hw->hmc.hmc_fn_id;
1702        stats_inst_offset_32 *= 4;
1703        stats_inst_offset_64 = stats_inst_offset_32 * 2;
1704
1705        gather_stats->rxvlanerr =
1706                rd32(dev->hw,
1707                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_RXVLANERR]
1708                     + stats_inst_offset_32);
1709        gather_stats->ip4rxdiscard =
1710                rd32(dev->hw,
1711                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXDISCARD]
1712                     + stats_inst_offset_32);
1713        gather_stats->ip4rxtrunc =
1714                rd32(dev->hw,
1715                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4RXTRUNC]
1716                     + stats_inst_offset_32);
1717        gather_stats->ip4txnoroute =
1718                rd32(dev->hw,
1719                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP4TXNOROUTE]
1720                     + stats_inst_offset_32);
1721        gather_stats->ip6rxdiscard =
1722                rd32(dev->hw,
1723                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXDISCARD]
1724                     + stats_inst_offset_32);
1725        gather_stats->ip6rxtrunc =
1726                rd32(dev->hw,
1727                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6RXTRUNC]
1728                     + stats_inst_offset_32);
1729        gather_stats->ip6txnoroute =
1730                rd32(dev->hw,
1731                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE]
1732                     + stats_inst_offset_32);
1733        gather_stats->tcprtxseg =
1734                rd32(dev->hw,
1735                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRTXSEG]
1736                     + stats_inst_offset_32);
1737        gather_stats->tcprxopterr =
1738                rd32(dev->hw,
1739                     dev->hw_stats_regs_32[IRDMA_HW_STAT_INDEX_TCPRXOPTERR]
1740                     + stats_inst_offset_32);
1741
1742        gather_stats->ip4rxocts =
1743                rd64(dev->hw,
1744                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXOCTS]
1745                     + stats_inst_offset_64);
1746        gather_stats->ip4rxpkts =
1747                rd64(dev->hw,
1748                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXPKTS]
1749                     + stats_inst_offset_64);
1750        gather_stats->ip4rxfrags =
1751                rd64(dev->hw,
1752                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXFRAGS]
1753                     + stats_inst_offset_64);
1754        gather_stats->ip4rxmcpkts =
1755                rd64(dev->hw,
1756                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4RXMCPKTS]
1757                     + stats_inst_offset_64);
1758        gather_stats->ip4txocts =
1759                rd64(dev->hw,
1760                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXOCTS]
1761                     + stats_inst_offset_64);
1762        gather_stats->ip4txpkts =
1763                rd64(dev->hw,
1764                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXPKTS]
1765                     + stats_inst_offset_64);
1766        gather_stats->ip4txfrag =
1767                rd64(dev->hw,
1768                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXFRAGS]
1769                     + stats_inst_offset_64);
1770        gather_stats->ip4txmcpkts =
1771                rd64(dev->hw,
1772                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP4TXMCPKTS]
1773                     + stats_inst_offset_64);
1774        gather_stats->ip6rxocts =
1775                rd64(dev->hw,
1776                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXOCTS]
1777                     + stats_inst_offset_64);
1778        gather_stats->ip6rxpkts =
1779                rd64(dev->hw,
1780                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXPKTS]
1781                     + stats_inst_offset_64);
1782        gather_stats->ip6rxfrags =
1783                rd64(dev->hw,
1784                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXFRAGS]
1785                     + stats_inst_offset_64);
1786        gather_stats->ip6rxmcpkts =
1787                rd64(dev->hw,
1788                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6RXMCPKTS]
1789                     + stats_inst_offset_64);
1790        gather_stats->ip6txocts =
1791                rd64(dev->hw,
1792                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXOCTS]
1793                     + stats_inst_offset_64);
1794        gather_stats->ip6txpkts =
1795                rd64(dev->hw,
1796                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXPKTS]
1797                     + stats_inst_offset_64);
1798        gather_stats->ip6txfrags =
1799                rd64(dev->hw,
1800                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXFRAGS]
1801                     + stats_inst_offset_64);
1802        gather_stats->ip6txmcpkts =
1803                rd64(dev->hw,
1804                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS]
1805                     + stats_inst_offset_64);
1806        gather_stats->tcprxsegs =
1807                rd64(dev->hw,
1808                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPRXSEGS]
1809                     + stats_inst_offset_64);
1810        gather_stats->tcptxsegs =
1811                rd64(dev->hw,
1812                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_TCPTXSEG]
1813                     + stats_inst_offset_64);
1814        gather_stats->rdmarxrds =
1815                rd64(dev->hw,
1816                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXRDS]
1817                     + stats_inst_offset_64);
1818        gather_stats->rdmarxsnds =
1819                rd64(dev->hw,
1820                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXSNDS]
1821                     + stats_inst_offset_64);
1822        gather_stats->rdmarxwrs =
1823                rd64(dev->hw,
1824                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMARXWRS]
1825                     + stats_inst_offset_64);
1826        gather_stats->rdmatxrds =
1827                rd64(dev->hw,
1828                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXRDS]
1829                     + stats_inst_offset_64);
1830        gather_stats->rdmatxsnds =
1831                rd64(dev->hw,
1832                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXSNDS]
1833                     + stats_inst_offset_64);
1834        gather_stats->rdmatxwrs =
1835                rd64(dev->hw,
1836                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMATXWRS]
1837                     + stats_inst_offset_64);
1838        gather_stats->rdmavbn =
1839                rd64(dev->hw,
1840                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVBND]
1841                     + stats_inst_offset_64);
1842        gather_stats->rdmavinv =
1843                rd64(dev->hw,
1844                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_RDMAVINV]
1845                     + stats_inst_offset_64);
1846        gather_stats->udprxpkts =
1847                rd64(dev->hw,
1848                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPRXPKTS]
1849                     + stats_inst_offset_64);
1850        gather_stats->udptxpkts =
1851                rd64(dev->hw,
1852                     dev->hw_stats_regs_64[IRDMA_HW_STAT_INDEX_UDPTXPKTS]
1853                     + stats_inst_offset_64);
1854
1855        irdma_process_stats(pestat);
1856}
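
/*
 * Worked example of the offset math above: with use_stats_inst set and
 * stats_inst_index == 3, stats_inst_offset_32 is 3 * 4 = 12 bytes and
 * stats_inst_offset_64 is 12 * 2 = 24 bytes, i.e. each statistics instance
 * owns a 4-byte slot in the 32-bit counter banks and an 8-byte slot in the
 * 64-bit banks.
 */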
1857
1858/**
1859 * irdma_process_cqp_stats - Check for counter wrap and update stats
1860 * @cqp_request: cqp_request structure pointer
1861 */
1862static void irdma_process_cqp_stats(struct irdma_cqp_request *cqp_request)
1863{
1864        struct irdma_vsi_pestat *pestat = cqp_request->param;
1865
1866        irdma_process_stats(pestat);
1867}
1868
1869/**
1870 * irdma_cqp_gather_stats_cmd - Gather stats
1871 * @dev: pointer to device structure
1872 * @pestat: pointer to stats info
1873 * @wait: flag to wait or not wait for stats
1874 */
1875enum irdma_status_code
1876irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
1877                           struct irdma_vsi_pestat *pestat, bool wait)
1878
1879{
1880        struct irdma_pci_f *rf = dev_to_rf(dev);
1881        struct irdma_cqp *iwcqp = &rf->cqp;
1882        struct irdma_cqp_request *cqp_request;
1883        struct cqp_cmds_info *cqp_info;
1884        enum irdma_status_code status;
1885
1886        cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1887        if (!cqp_request)
1888                return IRDMA_ERR_NO_MEMORY;
1889
1890        cqp_info = &cqp_request->info;
1891        memset(cqp_info, 0, sizeof(*cqp_info));
1892        cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
1893        cqp_info->post_sq = 1;
1894        cqp_info->in.u.stats_gather.info = pestat->gather_info;
1895        cqp_info->in.u.stats_gather.scratch = (uintptr_t)cqp_request;
1896        cqp_info->in.u.stats_gather.cqp = &rf->cqp.sc_cqp;
1897        cqp_request->param = pestat;
1898        if (!wait)
1899                cqp_request->callback_fcn = irdma_process_cqp_stats;
1900        status = irdma_handle_cqp_op(rf, cqp_request);
1901        if (wait)
1902                irdma_process_stats(pestat);
1903        irdma_put_cqp_request(&rf->cqp, cqp_request);
1904
1905        return status;
1906}
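
/*
 * Illustrative usage sketch (hypothetical wrapper): with wait == true the
 * stats are processed before the call returns; with wait == false
 * irdma_process_cqp_stats() runs later from the CQP completion callback.
 */
static void irdma_example_refresh_vsi_stats(struct irdma_sc_vsi *vsi, bool wait)
{
	if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
		irdma_cqp_gather_stats_gen1(vsi->dev, vsi->pestat);
	else
		irdma_cqp_gather_stats_cmd(vsi->dev, vsi->pestat, wait);
}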
1907
1908/**
1909 * irdma_cqp_stats_inst_cmd - Allocate/free stats instance
1910 * @vsi: pointer to vsi structure
1911 * @cmd: command to allocate or free
1912 * @stats_info: pointer to allocate stats info
1913 */
1914enum irdma_status_code
1915irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
1916                         struct irdma_stats_inst_info *stats_info)
1917{
1918        struct irdma_pci_f *rf = dev_to_rf(vsi->dev);
1919        struct irdma_cqp *iwcqp = &rf->cqp;
1920        struct irdma_cqp_request *cqp_request;
1921        struct cqp_cmds_info *cqp_info;
1922        enum irdma_status_code status;
1923        bool wait = false;
1924
1925        if (cmd == IRDMA_OP_STATS_ALLOCATE)
1926                wait = true;
1927        cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, wait);
1928        if (!cqp_request)
1929                return IRDMA_ERR_NO_MEMORY;
1930
1931        cqp_info = &cqp_request->info;
1932        memset(cqp_info, 0, sizeof(*cqp_info));
1933        cqp_info->cqp_cmd = cmd;
1934        cqp_info->post_sq = 1;
1935        cqp_info->in.u.stats_manage.info = *stats_info;
1936        cqp_info->in.u.stats_manage.scratch = (uintptr_t)cqp_request;
1937        cqp_info->in.u.stats_manage.cqp = &rf->cqp.sc_cqp;
1938        status = irdma_handle_cqp_op(rf, cqp_request);
1939        if (wait)
1940                stats_info->stats_idx = cqp_request->compl_info.op_ret_val;
1941        irdma_put_cqp_request(iwcqp, cqp_request);
1942
1943        return status;
1944}
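
/*
 * Illustrative usage sketch (hypothetical wrapper): the allocate command is
 * issued with wait set so the HW instance index can be read back into
 * stats_info->stats_idx. IRDMA_OP_STATS_FREE is assumed here to be the
 * matching free opcode.
 */
static enum irdma_status_code
irdma_example_stats_inst_cycle(struct irdma_sc_vsi *vsi,
			       struct irdma_stats_inst_info *stats_info)
{
	enum irdma_status_code status;

	status = irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE,
					  stats_info);
	if (status)
		return status;

	/* stats_info->stats_idx now holds the allocated instance index */
	return irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE, stats_info);
}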
1945
1946/**
1947 * irdma_cqp_ceq_cmd - Create/Destroy CEQs after CEQ 0
1948 * @dev: pointer to device info
1949 * @sc_ceq: pointer to ceq structure
1950 * @op: Create or Destroy
1951 */
1952enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
1953                                         struct irdma_sc_ceq *sc_ceq, u8 op)
1954{
1955        struct irdma_cqp_request *cqp_request;
1956        struct cqp_cmds_info *cqp_info;
1957        struct irdma_pci_f *rf = dev_to_rf(dev);
1958        enum irdma_status_code status;
1959
1960        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1961        if (!cqp_request)
1962                return IRDMA_ERR_NO_MEMORY;
1963
1964        cqp_info = &cqp_request->info;
1965        cqp_info->post_sq = 1;
1966        cqp_info->cqp_cmd = op;
1967        cqp_info->in.u.ceq_create.ceq = sc_ceq;
1968        cqp_info->in.u.ceq_create.scratch = (uintptr_t)cqp_request;
1969
1970        status = irdma_handle_cqp_op(rf, cqp_request);
1971        irdma_put_cqp_request(&rf->cqp, cqp_request);
1972
1973        return status;
1974}
1975
1976/**
1977 * irdma_cqp_aeq_cmd - Create/Destroy AEQ
1978 * @dev: pointer to device info
1979 * @sc_aeq: pointer to aeq structure
1980 * @op: Create or Destroy
1981 */
1982enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
1983                                         struct irdma_sc_aeq *sc_aeq, u8 op)
1984{
1985        struct irdma_cqp_request *cqp_request;
1986        struct cqp_cmds_info *cqp_info;
1987        struct irdma_pci_f *rf = dev_to_rf(dev);
1988        enum irdma_status_code status;
1989
1990        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
1991        if (!cqp_request)
1992                return IRDMA_ERR_NO_MEMORY;
1993
1994        cqp_info = &cqp_request->info;
1995        cqp_info->post_sq = 1;
1996        cqp_info->cqp_cmd = op;
1997        cqp_info->in.u.aeq_create.aeq = sc_aeq;
1998        cqp_info->in.u.aeq_create.scratch = (uintptr_t)cqp_request;
1999
2000        status = irdma_handle_cqp_op(rf, cqp_request);
2001        irdma_put_cqp_request(&rf->cqp, cqp_request);
2002
2003        return status;
2004}
2005
2006/**
2007 * irdma_cqp_ws_node_cmd - Add/modify/delete ws node
2008 * @dev: pointer to device structure
2009 * @cmd: Add, modify or delete
2010 * @node_info: pointer to ws node info
2011 */
2012enum irdma_status_code
2013irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
2014                      struct irdma_ws_node_info *node_info)
2015{
2016        struct irdma_pci_f *rf = dev_to_rf(dev);
2017        struct irdma_cqp *iwcqp = &rf->cqp;
2018        struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
2019        struct irdma_cqp_request *cqp_request;
2020        struct cqp_cmds_info *cqp_info;
2021        enum irdma_status_code status;
2022        bool poll;
2023
2024        if (!rf->sc_dev.ceq_valid)
2025                poll = true;
2026        else
2027                poll = false;
2028
2029        cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, !poll);
2030        if (!cqp_request)
2031                return IRDMA_ERR_NO_MEMORY;
2032
2033        cqp_info = &cqp_request->info;
2034        memset(cqp_info, 0, sizeof(*cqp_info));
2035        cqp_info->cqp_cmd = cmd;
2036        cqp_info->post_sq = 1;
2037        cqp_info->in.u.ws_node.info = *node_info;
2038        cqp_info->in.u.ws_node.cqp = cqp;
2039        cqp_info->in.u.ws_node.scratch = (uintptr_t)cqp_request;
2040        status = irdma_handle_cqp_op(rf, cqp_request);
2041        if (status)
2042                goto exit;
2043
2044        if (poll) {
2045                struct irdma_ccq_cqe_info compl_info;
2046
2047                status = irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_WORK_SCHED_NODE,
2048                                                       &compl_info);
2049                node_info->qs_handle = compl_info.op_ret_val;
2050                ibdev_dbg(&rf->iwdev->ibdev, "DCB: opcode=%d, compl_info.retval=%d\n",
2051                          compl_info.op_code, compl_info.op_ret_val);
2052        } else {
2053                node_info->qs_handle = cqp_request->compl_info.op_ret_val;
2054        }
2055
2056exit:
2057        irdma_put_cqp_request(&rf->cqp, cqp_request);
2058
2059        return status;
2060}
2061
2062/**
2063 * irdma_cqp_up_map_cmd - Set the UP-UP (user priority) mapping
2064 * @dev: pointer to device structure
2065 * @cmd: map command
2066 * @map_info: pointer to up map info
2067 */
2068enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
2069                                            struct irdma_up_info *map_info)
2070{
2071        struct irdma_pci_f *rf = dev_to_rf(dev);
2072        struct irdma_cqp *iwcqp = &rf->cqp;
2073        struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
2074        struct irdma_cqp_request *cqp_request;
2075        struct cqp_cmds_info *cqp_info;
2076        enum irdma_status_code status;
2077
2078        cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
2079        if (!cqp_request)
2080                return IRDMA_ERR_NO_MEMORY;
2081
2082        cqp_info = &cqp_request->info;
2083        memset(cqp_info, 0, sizeof(*cqp_info));
2084        cqp_info->cqp_cmd = cmd;
2085        cqp_info->post_sq = 1;
2086        cqp_info->in.u.up_map.info = *map_info;
2087        cqp_info->in.u.up_map.cqp = cqp;
2088        cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
2089
2090        status = irdma_handle_cqp_op(rf, cqp_request);
2091        irdma_put_cqp_request(&rf->cqp, cqp_request);
2092
2093        return status;
2094}
2095
2096/**
2097 * irdma_ah_cqp_op - perform an AH cqp operation
2098 * @rf: RDMA PCI function
2099 * @sc_ah: address handle
2100 * @cmd: AH operation
2101 * @wait: wait if true
2102 * @callback_fcn: Callback function on CQP op completion
2103 * @cb_param: parameter for callback function
2104 *
2105 * returns errno
2106 */
2107int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
2108                    bool wait,
2109                    void (*callback_fcn)(struct irdma_cqp_request *),
2110                    void *cb_param)
2111{
2112        struct irdma_cqp_request *cqp_request;
2113        struct cqp_cmds_info *cqp_info;
2114        enum irdma_status_code status;
2115
2116        if (cmd != IRDMA_OP_AH_CREATE && cmd != IRDMA_OP_AH_DESTROY)
2117                return -EINVAL;
2118
2119        cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait);
2120        if (!cqp_request)
2121                return -ENOMEM;
2122
2123        cqp_info = &cqp_request->info;
2124        cqp_info->cqp_cmd = cmd;
2125        cqp_info->post_sq = 1;
2126        if (cmd == IRDMA_OP_AH_CREATE) {
2127                cqp_info->in.u.ah_create.info = sc_ah->ah_info;
2128                cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
2129                cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
2130        } else if (cmd == IRDMA_OP_AH_DESTROY) {
2131                cqp_info->in.u.ah_destroy.info = sc_ah->ah_info;
2132                cqp_info->in.u.ah_destroy.scratch = (uintptr_t)cqp_request;
2133                cqp_info->in.u.ah_destroy.cqp = &rf->cqp.sc_cqp;
2134        }
2135
2136        if (!wait) {
2137                cqp_request->callback_fcn = callback_fcn;
2138                cqp_request->param = cb_param;
2139        }
2140        status = irdma_handle_cqp_op(rf, cqp_request);
2141        irdma_put_cqp_request(&rf->cqp, cqp_request);
2142
2143        if (status)
2144                return -ENOMEM;
2145
2146        if (wait)
2147                sc_ah->ah_info.ah_valid = (cmd == IRDMA_OP_AH_CREATE);
2148
2149        return 0;
2150}
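
/*
 * Illustrative usage sketch (hypothetical wrapper): a synchronous AH create
 * through the CQP. With wait == true no callback is supplied and
 * ah_info.ah_valid is updated before return.
 */
static int irdma_example_ah_create_sync(struct irdma_pci_f *rf,
					struct irdma_sc_ah *sc_ah)
{
	return irdma_ah_cqp_op(rf, sc_ah, IRDMA_OP_AH_CREATE, true, NULL, NULL);
}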
2151
2152/**
2153 * irdma_ieq_ah_cb - callback after creation of AH for IEQ
2154 * @cqp_request: pointer to cqp_request of create AH
2155 */
2156static void irdma_ieq_ah_cb(struct irdma_cqp_request *cqp_request)
2157{
2158        struct irdma_sc_qp *qp = cqp_request->param;
2159        struct irdma_sc_ah *sc_ah = qp->pfpdu.ah;
2160        unsigned long flags;
2161
2162        spin_lock_irqsave(&qp->pfpdu.lock, flags);
2163        if (!cqp_request->compl_info.op_ret_val) {
2164                sc_ah->ah_info.ah_valid = true;
2165                irdma_ieq_process_fpdus(qp, qp->vsi->ieq);
2166        } else {
2167                sc_ah->ah_info.ah_valid = false;
2168                irdma_ieq_cleanup_qp(qp->vsi->ieq, qp);
2169        }
2170        spin_unlock_irqrestore(&qp->pfpdu.lock, flags);
2171}
2172
2173/**
2174 * irdma_ilq_ah_cb - callback after creation of AH for ILQ
2175 * @cqp_request: pointer to cqp_request of create AH
2176 */
2177static void irdma_ilq_ah_cb(struct irdma_cqp_request *cqp_request)
2178{
2179        struct irdma_cm_node *cm_node = cqp_request->param;
2180        struct irdma_sc_ah *sc_ah = cm_node->ah;
2181
2182        sc_ah->ah_info.ah_valid = !cqp_request->compl_info.op_ret_val;
2183        irdma_add_conn_est_qh(cm_node);
2184}
2185
2186/**
2187 * irdma_puda_create_ah - create AH for ILQ/IEQ qp's
2188 * @dev: device pointer
2189 * @ah_info: Address handle info
2190 * @wait: When true will wait for operation to complete
2191 * @type: ILQ/IEQ
2192 * @cb_param: Callback param when not waiting
2193 * @ah_ret: Returned pointer to address handle if created
2194 *
2195 */
2196enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev,
2197                                            struct irdma_ah_info *ah_info,
2198                                            bool wait, enum puda_rsrc_type type,
2199                                            void *cb_param,
2200                                            struct irdma_sc_ah **ah_ret)
2201{
2202        struct irdma_sc_ah *ah;
2203        struct irdma_pci_f *rf = dev_to_rf(dev);
2204        int err;
2205
2206        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2207        *ah_ret = ah;
2208        if (!ah)
2209                return IRDMA_ERR_NO_MEMORY;
2210
2211        err = irdma_alloc_rsrc(rf, rf->allocated_ahs, rf->max_ah,
2212                               &ah_info->ah_idx, &rf->next_ah);
2213        if (err)
2214                goto err_free;
2215
2216        ah->dev = dev;
2217        ah->ah_info = *ah_info;
2218
2219        if (type == IRDMA_PUDA_RSRC_TYPE_ILQ)
2220                err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2221                                      irdma_ilq_ah_cb, cb_param);
2222        else
2223                err = irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_CREATE, wait,
2224                                      irdma_ieq_ah_cb, cb_param);
2225
2226        if (err)
2227                goto error;
2228        return 0;
2229
2230error:
2231        irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2232err_free:
2233        kfree(ah);
2234        *ah_ret = NULL;
2235        return IRDMA_ERR_NO_MEMORY;
2236}
2237
2238/**
2239 * irdma_puda_free_ah - free a puda address handle
2240 * @dev: device pointer
2241 * @ah: The address handle to free
2242 */
2243void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
2244{
2245        struct irdma_pci_f *rf = dev_to_rf(dev);
2246
2247        if (!ah)
2248                return;
2249
2250        if (ah->ah_info.ah_valid) {
2251                irdma_ah_cqp_op(rf, ah, IRDMA_OP_AH_DESTROY, false, NULL, NULL);
2252                irdma_free_rsrc(rf, rf->allocated_ahs, ah->ah_info.ah_idx);
2253        }
2254
2255        kfree(ah);
2256}
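
/*
 * Illustrative usage sketch (hypothetical wrapper) pairing
 * irdma_puda_create_ah() with irdma_puda_free_ah() for an IEQ QP. ah_info is
 * assumed to be already filled in (for example by irdma_puda_ieq_get_ah_info()
 * above), and IRDMA_PUDA_RSRC_TYPE_IEQ is assumed to be the IEQ resource type.
 */
static enum irdma_status_code
irdma_example_ieq_ah_cycle(struct irdma_sc_dev *dev,
			   struct irdma_ah_info *ah_info,
			   struct irdma_sc_qp *qp)
{
	struct irdma_sc_ah *ah;
	enum irdma_status_code status;

	status = irdma_puda_create_ah(dev, ah_info, true,
				      IRDMA_PUDA_RSRC_TYPE_IEQ, qp, &ah);
	if (status)
		return status;

	/* ... transmit through the AH ... */
	irdma_puda_free_ah(dev, ah);
	return 0;
}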
2257
2258/**
2259 * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/UD QP
2260 * @cqp_request: pointer to cqp_request of create AH
2261 */
2262void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
2263{
2264        struct irdma_sc_ah *sc_ah = cqp_request->param;
2265
2266        if (!cqp_request->compl_info.op_ret_val)
2267                sc_ah->ah_info.ah_valid = true;
2268        else
2269                sc_ah->ah_info.ah_valid = false;
2270}
2271
2272/**
2273 * irdma_prm_add_pble_mem - add memory to pble resources
2274 * @pprm: pble resource manager
2275 * @pchunk: chunk of memory to add
2276 */
2277enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
2278                                              struct irdma_chunk *pchunk)
2279{
2280        u64 sizeofbitmap;
2281
2282        if (pchunk->size & 0xfff)
2283                return IRDMA_ERR_PARAM;
2284
2285        sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
2286
2287        pchunk->bitmapmem.size = sizeofbitmap >> 3;
2288        pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
2289
2290        if (!pchunk->bitmapmem.va)
2291                return IRDMA_ERR_NO_MEMORY;
2292
2293        pchunk->bitmapbuf = pchunk->bitmapmem.va;
2294        bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
2295
2296        pchunk->sizeofbitmap = sizeofbitmap;
2297        /* each pble is 8 bytes hence shift by 3 */
2298        pprm->total_pble_alloc += pchunk->size >> 3;
2299        pprm->free_pble_cnt += pchunk->size >> 3;
2300
2301        return 0;
2302}
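
/*
 * Worked example of the sizing math above, assuming a 2MB (0x200000 byte)
 * chunk and pprm->pble_shift == 12 (one bitmap bit per 4KB of PBLE memory):
 *   sizeofbitmap   = 0x200000 >> 12 = 512 bits
 *   bitmapmem.size = 512 >> 3       = 64 bytes
 *   PBLEs added    = 0x200000 >> 3  = 262144 (each PBLE is 8 bytes)
 */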
2303
2304/**
2305 * irdma_prm_get_pbles - get pble's from prm
2306 * @pprm: pble resource manager
2307 * @chunkinfo: information about the chunk where pbles were acquired
2308 * @mem_size: size of pble memory needed
2309 * @vaddr: returns virtual address of pble memory
2310 * @fpm_addr: returns fpm address of pble memory
2311 */
2312enum irdma_status_code
2313irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
2314                    struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
2315                    u64 **vaddr, u64 *fpm_addr)
2316{
2317        u64 bits_needed;
2318        u64 bit_idx = PBLE_INVALID_IDX;
2319        struct irdma_chunk *pchunk = NULL;
2320        struct list_head *chunk_entry = pprm->clist.next;
2321        u32 offset;
2322        unsigned long flags;
2323        *vaddr = NULL;
2324        *fpm_addr = 0;
2325
2326        bits_needed = DIV_ROUND_UP_ULL(mem_size, BIT_ULL(pprm->pble_shift));
2327
2328        spin_lock_irqsave(&pprm->prm_lock, flags);
2329        while (chunk_entry != &pprm->clist) {
2330                pchunk = (struct irdma_chunk *)chunk_entry;
2331                bit_idx = bitmap_find_next_zero_area(pchunk->bitmapbuf,
2332                                                     pchunk->sizeofbitmap, 0,
2333                                                     bits_needed, 0);
2334                if (bit_idx < pchunk->sizeofbitmap)
2335                        break;
2336
2337                /* advance to the next chunk on the list */
2338                chunk_entry = pchunk->list.next;
2339        }
2340
2341        if (!pchunk || bit_idx >= pchunk->sizeofbitmap) {
2342                spin_unlock_irqrestore(&pprm->prm_lock, flags);
2343                return IRDMA_ERR_NO_MEMORY;
2344        }
2345
2346        bitmap_set(pchunk->bitmapbuf, bit_idx, bits_needed);
2347        offset = bit_idx << pprm->pble_shift;
2348        *vaddr = pchunk->vaddr + offset;
2349        *fpm_addr = pchunk->fpm_addr + offset;
2350
2351        chunkinfo->pchunk = pchunk;
2352        chunkinfo->bit_idx = bit_idx;
2353        chunkinfo->bits_used = bits_needed;
2354        /* each pble is 8 bytes, so one bitmap bit covers 1 << (pble_shift - 3) pbles */
2355        pprm->free_pble_cnt -= chunkinfo->bits_used << (pprm->pble_shift - 3);
2356        spin_unlock_irqrestore(&pprm->prm_lock, flags);
2357
2358        return 0;
2359}
2360
2361/**
2362 * irdma_prm_return_pbles - return pbles back to prm
2363 * @pprm: pble resource manager
2364 * @chunkinfo: chunk where pble's were acquired and to be freed
2365 */
2366void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
2367                            struct irdma_pble_chunkinfo *chunkinfo)
2368{
2369        unsigned long flags;
2370
2371        spin_lock_irqsave(&pprm->prm_lock, flags);
2372        pprm->free_pble_cnt += chunkinfo->bits_used << (pprm->pble_shift - 3);
2373        bitmap_clear(chunkinfo->pchunk->bitmapbuf, chunkinfo->bit_idx,
2374                     chunkinfo->bits_used);
2375        spin_unlock_irqrestore(&pprm->prm_lock, flags);
2376}
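
/*
 * Illustrative usage sketch (hypothetical wrapper): reserve enough PBLE space
 * for mem_size bytes, use the returned virtual/FPM addresses, then return the
 * bits to the pool. chunkinfo records which chunk and bit range were taken so
 * the free path clears exactly those bits.
 */
static enum irdma_status_code
irdma_example_with_pbles(struct irdma_pble_prm *pprm, u64 mem_size)
{
	struct irdma_pble_chunkinfo chunkinfo;
	enum irdma_status_code status;
	u64 *va;
	u64 fpm_addr;

	status = irdma_prm_get_pbles(pprm, &chunkinfo, mem_size, &va, &fpm_addr);
	if (status)
		return status;

	/* ... fill va[] with page addresses and hand fpm_addr to the HW ... */

	irdma_prm_return_pbles(pprm, &chunkinfo);
	return 0;
}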
2377
2378enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va,
2379                                              dma_addr_t *pg_dma, u32 pg_cnt)
2380{
2381        struct page *vm_page;
2382        int i;
2383        u8 *addr;
2384
2385        addr = (u8 *)(uintptr_t)va;
2386        for (i = 0; i < pg_cnt; i++) {
2387                vm_page = vmalloc_to_page(addr);
2388                if (!vm_page)
2389                        goto err;
2390
2391                pg_dma[i] = dma_map_page(hw->device, vm_page, 0, PAGE_SIZE,
2392                                         DMA_BIDIRECTIONAL);
2393                if (dma_mapping_error(hw->device, pg_dma[i]))
2394                        goto err;
2395
2396                addr += PAGE_SIZE;
2397        }
2398
2399        return 0;
2400
2401err:
2402        irdma_unmap_vm_page_list(hw, pg_dma, i);
2403        return IRDMA_ERR_NO_MEMORY;
2404}
2405
2406void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt)
2407{
2408        int i;
2409
2410        for (i = 0; i < pg_cnt; i++)
2411                dma_unmap_page(hw->device, pg_dma[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
2412}
2413
2414/**
2415 * irdma_pble_free_paged_mem - free virtual paged memory
2416 * @chunk: chunk to free with paged memory
2417 */
2418void irdma_pble_free_paged_mem(struct irdma_chunk *chunk)
2419{
2420        if (!chunk->pg_cnt)
2421                goto done;
2422
2423        irdma_unmap_vm_page_list(chunk->dev->hw, chunk->dmainfo.dmaaddrs,
2424                                 chunk->pg_cnt);
2425
2426done:
2427        kfree(chunk->dmainfo.dmaaddrs);
2428        chunk->dmainfo.dmaaddrs = NULL;
2429        vfree(chunk->vaddr);
2430        chunk->vaddr = NULL;
2431        chunk->type = 0;
2432}
2433
2434/**
2435 * irdma_pble_get_paged_mem - allocate paged memory for pbles
2436 * @chunk: chunk to add for paged memory
2437 * @pg_cnt: number of pages needed
2438 */
2439enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk,
2440                                                u32 pg_cnt)
2441{
2442        u32 size;
2443        void *va;
2444
2445        chunk->dmainfo.dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
2446        if (!chunk->dmainfo.dmaaddrs)
2447                return IRDMA_ERR_NO_MEMORY;
2448
2449        size = PAGE_SIZE * pg_cnt;
2450        va = vmalloc(size);
2451        if (!va)
2452                goto err;
2453
2454        if (irdma_map_vm_page_list(chunk->dev->hw, va, chunk->dmainfo.dmaaddrs,
2455                                   pg_cnt)) {
2456                vfree(va);
2457                goto err;
2458        }
2459        chunk->vaddr = va;
2460        chunk->size = size;
2461        chunk->pg_cnt = pg_cnt;
2462        chunk->type = PBLE_SD_PAGED;
2463
2464        return 0;
2465err:
2466        kfree(chunk->dmainfo.dmaaddrs);
2467        chunk->dmainfo.dmaaddrs = NULL;
2468
2469        return IRDMA_ERR_NO_MEMORY;
2470}
2471
2472/**
2473 * irdma_alloc_ws_node_id - Allocate a tx scheduler node ID
2474 * @dev: device pointer
2475 */
2476u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev)
2477{
2478        struct irdma_pci_f *rf = dev_to_rf(dev);
2479        u32 next = 1;
2480        u32 node_id;
2481
2482        if (irdma_alloc_rsrc(rf, rf->allocated_ws_nodes, rf->max_ws_node_id,
2483                             &node_id, &next))
2484                return IRDMA_WS_NODE_INVALID;
2485
2486        return (u16)node_id;
2487}
2488
2489/**
2490 * irdma_free_ws_node_id - Free a tx scheduler node ID
2491 * @dev: device pointer
2492 * @node_id: Work scheduler node ID
2493 */
2494void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id)
2495{
2496        struct irdma_pci_f *rf = dev_to_rf(dev);
2497
2498        irdma_free_rsrc(rf, rf->allocated_ws_nodes, (u32)node_id);
2499}
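
/*
 * Illustrative usage sketch (hypothetical wrapper): work-scheduler node IDs
 * come from the rf->allocated_ws_nodes bitmap and IRDMA_WS_NODE_INVALID
 * signals exhaustion.
 */
static int irdma_example_ws_node_cycle(struct irdma_sc_dev *dev)
{
	u16 node_id = irdma_alloc_ws_node_id(dev);

	if (node_id == IRDMA_WS_NODE_INVALID)
		return -ENOSPC;

	/* ... program the node via irdma_cqp_ws_node_cmd() ... */
	irdma_free_ws_node_id(dev, node_id);
	return 0;
}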
2500
2501/**
2502 * irdma_modify_qp_to_err - Modify a QP to error
2503 * @sc_qp: qp structure
2504 */
2505void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
2506{
2507        struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
2508        struct ib_qp_attr attr;
2509
2510        if (qp->iwdev->rf->reset)
2511                return;
2512        attr.qp_state = IB_QPS_ERR;
2513
2514        if (rdma_protocol_roce(qp->ibqp.device, 1))
2515                irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2516        else
2517                irdma_modify_qp(&qp->ibqp, &attr, IB_QP_STATE, NULL);
2518}
2519
2520void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
2521{
2522        struct ib_event ibevent;
2523
2524        if (!iwqp->ibqp.event_handler)
2525                return;
2526
2527        switch (event) {
2528        case IRDMA_QP_EVENT_CATASTROPHIC:
2529                ibevent.event = IB_EVENT_QP_FATAL;
2530                break;
2531        case IRDMA_QP_EVENT_ACCESS_ERR:
2532                ibevent.event = IB_EVENT_QP_ACCESS_ERR;
2533                break;
2534        }
2535        ibevent.device = iwqp->ibqp.device;
2536        ibevent.element.qp = &iwqp->ibqp;
2537        iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
2538}
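
/*
 * Illustrative usage sketch (hypothetical wrapper): a fatal QP error is
 * typically reported by first moving the QP to the error state and then
 * raising the matching IB event through the two helpers above.
 */
static void irdma_example_report_fatal_qp(struct irdma_qp *iwqp)
{
	irdma_modify_qp_to_err(&iwqp->sc_qp);
	irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
}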
2539