linux/drivers/target/iscsi/cxgbit/cxgbit_cm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

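/*
 * Firmware work-request completion tracking: a caller arms a
 * cxgbit_wr_wait before posting a CPL, the reply handler calls
 * cxgbit_wake_up() with the CPL status, and cxgbit_wait_for_reply()
 * sleeps (with a timeout) until the completion fires. Any non-zero
 * status is collapsed into -EIO for the waiter.
 */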
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
        if (ret == CPL_ERR_NONE)
                wr_waitp->ret = 0;
        else
                wr_waitp->ret = -EIO;

        if (wr_waitp->ret)
                pr_err("%s: err:%u\n", func, ret);

        complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
                      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
                      const char *func)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
        if (!ret) {
                pr_info("%s - Device %s not responding tid %u\n",
                        func, pci_name(cdev->lldi.pdev), tid);
                wr_waitp->ret = -ETIMEDOUT;
        }
out:
        if (wr_waitp->ret)
                pr_info("%s: FW reply %d tid %u\n",
                        pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
        return wr_waitp->ret;
}

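/*
 * Each listening endpoint (cxgbit_np) is mapped to the server TID (stid)
 * the adapter assigned to it. The mapping lives in a small per-device
 * chained hash table keyed on the cnp pointer itself, protected by
 * cdev->np_lock.
 */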
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
                   unsigned int stid)
{
        struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                int bucket = cxgbit_np_hashfn(cnp);

                p->cnp = cnp;
                p->stid = stid;
                spin_lock(&cdev->np_lock);
                p->next = cdev->np_hash_tab[bucket];
                cdev->np_hash_tab[bucket] = p;
                spin_unlock(&cdev->np_lock);
        }

        return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p;

        spin_lock(&cdev->np_lock);
        for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

        spin_lock(&cdev->np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        *prev = p->next;
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
        struct cxgbit_np *cnp;

        cnp = container_of(kref, struct cxgbit_np, kref);
        kfree(cnp);
}

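/*
 * Start a hardware listener for an IPv6 address. A non-wildcard local
 * address must first be installed in the adapter's CLIP table (which
 * holds local IPv6 addresses) before the server can be created; on
 * failure the CLIP reference is dropped again unless the firmware timed
 * out, in which case the adapter state is unknown and the entry is
 * left alone.
 */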
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                     &cnp->com.local_addr;
        int addr_type;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

        addr_type = ipv6_addr_type((const struct in6_addr *)
                                   &sin6->sin6_addr);
        if (addr_type != IPV6_ADDR_ANY) {
                ret = cxgb4_clip_get(cdev->lldi.ports[0],
                                     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
                if (ret) {
                        pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
                               sin6->sin6_addr.s6_addr, ret);
                        return -ENOMEM;
                }
        }

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server6(cdev->lldi.ports[0],
                                   stid, &sin6->sin6_addr,
                                   sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

                pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
                       ret, stid, sin6->sin6_addr.s6_addr,
                       ntohs(sin6->sin6_port));
        }

        return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)
                                   &cnp->com.local_addr;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server(cdev->lldi.ports[0],
                                  stid, sin->sin_addr.s_addr,
                                  sin->sin_port, 0,
                                  cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev,
                                            &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret)
                pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
                       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
        return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;
                                return cdev;
                        }
                }
        }

        return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_BONDING) {
                pr_err("Bond devices are not supported. Interface:%s\n",
                       ndev->name);
                return NULL;
        }

        if (is_vlan_dev(ndev))
                return vlan_dev_real_dev(ndev);

        return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
        struct net_device *ndev;

        ndev = __ip_dev_find(&init_net, saddr, false);
        if (!ndev)
                return NULL;

        return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
        struct net_device *ndev = NULL;
        bool found = false;

        if (IS_ENABLED(CONFIG_IPV6)) {
                for_each_netdev_rcu(&init_net, ndev)
                        if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
                                found = true;
                                break;
                        }
        }
        if (!found)
                return NULL;
        return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        struct net_device *ndev = NULL;
        struct cxgbit_device *cdev = NULL;

        rcu_read_lock();
        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
        }
        if (!ndev)
                goto out;

        cdev = cxgbit_find_device(ndev, NULL);
out:
        rcu_read_unlock();
        return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        int addr_type;

        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                addr_type = ipv6_addr_type((const struct in6_addr *)
                                &sin6->sin6_addr);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

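/*
 * Bring up a hardware listener on one adapter: allocate a server TID,
 * record the cnp -> stid mapping, then ask the firmware to create the
 * IPv4 or IPv6 server. On failure the stid is freed (except after a
 * firmware timeout, when its state is unknown) and the hash entry is
 * removed.
 */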
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        int ss_family = cnp->com.local_addr.ss_family;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -EINVAL;
        }

        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_free_stid(cdev->lldi.tids, stid,
                                        ss_family);
                cxgbit_np_hash_del(cdev, cnp);
                return ret;
        }
        return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret = -1;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_np_cdev(cnp);
        if (!cdev)
                goto out;

        if (cxgbit_np_hash_find(cdev, cnp) >= 0)
                goto out;

        if (__cxgbit_setup_cdev_np(cdev, cnp))
                goto out;

        cnp->com.cdev = cdev;
        ret = 0;
out:
        mutex_unlock(&cdev_list_lock);
        return ret;
}

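/*
 * For a wildcard (INADDR_ANY / in6addr_any) listen address, a listener
 * is set up on every registered adapter. A firmware timeout on one
 * adapter aborts the loop; the listen succeeds as long as at least one
 * adapter accepted the server request.
 */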
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;
        u32 count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
                if (ret != 0)
                        continue;
                count++;
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;
}

int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
        struct cxgbit_np *cnp;
        int ret;

        if ((ksockaddr->ss_family != AF_INET) &&
            (ksockaddr->ss_family != AF_INET6))
                return -EINVAL;

        cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
        if (!cnp)
                return -ENOMEM;

        init_waitqueue_head(&cnp->accept_wait);
        init_completion(&cnp->com.wr_wait.completion);
        init_completion(&cnp->accept_comp);
        INIT_LIST_HEAD(&cnp->np_accept_list);
        spin_lock_init(&cnp->np_accept_lock);
        kref_init(&cnp->kref);
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&cnp->com.local_addr, &np->np_sockaddr,
               sizeof(cnp->com.local_addr));

        cnp->np = np;
        cnp->com.cdev = NULL;

        if (cxgbit_inaddr_any(cnp))
                ret = cxgbit_setup_all_np(cnp);
        else
                ret = cxgbit_setup_cdev_np(cnp);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return -EINVAL;
        }

        np->np_context = cnp;
        cnp->com.state = CSK_STATE_LISTEN;
        return 0;
}

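/*
 * Accept path: fully established connections are parked on
 * cnp->np_accept_list by cxgbit_pass_establish() below; the iSCSI login
 * thread sleeps in cxgbit_accept_np() until one is available (or the
 * np thread is being reset), then binds the csk to the new connection.
 */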
static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
                     struct cxgbit_sock *csk)
{
        conn->login_family = np->np_sockaddr.ss_family;
        conn->login_sockaddr = csk->com.remote_addr;
        conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
        int ret = 0;

accept_wait:
        ret = wait_for_completion_interruptible(&cnp->accept_comp);
        if (ret)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                /*
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail
                 */
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        spin_lock_bh(&cnp->np_accept_lock);
        if (list_empty(&cnp->np_accept_list)) {
                spin_unlock_bh(&cnp->np_accept_lock);
                goto accept_wait;
        }

        csk = list_first_entry(&cnp->np_accept_list,
                               struct cxgbit_sock,
                               accept_node);

        list_del_init(&csk->accept_node);
        spin_unlock_bh(&cnp->np_accept_lock);
        conn->context = csk;
        csk->conn = conn;

        cxgbit_set_conn_info(np, conn, csk);
        return 0;
}

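/*
 * Listener teardown mirrors setup: drop the cnp -> stid mapping, ask
 * the firmware to remove the server, wait for the reply, release the
 * CLIP entry for IPv6 listeners, and finally free the server TID. As
 * in setup, a firmware timeout leaves the stid allocated.
 */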
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        bool ipv6 = false;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL;
        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        if (cnp->np->np_sockaddr.ss_family == AF_INET6)
                ipv6 = true;

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);
        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);

        if (ret > 0)
                ret = net_xmit_errno(ret);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return ret;
        }

        ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                    0, 10, __func__);
        if (ret == -ETIMEDOUT)
                return ret;

        if (ipv6 && cnp->com.cdev) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr,
                                   1);
        }

        cxgb4_free_stid(cdev->lldi.tids, stid,
                        cnp->com.local_addr.ss_family);
        return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_free_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
        }
        mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        bool found = false;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cdev == cnp->com.cdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto out;

        __cxgbit_free_cdev_np(cdev, cnp);
out:
        mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk, *tmp;

        cnp->com.state = CSK_STATE_DEAD;
        if (cnp->com.cdev)
                cxgbit_free_cdev_np(cnp);
        else
                cxgbit_free_all_np(cnp);

        spin_lock_bh(&cnp->np_accept_lock);
        list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
                list_del_init(&csk->accept_node);
                __cxgbit_free_conn(csk);
        }
        spin_unlock_bh(&cnp->np_accept_lock);

        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
}

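/*
 * Connection close/abort helpers. A graceful half-close sends a
 * CPL_CLOSE_CON_REQ, asking the hardware to emit a FIN, while an abort
 * purges the tx queue and sends a CPL_ABORT_REQ. The FlowC work request
 * must precede any other tx work request on a connection, hence the
 * CSK_TX_DATA_SENT check before the abort is posted.
 */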
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
                              NULL, NULL);

        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
        cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = handle;

        pr_debug("%s cxgbit_device %p\n", __func__, handle);
        kfree_skb(skb);
        cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbit_device *cdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        pr_debug("%s cdev %p\n", __func__, cdev);
        req->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_abort_req), 16);

        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        skb = __skb_dequeue(&csk->skbq);
        cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
                          csk->com.cdev, cxgbit_abort_arp_failure);

        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        __kfree_skb(skb);

        if (csk->com.state != CSK_STATE_ESTABLISHED)
                goto no_abort;

        set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
        csk->com.state = CSK_STATE_ABORTING;

        cxgbit_send_abort_req(csk);

        return;

no_abort:
        cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
        cxgbit_put_csk(csk);
}

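/*
 * If another context currently owns csk->lock (lock_owner is set), the
 * abort cannot be issued inline; instead the skb is queued on
 * csk->backlogq with a callback pointer, and the lock owner runs
 * __cxgbit_abort_conn() when it releases the lock. Either way the
 * caller then blocks until the abort (or its skip path) completes.
 */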
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
        struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
                __skb_queue_tail(&csk->backlogq, skb);
        } else {
                __cxgbit_abort_conn(csk, skb);
        }
        spin_unlock_bh(&csk->lock);

        cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
                              csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
        struct iscsi_conn *conn = csk->conn;
        bool release = false;

        pr_debug("%s: state %d\n",
                 __func__, csk->com.state);

        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
                        csk->com.state = CSK_STATE_ABORTING;
                        cxgbit_send_abort_req(csk);
                }
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                cxgbit_send_halfclose(csk);
                break;
        case CSK_STATE_DEAD:
                release = true;
                break;
        default:
                pr_err("%s: csk %p; state %d\n",
                       __func__, csk, csk->com.state);
        }
        spin_unlock_bh(&csk->lock);

        if (release)
                cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
        __cxgbit_free_conn(conn->context);
}

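/*
 * Derive the effective MSS from the negotiated MTU index: subtract the
 * IPv4/IPv6 and TCP header sizes, and the (rounded) timestamp option
 * if it was negotiated, clamping to a floor of 128 bytes.
 */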
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
                        ((csk->com.remote_addr.ss_family == AF_INET) ?
                        sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                        sizeof(struct tcphdr);
        csk->mss = csk->emss;
        if (TCPOPT_TSTAMP_G(opt))
                csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (csk->emss < 128)
                csk->emss = 128;
        if (csk->emss & 7)
                pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
                        TCPOPT_MSS_G(opt), csk->mss, csk->emss);
        pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
                 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;

        __skb_queue_purge(&csk->txq);
        __skb_queue_purge(&csk->rxq);
        __skb_queue_purge(&csk->backlogq);
        __skb_queue_purge(&csk->ppodq);
        __skb_queue_purge(&csk->skbq);

        while ((skb = cxgbit_sock_dequeue_wr(csk)))
                kfree_skb(skb);

        __kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
        struct cxgbit_sock *csk;
        struct cxgbit_device *cdev;

        csk = container_of(kref, struct cxgbit_sock, kref);

        pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

        if (csk->com.local_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                             &csk->com.local_addr;
                cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
                                   (const u32 *)
                                   &sin6->sin6_addr.s6_addr, 1);
        }

        cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
                         csk->com.local_addr.ss_family);
        dst_release(csk->dst);
        cxgb4_l2t_release(csk->l2t);

        cdev = csk->com.cdev;
        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        cxgbit_free_skb(csk);
        cxgbit_put_cnp(csk->cnp);
        cxgbit_put_cdev(cdev);

        kfree(csk);
}

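/*
 * Size the TCP send and receive windows from the port's link speed:
 * 256KB per 10Gbps, so e.g. a 40G link gets 1MB windows. Links slower
 * than 10G (scale == 0) keep the 256KB base value.
 */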
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
        unsigned int linkspeed;
        u8 scale;

        linkspeed = pi->link_cfg.speed;
        scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
        csk->rcv_win = CXGBIT_10G_RCV_WIN;
        if (scale)
                csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
        csk->snd_win = CXGBIT_10G_SND_WIN;
        if (scale)
                csk->snd_win *= scale;

        pr_debug("%s snd_win %d rcv_win %d\n",
                 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
        return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
        if (!pri_mask)
                return 0;

        return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
        int ret;
        u8 caps;

        struct dcb_app iscsi_dcb_app = {
                .protocol = local_port
        };

        ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

        if (ret)
                return 0;

        if (caps & DCB_CAP_DCBX_VER_IEEE) {
                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;

                ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);

        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

                ret = dcb_getapp(ndev, &iscsi_dcb_app);
        }

        pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

        return cxgbit_select_priority(ret);
}
#endif

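/*
 * Resolve the L2 path for a new connection: look up the neighbour for
 * the peer, grab an L2T (L2 table) entry, and derive the per-connection
 * tx/rx queue indices, channel and SMAC index from the egress port.
 * Loopback connections are special-cased since the neighbour's device
 * is not one of the adapter's ports. Non-loopback connections spread
 * tx/rx queues across the port's queue sets via the selectq counters.
 */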
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
                    u16 local_port, struct dst_entry *dst,
                    struct cxgbit_device *cdev)
{
        struct neighbour *n;
        int ret, step;
        struct net_device *ndev;
        u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 priority = 0;
#endif

        n = dst_neigh_lookup(dst, peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        if (!(n->nud_state & NUD_VALID))
                neigh_event_send(n, NULL);

        ret = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                if (iptype == 4)
                        ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
                else if (IS_ENABLED(CONFIG_IPV6))
                        ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
                else
                        ndev = NULL;

                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
                                         n, ndev, 0);
                if (!csk->l2t)
                        goto out;
                csk->mtu = ndev->mtu;
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nchan;
                csk->txq_idx = cxgb4_port_idx(ndev) * step;
                step = cdev->lldi.nrxq /
                        cdev->lldi.nchan;
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                csk->rss_qid = cdev->lldi.rxq_ids[
                                cxgb4_port_idx(ndev) * step];
                csk->port_id = cxgb4_port_idx(ndev);
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        } else {
                ndev = cxgbit_get_real_dev(n->dev);
                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

#ifdef CONFIG_CHELSIO_T4_DCB
                if (cxgbit_get_iscsi_dcb_state(ndev))
                        priority = cxgbit_get_iscsi_dcb_priority(ndev,
                                                                 local_port);

                csk->dcb_priority = priority;

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
                if (!csk->l2t)
                        goto out;
                port_id = cxgb4_port_idx(ndev);
                csk->mtu = dst_mtu(dst);
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nports;
                csk->txq_idx = (port_id * step) +
                                (cdev->selectq[port_id][0]++ % step);
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                step = cdev->lldi.nrxq /
                        cdev->lldi.nports;
                rxq_idx = (port_id * step) +
                                (cdev->selectq[port_id][1]++ % step);
                csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
                csk->port_id = port_id;
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        }
        ret = 0;
out:
        rcu_read_unlock();
        neigh_release(n);
        return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
        u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_tid_release(skb, len, tid, 0);
        cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
                struct l2t_entry *l2e)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
                __kfree_skb(skb);
                return;
        }

        cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host -> adapter
 * Return RX credits to the adapter through an RX_DATA_ACK CPL message.
 * Returns 0 on success (including when the ack had to be deferred to
 * the backlog queue), -1 on allocation failure.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
        u32 credit_dack;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;

        credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
                      RX_CREDITS_V(csk->rx_credits);

        cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
                            credit_dack);

        csk->rx_credits = 0;

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock_bh(&csk->lock);
                return 0;
        }

        cxgbit_send_rx_credits(csk, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

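/*
 * Pre-allocate the skbs a connection may need in atomic or error
 * contexts: three buffers on csk->skbq, each sized for the largest of
 * an abort request/reply or a FlowC work request (so an abort can
 * always be sent without allocating), plus one headroom-only skb used
 * to build coalesced LRO headers.
 */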
#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX    11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len, flowclen;
        u8 i;

        flowclen = offsetof(struct fw_flowc_wr,
                            mnemval[FLOWC_WR_NPARAMS_MAX]);

        len = max_t(u32, sizeof(struct cpl_abort_req),
                    sizeof(struct cpl_abort_rpl));

        len = max(len, flowclen);
        len = roundup(len, 16);

        for (i = 0; i < 3; i++) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (!skb)
                        goto out;
                __skb_queue_tail(&csk->skbq, skb);
        }

        skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
        if (!skb)
                goto out;

        memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
        csk->lro_hskb = skb;

        return 0;
out:
        __skb_queue_purge(&csk->skbq);
        return -ENOMEM;
}

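/*
 * Build and send the CPL_PASS_ACCEPT_RPL that tells the adapter to
 * complete the passive open: opt0 carries the window scale, MSS index,
 * L2T index, tx channel and initial receive buffer size, while opt2
 * selects the RSS queue and enables timestamps/SACK/ECN as negotiated
 * by the peer's SYN.
 */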
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
        struct sk_buff *skb;
        const struct tcphdr *tcph;
        struct cpl_t5_pass_accept_rpl *rpl5;
        struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        unsigned int len = roundup(sizeof(*rpl5), 16);
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2, hlen;
        u32 wscale;
        u32 win;

        pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                cxgbit_put_csk(csk);
                return;
        }

        rpl5 = __skb_put_zero(skb, len);

        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
        cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
                      req->tcpopt.tstamp,
                      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
        wscale = cxgb_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
         */
        win = csk->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
        opt0 =  TCAM_BYPASS_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(csk->l2t->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                DSCP_V(csk->tos >> 2) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(win);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

        if (!is_t5(lldi->adapter_type))
                opt2 |= RX_FC_DISABLE_F;

        if (req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN_F;
        if (req->tcpopt.sack)
                opt2 |= SACK_EN_F;
        if (wscale)
                opt2 |= WND_SCALE_EN_F;

        hlen = ntohl(req->hdr_len);

        if (is_t5(lldi->adapter_type))
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
        else
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

        if (tcph->ece && tcph->cwr)
                opt2 |= CCTRL_ECN_V(1);

        opt2 |= RX_COALESCE_V(3);
        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

        opt2 |= T5_ISS_F;
        rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

        opt2 |= T5_OPT_2_VALID_F;

        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
        t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
        cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

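/*
 * Handle an incoming connection request (CPL_PASS_ACCEPT_REQ): look up
 * the listening cnp by stid, extract the 4-tuple, find a route to the
 * peer, allocate and initialize a cxgbit_sock, insert it into the tid
 * table and answer with a pass-accept reply. Requests that cannot be
 * serviced are rejected by releasing the hardware tid.
 */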
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = NULL;
        struct cxgbit_np *cnp;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        u16 peer_mss = ntohs(req->tcpopt.mss);
        unsigned short hdrs;

        struct dst_entry *dst;
        __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int ret;
        int iptype;

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        cnp = lookup_stid(t, stid);
        if (!cnp) {
                pr_err("%s connect request on invalid stid %d\n",
                       __func__, stid);
                goto rel_skb;
        }

        if (cnp->com.state != CSK_STATE_LISTEN) {
                pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
                       __func__);
                goto reject;
        }

        csk = lookup_tid(t, tid);
        if (csk) {
                pr_err("%s csk not null tid %u\n",
                       __func__, tid);
                goto rel_skb;
        }

        cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
                        peer_ip, &local_port, &peer_port);

        /* Find output route */
        if (iptype == 4)  {
                pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
                                      *(__be32 *)local_ip,
                                      *(__be32 *)peer_ip,
                                      local_port, peer_port,
                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
                                       local_ip, peer_ip,
                                       local_port, peer_port,
                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                       ((struct sockaddr_in6 *)
                                        &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }

        csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
        if (!csk) {
                dst_release(dst);
                goto rel_skb;
        }

        ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
                                  dst, cdev);
        if (ret) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(csk);
                goto reject;
        }

        kref_init(&csk->kref);
        init_completion(&csk->com.wr_wait.completion);

        INIT_LIST_HEAD(&csk->accept_node);

        hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
                sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
        if (peer_mss && csk->mtu > (peer_mss + hdrs))
                csk->mtu = peer_mss + hdrs;

        csk->com.state = CSK_STATE_CONNECTING;
        csk->com.cdev = cdev;
        csk->cnp = cnp;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        csk->dst = dst;
        csk->tid = tid;
        csk->wr_cred = cdev->lldi.wr_cred -
                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
        csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;

        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
                                          &csk->com.local_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;

                sin = (struct sockaddr_in *)&csk->com.remote_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;

                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
                cxgb4_clip_get(cdev->lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr,
                               1);

                sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }

        skb_queue_head_init(&csk->rxq);
        skb_queue_head_init(&csk->txq);
        skb_queue_head_init(&csk->ppodq);
        skb_queue_head_init(&csk->backlogq);
        skb_queue_head_init(&csk->skbq);
        cxgbit_sock_reset_wr_list(csk);
        spin_lock_init(&csk->lock);
        init_waitqueue_head(&csk->waitq);
        init_waitqueue_head(&csk->ack_waitq);
        csk->lock_owner = false;

        if (cxgbit_alloc_csk_skb(csk)) {
                dst_release(dst);
                kfree(csk);
                goto rel_skb;
        }

        cxgbit_get_cnp(cnp);
        cxgbit_get_cdev(cdev);

        spin_lock(&cdev->cskq.lock);
        list_add_tail(&csk->list, &cdev->cskq.list);
        spin_unlock(&cdev->cskq.lock);
        cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
        cxgbit_pass_accept_rpl(csk, req);
        goto rel_skb;

reject:
        cxgbit_release_tid(cdev, tid);
rel_skb:
        __kfree_skb(skb);
}

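/*
 * The FlowC work request tells the firmware the scheduling parameters
 * of a new offloaded connection (queues, sequence numbers, send buffer,
 * MSS). Its size depends on how many mnemonic/value pairs are sent, so
 * the credit count is computed first and the request is padded to a
 * multiple of 16 bytes.
 */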
static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
                           u32 *flowclenp)
{
        u32 nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;

        if (csk->snd_wscale)
                nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the flowc request.
         * Pass back the nparams and actual flowc length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;
        return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct fw_flowc_wr *flowc;
        u32 nparams, flowclen16, flowclen;
        struct sk_buff *skb;
        u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

        flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

        skb = __skb_dequeue(&csk->skbq);
        flowc = __skb_put_zero(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                            (csk->com.cdev->lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(csk->emss);

        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
                flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
        else
                flowc->mnemval[8].val = cpu_to_be32(16384);

        index = 9;

        if (csk->snd_wscale) {
                flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
                flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
                index++;
        }

#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == VLAN_NONE) {
                pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
                flowc->mnemval[index].val = cpu_to_be32(0);
        } else
                flowc->mnemval[index].val = cpu_to_be32(
                                (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

        pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
                 " rcv_seq = %u; snd_win = %u; emss = %u\n",
                 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
                 csk->rcv_nxt, csk->snd_win, csk->emss);
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, skb);
        return flowclen16;
}

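/*
 * The two helpers below program per-connection TCB (TCP Control Block)
 * fields via CPL_SET_TCB_FIELD and block until the firmware replies.
 * As programmed here, a 2-bit field at bit 4 of TCB word 0 selects
 * iSCSI header/data digest offload (ULP_CRC_HEADER/ULP_CRC_DATA), and
 * a 2-bit field at bit 8 carries the DDP page-size index.
 */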
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;
        u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
        u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
        unsigned int len = roundup(sizeof(*req), 16);
        int ret;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /*  set up ulp submode */
        req = __skb_put_zero(skb, len);

        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 4);
        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        cxgbit_ofld_send(csk->com.cdev, skb);

        ret = cxgbit_wait_for_reply(csk->com.cdev,
                                    &csk->com.wr_wait,
                                    csk->tid, 5, __func__);
        if (ret)
                return -1;

        return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;
        unsigned int len = roundup(sizeof(*req), 16);
        int ret;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        req = __skb_put_zero(skb, len);

        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 8);
        req->val = cpu_to_be64(pg_idx << 8);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        cxgbit_ofld_send(csk->com.cdev, skb);

        ret = cxgbit_wait_for_reply(csk->com.cdev,
                                    &csk->com.wr_wait,
                                    csk->tid, 5, __func__);
        if (ret)
                return -1;

        return 0;
}

1560static void
1561cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1562{
1563        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1564        struct tid_info *t = cdev->lldi.tids;
1565        unsigned int stid = GET_TID(rpl);
1566        struct cxgbit_np *cnp = lookup_stid(t, stid);
1567
1568        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1569                 __func__, cnp, stid, rpl->status);
1570
1571        if (!cnp) {
1572                pr_info("%s stid %d lookup failure\n", __func__, stid);
1573                goto rel_skb;
1574        }
1575
1576        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1577        cxgbit_put_cnp(cnp);
1578rel_skb:
1579        __kfree_skb(skb);
1580}
1581
1582static void
1583cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1584{
1585        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1586        struct tid_info *t = cdev->lldi.tids;
1587        unsigned int stid = GET_TID(rpl);
1588        struct cxgbit_np *cnp = lookup_stid(t, stid);
1589
1590        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1591                 __func__, cnp, stid, rpl->status);
1592
1593        if (!cnp) {
1594                pr_info("%s stid %d lookup failure\n", __func__, stid);
1595                goto rel_skb;
1596        }
1597
1598        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1599        cxgbit_put_cnp(cnp);
1600rel_skb:
1601        __kfree_skb(skb);
1602}
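
/*
 * cxgbit_pass_open_rpl() and cxgbit_close_listsrv_rpl() above differ
 * only in the CPL struct they parse; a possible (hypothetical) common
 * helper keyed by stid would look like:
 */
static void cxgbit_example_stid_wakeup(struct cxgbit_device *cdev,
				       struct sk_buff *skb, u32 stid,
				       u8 status)
{
	struct cxgbit_np *cnp = lookup_stid(cdev->lldi.tids, stid);

	if (cnp) {
		cxgbit_wake_up(&cnp->com.wr_wait, __func__, status);
		cxgbit_put_cnp(cnp);
	} else {
		pr_info("%s stid %u lookup failure\n", __func__, stid);
	}
	__kfree_skb(skb);
}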
1603
1604static void
1605cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
1606{
1607        struct cpl_pass_establish *req = cplhdr(skb);
1608        struct tid_info *t = cdev->lldi.tids;
1609        unsigned int tid = GET_TID(req);
1610        struct cxgbit_sock *csk;
1611        struct cxgbit_np *cnp;
1612        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
1613        u32 snd_isn = be32_to_cpu(req->snd_isn);
1614        u32 rcv_isn = be32_to_cpu(req->rcv_isn);
1615
1616        csk = lookup_tid(t, tid);
1617        if (unlikely(!csk)) {
1618                pr_err("can't find connection for tid %u.\n", tid);
1619                goto rel_skb;
1620        }
1621        cnp = csk->cnp;
1622
1623        pr_debug("%s: csk %p; tid %u; cnp %p\n",
1624                 __func__, csk, tid, cnp);
1625
1626        csk->write_seq = snd_isn;
1627        csk->snd_una = snd_isn;
1628        csk->snd_nxt = snd_isn;
1629
1630        csk->rcv_nxt = rcv_isn;
1631
1632        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
1633                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
1634
1635        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1636        cxgbit_set_emss(csk, tcp_opt);
1637        dst_confirm(csk->dst);
1638        csk->com.state = CSK_STATE_ESTABLISHED;
1639        spin_lock_bh(&cnp->np_accept_lock);
1640        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
1641        spin_unlock_bh(&cnp->np_accept_lock);
1642        complete(&cnp->accept_comp);
1643rel_skb:
1644        __kfree_skb(skb);
1645}
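
/*
 * Worked example for the rx_credits math above, assuming RCV_BUFSIZ_M
 * is 0x3ff (the TCB receive-buffer field counts 1KB units): a 256KB
 * rcv_win fits entirely in the hardware field, so rx_credits stays 0;
 * a 2MB rcv_win leaves 2048KB - 1023KB ≈ 1MB to be granted back in
 * software as the target consumes data.
 */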
1646
1647static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1648{
1649        cxgbit_skcb_flags(skb) = 0;
1650        spin_lock_bh(&csk->rxq.lock);
1651        __skb_queue_tail(&csk->rxq, skb);
1652        spin_unlock_bh(&csk->rxq.lock);
1653        wake_up(&csk->waitq);
1654}
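
/*
 * Consumer-side sketch (illustration; the real reader lives in the
 * target I/O path, not in this file): drain skbs that
 * cxgbit_queue_rx_skb() parked on csk->rxq.
 */
static struct sk_buff *cxgbit_example_wait_rx(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	wait_event_interruptible(csk->waitq, skb_queue_len(&csk->rxq));
	spin_lock_bh(&csk->rxq.lock);
	skb = __skb_dequeue(&csk->rxq);	/* may be NULL if interrupted */
	spin_unlock_bh(&csk->rxq.lock);
	return skb;
}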
1655
1656static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1657{
1658        pr_debug("%s: csk %p; tid %u; state %d\n",
1659                 __func__, csk, csk->tid, csk->com.state);
1660
1661        switch (csk->com.state) {
1662        case CSK_STATE_ESTABLISHED:
1663                csk->com.state = CSK_STATE_CLOSING;
1664                cxgbit_queue_rx_skb(csk, skb);
1665                return;
1666        case CSK_STATE_CLOSING:
1667                /* simultaneous close */
1668                csk->com.state = CSK_STATE_MORIBUND;
1669                break;
1670        case CSK_STATE_MORIBUND:
1671                csk->com.state = CSK_STATE_DEAD;
1672                cxgbit_put_csk(csk);
1673                break;
1674        case CSK_STATE_ABORTING:
1675                break;
1676        default:
1677                pr_info("%s: cpl_peer_close in bad state %d\n",
1678                        __func__, csk->com.state);
1679        }
1680
1681        __kfree_skb(skb);
1682}
1683
1684static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1685{
1686        pr_debug("%s: csk %p; tid %u; state %d\n",
1687                 __func__, csk, csk->tid, csk->com.state);
1688
1689        switch (csk->com.state) {
1690        case CSK_STATE_CLOSING:
1691                csk->com.state = CSK_STATE_MORIBUND;
1692                break;
1693        case CSK_STATE_MORIBUND:
1694                csk->com.state = CSK_STATE_DEAD;
1695                cxgbit_put_csk(csk);
1696                break;
1697        case CSK_STATE_ABORTING:
1698        case CSK_STATE_DEAD:
1699                break;
1700        default:
1701                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
1702                        __func__, csk->com.state);
1703        }
1704
1705        __kfree_skb(skb);
1706}
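
/*
 * Summary of the passive close walk implemented by the two handlers
 * above (derived from their switch statements):
 *
 *   ESTABLISHED --PEER_CLOSE-->    CLOSING  (skb handed to rx thread)
 *   CLOSING     --CLOSE_CON_RPL--> MORIBUND (or via simultaneous close)
 *   MORIBUND    --either CPL-->    DEAD     (final csk reference dropped)
 */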
1707
1708static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1709{
1710        struct cpl_abort_req_rss *hdr = cplhdr(skb);
1711        unsigned int tid = GET_TID(hdr);
1712        struct sk_buff *rpl_skb;
1713        bool release = false;
1714        bool wakeup_thread = false;
1715        u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
1716
1717        pr_debug("%s: csk %p; tid %u; state %d\n",
1718                 __func__, csk, tid, csk->com.state);
1719
1720        if (cxgb_is_neg_adv(hdr->status)) {
1721                pr_err("%s: got negative advice %d on tid %u\n",
1722                       __func__, hdr->status, tid);
1723                goto rel_skb;
1724        }
1725
1726        switch (csk->com.state) {
1727        case CSK_STATE_CONNECTING:
1728        case CSK_STATE_MORIBUND:
1729                csk->com.state = CSK_STATE_DEAD;
1730                release = true;
1731                break;
1732        case CSK_STATE_ESTABLISHED:
1733                csk->com.state = CSK_STATE_DEAD;
1734                wakeup_thread = true;
1735                break;
1736        case CSK_STATE_CLOSING:
1737                csk->com.state = CSK_STATE_DEAD;
1738                if (!csk->conn)
1739                        release = true;
1740                break;
1741        case CSK_STATE_ABORTING:
1742                break;
1743        default:
1744                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
1745                        __func__, csk->com.state);
1746                csk->com.state = CSK_STATE_DEAD;
1747        }
1748
1749        __skb_queue_purge(&csk->txq);
1750
1751        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1752                cxgbit_send_tx_flowc_wr(csk);
1753
1754        rpl_skb = __skb_dequeue(&csk->skbq);
1755
1756        cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
1757        cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1758
1759        if (wakeup_thread) {
1760                cxgbit_queue_rx_skb(csk, skb);
1761                return;
1762        }
1763
1764        if (release)
1765                cxgbit_put_csk(csk);
1766rel_skb:
1767        __kfree_skb(skb);
1768}
1769
1770static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1771{
1772        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1773
1774        pr_debug("%s: csk %p; tid %u; state %d\n",
1775                 __func__, csk, csk->tid, csk->com.state);
1776
1777        switch (csk->com.state) {
1778        case CSK_STATE_ABORTING:
1779                csk->com.state = CSK_STATE_DEAD;
1780                if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
1781                        cxgbit_wake_up(&csk->com.wr_wait, __func__,
1782                                       rpl->status);
1783                cxgbit_put_csk(csk);
1784                break;
1785        default:
1786                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
1787                        __func__, csk->com.state);
1788        }
1789
1790        __kfree_skb(skb);
1791}
1792
1793static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1794{
1795        const struct sk_buff *skb = csk->wr_pending_head;
1796        u32 credit = 0;
1797
1798        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1799                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1800                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1801                return true;
1802        }
1803
1804        while (skb) {
1805                credit += (__force u32)skb->csum; /* ->csum stores the WR's credit cost */
1806                skb = cxgbit_skcb_tx_wr_next(skb);
1807        }
1808
1809        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1810                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1811                       csk, csk->tid, csk->wr_cred,
1812                       credit, csk->wr_max_cred);
1813
1814                return true;
1815        }
1816
1817        return false;
1818}
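
/*
 * Invariant checked above, in equation form: at any quiescent point,
 *
 *	wr_cred + sum(skb->csum over the wr_pending list) == wr_max_cred
 *
 * i.e. every credit is either available or accounted to an outstanding
 * work request.
 */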
1819
1820static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
1821{
1822        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
1823        u32 credits = rpl->credits;
1824        u32 snd_una = ntohl(rpl->snd_una);
1825
1826        csk->wr_cred += credits;
1827        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
1828                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1829
1830        while (credits) {
1831                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
1832                u32 csum;
1833
1834                if (unlikely(!p)) {
1835                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
1836                               csk, csk->tid, credits,
1837                               csk->wr_cred, csk->wr_una_cred);
1838                        break;
1839                }
1840                csum = (__force u32)p->csum; /* read only after the NULL check */
1841                if (unlikely(credits < csum)) {
1842                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
                                csk, csk->tid,
1844                                credits, csk->wr_cred, csk->wr_una_cred,
1845                                csum);
1846                        p->csum = (__force __wsum)(csum - credits);
1847                        break;
1848                }
1849
1850                cxgbit_sock_dequeue_wr(csk);
1851                credits -= csum;
1852                kfree_skb(p);
1853        }
1854
1855        if (unlikely(cxgbit_credit_err(csk))) {
1856                cxgbit_queue_rx_skb(csk, skb);
1857                return;
1858        }
1859
1860        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
1861                if (unlikely(before(snd_una, csk->snd_una))) {
                        pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
1863                                csk, csk->tid, snd_una,
1864                                csk->snd_una);
1865                        goto rel_skb;
1866                }
1867
1868                if (csk->snd_una != snd_una) {
1869                        csk->snd_una = snd_una;
1870                        dst_confirm(csk->dst);
1871                        wake_up(&csk->ack_waitq);
1872                }
1873        }
1874
1875        if (skb_queue_len(&csk->txq))
1876                cxgbit_push_tx_frames(csk);
1877
1878rel_skb:
1879        __kfree_skb(skb);
1880}
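
/*
 * Send-side counterpart (sketch under assumptions: the enqueue helper
 * name mirrors cxgbit_sock_peek_wr()/cxgbit_sock_dequeue_wr() used
 * above and is not confirmed by this file): a tx WR records its credit
 * cost in skb->csum before joining the pending list that
 * cxgbit_fw4_ack() unwinds.
 */
static void cxgbit_example_charge_wr(struct cxgbit_sock *csk,
				     struct sk_buff *skb, u32 credits)
{
	skb->csum = (__force __wsum)credits;	/* cost, not a checksum */
	cxgbit_sock_enqueue_wr(csk, skb);	/* assumed helper */
	csk->wr_cred -= credits;
	csk->wr_una_cred += credits;
}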
1881
1882static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1883{
1884        struct cxgbit_sock *csk;
1885        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1886        unsigned int tid = GET_TID(rpl);
1887        struct cxgb4_lld_info *lldi = &cdev->lldi;
1888        struct tid_info *t = lldi->tids;
1889
1890        csk = lookup_tid(t, tid);
1891        if (unlikely(!csk)) {
1892                pr_err("can't find connection for tid %u.\n", tid);
1893                goto rel_skb;
1894        }
1895
1896        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1897
1898        cxgbit_put_csk(csk);
1899rel_skb:
1900        __kfree_skb(skb);
1901}
1902
1903static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1904{
1905        struct cxgbit_sock *csk;
1906        struct cpl_rx_data *cpl = cplhdr(skb);
1907        unsigned int tid = GET_TID(cpl);
1908        struct cxgb4_lld_info *lldi = &cdev->lldi;
1909        struct tid_info *t = lldi->tids;
1910
1911        csk = lookup_tid(t, tid);
1912        if (unlikely(!csk)) {
1913                pr_err("can't find conn. for tid %u.\n", tid);
1914                goto rel_skb;
1915        }
1916
1917        cxgbit_queue_rx_skb(csk, skb);
1918        return;
1919rel_skb:
1920        __kfree_skb(skb);
1921}
1922
1923static void
1924__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1925{
1926        spin_lock(&csk->lock);
1927        if (csk->lock_owner) {
1928                __skb_queue_tail(&csk->backlogq, skb);
1929                spin_unlock(&csk->lock);
1930                return;
1931        }
1932
1933        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
1934        spin_unlock(&csk->lock);
1935}
1936
1937static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1938{
1939        cxgbit_get_csk(csk);
1940        __cxgbit_process_rx_cpl(csk, skb);
1941        cxgbit_put_csk(csk);
1942}
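
/*
 * Lock-owner drain sketch (illustration; the real drain happens where
 * csk->lock_owner is cleared, outside this function): CPLs parked on
 * backlogq by __cxgbit_process_rx_cpl() must be replayed through the
 * same backlog_fn dispatch.  Caller holds csk->lock.
 */
static void cxgbit_example_drain_backlog(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&csk->backlogq)))
		cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
}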
1943
1944static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1945{
1946        struct cxgbit_sock *csk;
1947        struct cpl_tx_data *cpl = cplhdr(skb); /* any CPL type works; only the tid is read */
1948        struct cxgb4_lld_info *lldi = &cdev->lldi;
1949        struct tid_info *t = lldi->tids;
1950        unsigned int tid = GET_TID(cpl);
1951        u8 opcode = cxgbit_skcb_rx_opcode(skb);
1952        bool ref = true;
1953
1954        switch (opcode) {
1955        case CPL_FW4_ACK:
1956                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
1957                ref = false;
1958                break;
1959        case CPL_PEER_CLOSE:
1960                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
1961                break;
1962        case CPL_CLOSE_CON_RPL:
1963                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
1964                break;
1965        case CPL_ABORT_REQ_RSS:
1966                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
1967                break;
1968        case CPL_ABORT_RPL_RSS:
1969                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
1970                break;
1971        default:
1972                goto rel_skb;
1973        }
1974
1975        csk = lookup_tid(t, tid);
1976        if (unlikely(!csk)) {
1977                pr_err("can't find conn. for tid %u.\n", tid);
1978                goto rel_skb;
1979        }
1980
1981        if (ref)
1982                cxgbit_process_rx_cpl(csk, skb);
1983        else
1984                __cxgbit_process_rx_cpl(csk, skb);
1985
1986        return;
1987rel_skb:
1988        __kfree_skb(skb);
1989}
1990
1991cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
1992        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
1993        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
1994        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
1995        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
1996        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
1997        [CPL_RX_DATA]           = cxgbit_rx_data,
1998        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
1999        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
2000        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
2001        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
2002        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
2003};
2004