linux/drivers/target/iscsi/cxgbit/cxgbit_cm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

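/*
 * Control-plane requests in this file follow a common pattern: take a
 * reference, arm a cxgbit_wr_wait completion, post a CPL work request to
 * the adapter, then sleep in cxgbit_wait_for_reply() until the matching
 * reply handler (e.g. cxgbit_pass_open_rpl()) calls cxgbit_wake_up() with
 * the hardware status. CPL_ERR_NONE maps to 0, anything else to -EIO.
 */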
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
        if (ret == CPL_ERR_NONE)
                wr_waitp->ret = 0;
        else
                wr_waitp->ret = -EIO;

        if (wr_waitp->ret)
                pr_err("%s: err:%u\n", func, ret);

        complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
                      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
                      const char *func)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
        if (!ret) {
                pr_info("%s - Device %s not responding tid %u\n",
                        func, pci_name(cdev->lldi.pdev), tid);
                wr_waitp->ret = -ETIMEDOUT;
        }
out:
        if (wr_waitp->ret)
                pr_info("%s: FW reply %d tid %u\n",
                        pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
        return wr_waitp->ret;
}

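/*
 * Hash a listening endpoint (cnp pointer) into np_hash_tab. The >> 10
 * discards low pointer bits shared by all allocations; the mask assumes
 * NP_INFO_HASH_SIZE is a power of two.
 */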
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
                   unsigned int stid)
{
        struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                int bucket = cxgbit_np_hashfn(cnp);

                p->cnp = cnp;
                p->stid = stid;
                spin_lock(&cdev->np_lock);
                p->next = cdev->np_hash_tab[bucket];
                cdev->np_hash_tab[bucket] = p;
                spin_unlock(&cdev->np_lock);
        }

        return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p;

        spin_lock(&cdev->np_lock);
        for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

        spin_lock(&cdev->np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        *prev = p->next;
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
        struct cxgbit_np *cnp;

        cnp = container_of(kref, struct cxgbit_np, kref);
        kfree(cnp);
}

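/*
 * Program a hardware listening server. Both address-family variants take
 * a cnp reference and arm the wr_wait before posting the request via
 * cxgb4. On -ETIMEDOUT the stid (and the IPv6 CLIP entry) is deliberately
 * left allocated, presumably because a late firmware reply could still
 * reference it.
 */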
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                     &cnp->com.local_addr;
        int addr_type;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

        addr_type = ipv6_addr_type((const struct in6_addr *)
                                   &sin6->sin6_addr);
        if (addr_type != IPV6_ADDR_ANY) {
                ret = cxgb4_clip_get(cdev->lldi.ports[0],
                                     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
                if (ret) {
                        pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
                               sin6->sin6_addr.s6_addr, ret);
                        return -ENOMEM;
                }
        }

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server6(cdev->lldi.ports[0],
                                   stid, &sin6->sin6_addr,
                                   sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

                pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
                       ret, stid, sin6->sin6_addr.s6_addr,
                       ntohs(sin6->sin6_port));
        }

        return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)
                                   &cnp->com.local_addr;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server(cdev->lldi.ports[0],
                                  stid, sin->sin_addr.s_addr,
                                  sin->sin_port, 0,
                                  cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev,
                                            &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret)
                pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
                       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
        return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;
                                return cdev;
                        }
                }
        }

        return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_BONDING) {
                pr_err("Bond devices are not supported. Interface:%s\n",
                       ndev->name);
                return NULL;
        }

        if (is_vlan_dev(ndev))
                return vlan_dev_real_dev(ndev);

        return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
        struct net_device *ndev;

        ndev = __ip_dev_find(&init_net, saddr, false);
        if (!ndev)
                return NULL;

        return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
        struct net_device *ndev = NULL;
        bool found = false;

        if (IS_ENABLED(CONFIG_IPV6)) {
                for_each_netdev_rcu(&init_net, ndev)
                        if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
                                found = true;
                                break;
                        }
        }
        if (!found)
                return NULL;
        return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        struct net_device *ndev = NULL;
        struct cxgbit_device *cdev = NULL;

        rcu_read_lock();
        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
        }
        if (!ndev)
                goto out;

        cdev = cxgbit_find_device(ndev, NULL);
out:
        rcu_read_unlock();
        return cdev;
}

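/*
 * Report whether the endpoint was bound to a wildcard address (0.0.0.0 or
 * ::). Wildcard listeners must be programmed on every adapter in the
 * system rather than on a single cdev; see cxgbit_setup_all_np() below.
 */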
static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        int addr_type;

        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                addr_type = ipv6_addr_type((const struct in6_addr *)
                                &sin6->sin6_addr);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        int ss_family = cnp->com.local_addr.ss_family;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -EINVAL;
        }

        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_free_stid(cdev->lldi.tids, stid,
                                        ss_family);
                cxgbit_np_hash_del(cdev, cnp);
                return ret;
        }
        return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret = -1;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_np_cdev(cnp);
        if (!cdev)
                goto out;

        if (cxgbit_np_hash_find(cdev, cnp) >= 0)
                goto out;

        if (__cxgbit_setup_cdev_np(cdev, cnp))
                goto out;

        cnp->com.cdev = cdev;
        ret = 0;
out:
        mutex_unlock(&cdev_list_lock);
        return ret;
}

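/*
 * Wildcard case: try to program the listener on every registered adapter.
 * A single -ETIMEDOUT aborts the walk (the device is presumed wedged);
 * any other per-device failure is skipped, and the setup counts as a
 * success if at least one adapter accepted the server request.
 */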
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;
        u32 count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
                if (ret != 0)
                        continue;
                count++;
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;
}

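/*
 * Entry point from the iSCSI target core for bringing up a network
 * portal. Allocate and initialize the cnp, mirror the bound address into
 * np->np_sockaddr and cnp->com.local_addr, then program the hardware
 * listener(s): every adapter for a wildcard bind, otherwise only the
 * adapter that owns the local address.
 */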
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
        struct cxgbit_np *cnp;
        int ret;

        if ((ksockaddr->ss_family != AF_INET) &&
            (ksockaddr->ss_family != AF_INET6))
                return -EINVAL;

        cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
        if (!cnp)
                return -ENOMEM;

        init_waitqueue_head(&cnp->accept_wait);
        init_completion(&cnp->com.wr_wait.completion);
        init_completion(&cnp->accept_comp);
        INIT_LIST_HEAD(&cnp->np_accept_list);
        spin_lock_init(&cnp->np_accept_lock);
        kref_init(&cnp->kref);
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&cnp->com.local_addr, &np->np_sockaddr,
               sizeof(cnp->com.local_addr));

        cnp->np = np;
        cnp->com.cdev = NULL;

        if (cxgbit_inaddr_any(cnp))
                ret = cxgbit_setup_all_np(cnp);
        else
                ret = cxgbit_setup_cdev_np(cnp);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return -EINVAL;
        }

        np->np_context = cnp;
        cnp->com.state = CSK_STATE_LISTEN;
        return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
                     struct cxgbit_sock *csk)
{
        conn->login_family = np->np_sockaddr.ss_family;
        conn->login_sockaddr = csk->com.remote_addr;
        conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
        int ret = 0;

accept_wait:
        ret = wait_for_completion_interruptible(&cnp->accept_comp);
        if (ret)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                /*
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail
                 */
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        spin_lock_bh(&cnp->np_accept_lock);
        if (list_empty(&cnp->np_accept_list)) {
                spin_unlock_bh(&cnp->np_accept_lock);
                goto accept_wait;
        }

        csk = list_first_entry(&cnp->np_accept_list,
                               struct cxgbit_sock,
                               accept_node);

        list_del_init(&csk->accept_node);
        spin_unlock_bh(&cnp->np_accept_lock);
        conn->context = csk;
        csk->conn = conn;

        cxgbit_set_conn_info(np, conn, csk);
        return 0;
}

static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        bool ipv6 = false;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL;
        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        if (cnp->np->np_sockaddr.ss_family == AF_INET6)
                ipv6 = true;

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);
        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);

        if (ret > 0)
                ret = net_xmit_errno(ret);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return ret;
        }

        ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                    0, 10, __func__);
        if (ret == -ETIMEDOUT)
                return ret;

        if (ipv6 && cnp->com.cdev) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr,
                                   1);
        }

        cxgb4_free_stid(cdev->lldi.tids, stid,
                        cnp->com.local_addr.ss_family);
        return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_free_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
        }
        mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        bool found = false;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cdev == cnp->com.cdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto out;

        __cxgbit_free_cdev_np(cdev, cnp);
out:
        mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk, *tmp;

        cnp->com.state = CSK_STATE_DEAD;
        if (cnp->com.cdev)
                cxgbit_free_cdev_np(cnp);
        else
                cxgbit_free_all_np(cnp);

        spin_lock_bh(&cnp->np_accept_lock);
        list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
                list_del_init(&csk->accept_node);
                __cxgbit_free_conn(csk);
        }
        spin_unlock_bh(&cnp->np_accept_lock);

        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
}

static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
                              NULL, NULL);

        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
        cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = handle;

        pr_debug("%s csk %p\n", __func__, csk);
        kfree_skb(skb);
        cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbit_device *cdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        pr_debug("%s cdev %p\n", __func__, cdev);
        req->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_abort_req), 16);

        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        skb = __skb_dequeue(&csk->skbq);
        cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
                          csk->com.cdev, cxgbit_abort_arp_failure);

        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        __kfree_skb(skb);

        if (csk->com.state != CSK_STATE_ESTABLISHED)
                goto no_abort;

        set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
        csk->com.state = CSK_STATE_ABORTING;

        cxgbit_send_abort_req(csk);

        return;

no_abort:
        cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
        cxgbit_put_csk(csk);
}

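/*
 * Abort a connection from a context that may race with the rx path. If
 * another context currently owns csk->lock, the abort cannot run inline:
 * the skb is parked on csk->backlogq with __cxgbit_abort_conn as its
 * backlog callback, to be replayed once the owner drops the lock. Either
 * way, the caller then sleeps on the wr_wait (with a generous 600 second
 * bound) until the abort completes.
 */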
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
        struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
                __skb_queue_tail(&csk->backlogq, skb);
        } else {
                __cxgbit_abort_conn(csk, skb);
        }
        spin_unlock_bh(&csk->lock);

        cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
                              csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
        struct iscsi_conn *conn = csk->conn;
        bool release = false;

        pr_debug("%s: state %d\n",
                 __func__, csk->com.state);

        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
                        csk->com.state = CSK_STATE_ABORTING;
                        cxgbit_send_abort_req(csk);
                }
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                cxgbit_send_halfclose(csk);
                break;
        case CSK_STATE_DEAD:
                release = true;
                break;
        default:
                pr_err("%s: csk %p; state %d\n",
                       __func__, csk, csk->com.state);
        }
        spin_unlock_bh(&csk->lock);

        if (release)
                cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
        __cxgbit_free_conn(conn->context);
}

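/*
 * Derive the effective MSS from the negotiated MTU index. The emss is the
 * MTU minus IP and TCP headers, further reduced by the (aligned) TCP
 * timestamp option when present, and floored at 128 bytes. For example,
 * an IPv4 1500-byte MTU gives 1500 - 20 - 20 = 1460, or 1448 with
 * timestamps enabled.
 */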
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
                        ((csk->com.remote_addr.ss_family == AF_INET) ?
                        sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                        sizeof(struct tcphdr);
        csk->mss = csk->emss;
        if (TCPOPT_TSTAMP_G(opt))
                csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (csk->emss < 128)
                csk->emss = 128;
        if (csk->emss & 7)
                pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
                        TCPOPT_MSS_G(opt), csk->mss, csk->emss);
        pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
                 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;

        __skb_queue_purge(&csk->txq);
        __skb_queue_purge(&csk->rxq);
        __skb_queue_purge(&csk->backlogq);
        __skb_queue_purge(&csk->ppodq);
        __skb_queue_purge(&csk->skbq);

        while ((skb = cxgbit_sock_dequeue_wr(csk)))
                kfree_skb(skb);

        __kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
        struct cxgbit_sock *csk;
        struct cxgbit_device *cdev;

        csk = container_of(kref, struct cxgbit_sock, kref);

        pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

        if (csk->com.local_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                             &csk->com.local_addr;
                cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
                                   (const u32 *)
                                   &sin6->sin6_addr.s6_addr, 1);
        }

        cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
                         csk->com.local_addr.ss_family);
        dst_release(csk->dst);
        cxgb4_l2t_release(csk->l2t);

        cdev = csk->com.cdev;
        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        cxgbit_free_skb(csk);
        cxgbit_put_cnp(csk->cnp);
        cxgbit_put_cdev(cdev);

        kfree(csk);
}

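/*
 * Size the TCP send/receive windows from the port's link speed, in whole
 * multiples of 10 Gb/s: a 10G link keeps the 256 KB baseline, a 40G link
 * gets 4 * 256 KB = 1 MB, and sub-10G links (scale == 0) keep the
 * baseline as well.
 */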
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
        unsigned int linkspeed;
        u8 scale;

        linkspeed = pi->link_cfg.speed;
        scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
        csk->rcv_win = CXGBIT_10G_RCV_WIN;
        if (scale)
                csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
        csk->snd_win = CXGBIT_10G_SND_WIN;
        if (scale)
                csk->snd_win *= scale;

        pr_debug("%s snd_win %d rcv_win %d\n",
                 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
        return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
        if (!pri_mask)
                return 0;

        return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
        int ret;
        u8 caps;

        struct dcb_app iscsi_dcb_app = {
                .protocol = local_port
        };

        ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

        if (ret)
                return 0;

        if (caps & DCB_CAP_DCBX_VER_IEEE) {
                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
                ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                if (!ret) {
                        iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
                        ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                }
        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

                ret = dcb_getapp(ndev, &iscsi_dcb_app);
        }

        pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

        return cxgbit_select_priority(ret);
}
#endif

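/*
 * Bind a new child connection to its egress resources: L2T entry, MTU,
 * TX channel, SMAC index, and TX/RX queue indices. For loopback peers the
 * queues are derived directly from the port index; for real peers they
 * are spread round-robin across the port's queue range via the per-port
 * selectq counters (txq_idx = port * step + counter % step).
 */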
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
                    u16 local_port, struct dst_entry *dst,
                    struct cxgbit_device *cdev)
{
        struct neighbour *n;
        int ret, step;
        struct net_device *ndev;
        u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 priority = 0;
#endif

        n = dst_neigh_lookup(dst, peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        if (!(n->nud_state & NUD_VALID))
                neigh_event_send(n, NULL);

        ret = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                if (iptype == 4)
                        ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
                else if (IS_ENABLED(CONFIG_IPV6))
                        ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
                else
                        ndev = NULL;

                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
                                         n, ndev, 0);
                if (!csk->l2t)
                        goto out;
                csk->mtu = ndev->mtu;
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nchan;
                csk->txq_idx = cxgb4_port_idx(ndev) * step;
                step = cdev->lldi.nrxq /
                        cdev->lldi.nchan;
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                csk->rss_qid = cdev->lldi.rxq_ids[
                                cxgb4_port_idx(ndev) * step];
                csk->port_id = cxgb4_port_idx(ndev);
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        } else {
                ndev = cxgbit_get_real_dev(n->dev);
                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

#ifdef CONFIG_CHELSIO_T4_DCB
                if (cxgbit_get_iscsi_dcb_state(ndev))
                        priority = cxgbit_get_iscsi_dcb_priority(ndev,
                                                                 local_port);

                csk->dcb_priority = priority;

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
                if (!csk->l2t)
                        goto out;
                port_id = cxgb4_port_idx(ndev);
                csk->mtu = dst_mtu(dst);
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nports;
                csk->txq_idx = (port_id * step) +
                                (cdev->selectq[port_id][0]++ % step);
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                step = cdev->lldi.nrxq /
                        cdev->lldi.nports;
                rxq_idx = (port_id * step) +
                                (cdev->selectq[port_id][1]++ % step);
                csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
                csk->port_id = port_id;
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        }
        ret = 0;
out:
        rcu_read_unlock();
        neigh_release(n);
        return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
        u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_tid_release(skb, len, tid, 0);
        cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
                struct l2t_entry *l2e)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
                __kfree_skb(skb);
                return;
        }

        cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host -> card.
 * Return RX credits to the hardware through an RX_DATA_ACK CPL message.
 * Returns 0 on success (resetting csk->rx_credits), -1 on allocation
 * failure.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
        u32 credit_dack;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;

        credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
                      RX_CREDITS_V(csk->rx_credits);

        cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
                            credit_dack);

        csk->rx_credits = 0;

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock_bh(&csk->lock);
                return 0;
        }

        cxgbit_send_rx_credits(csk, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX    11
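/*
 * Pre-allocate the skbs this connection may need on paths that must not
 * fail: three control skbs sized for the largest of an abort req/rpl or a
 * full flowc work request (csk->skbq), plus one zeroed skb reserved for
 * LRO header reassembly (csk->lro_hskb). cxgbit_send_abort_req() and
 * cxgbit_send_tx_flowc_wr() dequeue from skbq instead of allocating.
 */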
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len, flowclen;
        u8 i;

        flowclen = offsetof(struct fw_flowc_wr,
                            mnemval[FLOWC_WR_NPARAMS_MAX]);

        len = max_t(u32, sizeof(struct cpl_abort_req),
                    sizeof(struct cpl_abort_rpl));

        len = max(len, flowclen);
        len = roundup(len, 16);

        for (i = 0; i < 3; i++) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (!skb)
                        goto out;
                __skb_queue_tail(&csk->skbq, skb);
        }

        skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
        if (!skb)
                goto out;

        memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
        csk->lro_hskb = skb;

        return 0;
out:
        __skb_queue_purge(&csk->skbq);
        return -ENOMEM;
}

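/*
 * Build the CPL_PASS_ACCEPT_RPL that commits the child connection. opt0
 * carries the window scale, MSS index, L2T index, and the initial receive
 * buffer: only the part of rcv_win that fits in the RCV_BUFSIZ field (in
 * 1 KB units, capped at RCV_BUFSIZ_M) is granted here; the remainder is
 * handed out later as RX_DATA_ACK credits (see cxgbit_pass_establish()).
 */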
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
        struct sk_buff *skb;
        const struct tcphdr *tcph;
        struct cpl_t5_pass_accept_rpl *rpl5;
        struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        unsigned int len = roundup(sizeof(*rpl5), 16);
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2, hlen;
        u32 wscale;
        u32 win;

        pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                cxgbit_put_csk(csk);
                return;
        }

        rpl5 = __skb_put_zero(skb, len);

        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
        cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
                      req->tcpopt.tstamp,
                      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
        wscale = cxgb_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
         */
        win = csk->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
        opt0 =  TCAM_BYPASS_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(csk->l2t->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                DSCP_V(csk->tos >> 2) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(win);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

        if (!is_t5(lldi->adapter_type))
                opt2 |= RX_FC_DISABLE_F;

        if (req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN_F;
        if (req->tcpopt.sack)
                opt2 |= SACK_EN_F;
        if (wscale)
                opt2 |= WND_SCALE_EN_F;

        hlen = ntohl(req->hdr_len);

        if (is_t5(lldi->adapter_type))
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
        else
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

        if (tcph->ece && tcph->cwr)
                opt2 |= CCTRL_ECN_V(1);

        opt2 |= RX_COALESCE_V(3);
        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

        opt2 |= T5_ISS_F;
        rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

        opt2 |= T5_OPT_2_VALID_F;

        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
        t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
        cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = NULL;
        struct cxgbit_np *cnp;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        u16 peer_mss = ntohs(req->tcpopt.mss);
        unsigned short hdrs;

        struct dst_entry *dst;
        __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int ret;
        int iptype;

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        cnp = lookup_stid(t, stid);
        if (!cnp) {
                pr_err("%s connect request on invalid stid %d\n",
                       __func__, stid);
                goto rel_skb;
        }

        if (cnp->com.state != CSK_STATE_LISTEN) {
                pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
                       __func__);
                goto reject;
        }

        csk = lookup_tid(t, tid);
        if (csk) {
                pr_err("%s csk not null tid %u\n",
                       __func__, tid);
                goto rel_skb;
        }

        cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
                        peer_ip, &local_port, &peer_port);

        /* Find output route */
        if (iptype == 4)  {
                pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
                                      *(__be32 *)local_ip,
                                      *(__be32 *)peer_ip,
                                      local_port, peer_port,
                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
                                       local_ip, peer_ip,
                                       local_port, peer_port,
                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                       ((struct sockaddr_in6 *)
                                        &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }

        csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
        if (!csk) {
                dst_release(dst);
                goto rel_skb;
        }

        ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
                                  dst, cdev);
        if (ret) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(csk);
                goto reject;
        }

        kref_init(&csk->kref);
        init_completion(&csk->com.wr_wait.completion);

        INIT_LIST_HEAD(&csk->accept_node);

        hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
                sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
        if (peer_mss && csk->mtu > (peer_mss + hdrs))
                csk->mtu = peer_mss + hdrs;

        csk->com.state = CSK_STATE_CONNECTING;
        csk->com.cdev = cdev;
        csk->cnp = cnp;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        csk->dst = dst;
        csk->tid = tid;
        csk->wr_cred = cdev->lldi.wr_cred -
                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
        csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;

        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
                                          &csk->com.local_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;

                sin = (struct sockaddr_in *)&csk->com.remote_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;

                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
                cxgb4_clip_get(cdev->lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr,
                               1);

                sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }

        skb_queue_head_init(&csk->rxq);
        skb_queue_head_init(&csk->txq);
        skb_queue_head_init(&csk->ppodq);
        skb_queue_head_init(&csk->backlogq);
        skb_queue_head_init(&csk->skbq);
        cxgbit_sock_reset_wr_list(csk);
        spin_lock_init(&csk->lock);
        init_waitqueue_head(&csk->waitq);
        init_waitqueue_head(&csk->ack_waitq);
        csk->lock_owner = false;

        if (cxgbit_alloc_csk_skb(csk)) {
                dst_release(dst);
                kfree(csk);
                goto rel_skb;
        }

        cxgbit_get_cnp(cnp);
        cxgbit_get_cdev(cdev);

        spin_lock(&cdev->cskq.lock);
        list_add_tail(&csk->list, &cdev->cskq.list);
        spin_unlock(&cdev->cskq.lock);
        cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
        cxgbit_pass_accept_rpl(csk, req);
        goto rel_skb;

reject:
        cxgbit_release_tid(cdev, tid);
rel_skb:
        __kfree_skb(skb);
}

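/*
 * Compute the size of the flowc work request in 16-byte credits. The WR
 * length is offsetof(struct fw_flowc_wr, mnemval[nparams]) rounded up to
 * a multiple of 16; e.g. with the 9 mandatory parameters plus the DCB
 * priority and receive-scale entries, nparams can reach
 * FLOWC_WR_NPARAMS_MAX (11).
 */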
static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
                           u32 *flowclenp)
{
        u32 nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;

        if (csk->snd_wscale)
                nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the flowc request.
         * Pass back the nparams and actual flowc length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;
        return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct fw_flowc_wr *flowc;
        u32 nparams, flowclen16, flowclen;
        struct sk_buff *skb;
        u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

        flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

        skb = __skb_dequeue(&csk->skbq);
        flowc = __skb_put_zero(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                            (csk->com.cdev->lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(csk->emss);

        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
                flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
        else
                flowc->mnemval[8].val = cpu_to_be32(16384);

        index = 9;

        if (csk->snd_wscale) {
                flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
                flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
                index++;
        }

#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == VLAN_NONE) {
                pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
                flowc->mnemval[index].val = cpu_to_be32(0);
        } else
                flowc->mnemval[index].val = cpu_to_be32(
                                (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

        pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
                 " rcv_seq = %u; snd_win = %u; emss = %u\n",
                 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
                 csk->rcv_nxt, csk->snd_win, csk->emss);
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, skb);
        return flowclen16;
}

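/*
 * Enable iSCSI header/data digest offload by rewriting the connection's
 * ULP submode bits in its TCB. The mask 0x3 << 4 selects the two submode
 * bits; the value sets ULP_CRC_HEADER and/or ULP_CRC_DATA according to
 * the negotiated csk->submode. This is a set-and-wait operation like the
 * other CPL_SET_TCB_FIELD user below.
 */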
1488int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1489{
1490        struct sk_buff *skb;
1491        struct cpl_set_tcb_field *req;
1492        u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1493        u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1494        unsigned int len = roundup(sizeof(*req), 16);
1495        int ret;
1496
1497        skb = alloc_skb(len, GFP_KERNEL);
1498        if (!skb)
1499                return -ENOMEM;
1500
1501        /*  set up ulp submode */
1502        req = __skb_put_zero(skb, len);
1503
1504        INIT_TP_WR(req, csk->tid);
1505        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1506        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1507        req->word_cookie = htons(0);
1508        req->mask = cpu_to_be64(0x3 << 4);
1509        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1510                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
1511        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1512
1513        cxgbit_get_csk(csk);
1514        cxgbit_init_wr_wait(&csk->com.wr_wait);
1515
1516        cxgbit_ofld_send(csk->com.cdev, skb);
1517
1518        ret = cxgbit_wait_for_reply(csk->com.cdev,
1519                                    &csk->com.wr_wait,
1520                                    csk->tid, 5, __func__);
1521        if (ret)
1522                return -1;
1523
1524        return 0;
1525}
1526
1527int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1528{
1529        struct sk_buff *skb;
1530        struct cpl_set_tcb_field *req;
1531        unsigned int len = roundup(sizeof(*req), 16);
1532        int ret;
1533
1534        skb = alloc_skb(len, GFP_KERNEL);
1535        if (!skb)
1536                return -ENOMEM;
1537
1538        req = __skb_put_zero(skb, len);
1539
1540        INIT_TP_WR(req, csk->tid);
1541        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1542        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1543        req->word_cookie = htons(0);
1544        req->mask = cpu_to_be64(0x3 << 8);
1545        req->val = cpu_to_be64(pg_idx << 8);
1546        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1547
1548        cxgbit_get_csk(csk);
1549        cxgbit_init_wr_wait(&csk->com.wr_wait);
1550
1551        cxgbit_ofld_send(csk->com.cdev, skb);
1552
1553        ret = cxgbit_wait_for_reply(csk->com.cdev,
1554                                    &csk->com.wr_wait,
1555                                    csk->tid, 5, __func__);
1556        if (ret)
1557                return -1;
1558
1559        return 0;
1560}
1561
1562static void
1563cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1564{
1565        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1566        struct tid_info *t = cdev->lldi.tids;
1567        unsigned int stid = GET_TID(rpl);
1568        struct cxgbit_np *cnp = lookup_stid(t, stid);
1569
1570        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1571                 __func__, cnp, stid, rpl->status);
1572
1573        if (!cnp) {
1574                pr_info("%s stid %d lookup failure\n", __func__, stid);
1575                goto rel_skb;
1576        }
1577
1578        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1579        cxgbit_put_cnp(cnp);
1580rel_skb:
1581        __kfree_skb(skb);
1582}
1583
1584static void
1585cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1586{
1587        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1588        struct tid_info *t = cdev->lldi.tids;
1589        unsigned int stid = GET_TID(rpl);
1590        struct cxgbit_np *cnp = lookup_stid(t, stid);
1591
1592        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1593                 __func__, cnp, stid, rpl->status);
1594
1595        if (!cnp) {
1596                pr_info("%s stid %d lookup failure\n", __func__, stid);
1597                goto rel_skb;
1598        }
1599
1600        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1601        cxgbit_put_cnp(cnp);
1602rel_skb:
1603        __kfree_skb(skb);
1604}
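
    /*
     * Both listen-server replies above simply complete the wr_wait armed by
     * the stid setup/teardown path and drop a cnp reference taken there,
     * letting the listener code sleep synchronously on the firmware answer.
     */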
1605
1606static void
1607cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
1608{
1609        struct cpl_pass_establish *req = cplhdr(skb);
1610        struct tid_info *t = cdev->lldi.tids;
1611        unsigned int tid = GET_TID(req);
1612        struct cxgbit_sock *csk;
1613        struct cxgbit_np *cnp;
1614        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
1615        u32 snd_isn = be32_to_cpu(req->snd_isn);
1616        u32 rcv_isn = be32_to_cpu(req->rcv_isn);
1617
1618        csk = lookup_tid(t, tid);
1619        if (unlikely(!csk)) {
1620                pr_err("can't find connection for tid %u.\n", tid);
1621                goto rel_skb;
1622        }
1623        cnp = csk->cnp;
1624
1625        pr_debug("%s: csk %p; tid %u; cnp %p\n",
1626                 __func__, csk, tid, cnp);
1627
1628        csk->write_seq = snd_isn;
1629        csk->snd_una = snd_isn;
1630        csk->snd_nxt = snd_isn;
1631
1632        csk->rcv_nxt = rcv_isn;
1633
1634        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
1635                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
1636
1637        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1638        cxgbit_set_emss(csk, tcp_opt);
1639        dst_confirm(csk->dst);
1640        csk->com.state = CSK_STATE_ESTABLISHED;
1641        spin_lock_bh(&cnp->np_accept_lock);
1642        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
1643        spin_unlock_bh(&cnp->np_accept_lock);
1644        complete(&cnp->accept_comp);
1645rel_skb:
1646        __kfree_skb(skb);
1647}
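
    /*
     * Note on the rx_credits computation above (assuming the usual 10-bit
     * RCV_BUFSIZ_M mask of 0x3ff): RCV_BUFSIZ is counted in 1 KB units, so
     * the hardware buffer advertisement tops out at RCV_BUFSIZ_M << 10, a
     * shade under 1 MiB.  Any surplus of rcv_win over that cap is carried in
     * rx_credits and returned to the peer later via rx credit updates.
     */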
1648
1649static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1650{
1651        cxgbit_skcb_flags(skb) = 0;
1652        spin_lock_bh(&csk->rxq.lock);
1653        __skb_queue_tail(&csk->rxq, skb);
1654        spin_unlock_bh(&csk->rxq.lock);
1655        wake_up(&csk->waitq);
1656}
1657
1658static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1659{
1660        pr_debug("%s: csk %p; tid %u; state %d\n",
1661                 __func__, csk, csk->tid, csk->com.state);
1662
1663        switch (csk->com.state) {
1664        case CSK_STATE_ESTABLISHED:
1665                csk->com.state = CSK_STATE_CLOSING;
1666                cxgbit_queue_rx_skb(csk, skb);
1667                return;
1668        case CSK_STATE_CLOSING:
1669                /* simultaneous close */
1670                csk->com.state = CSK_STATE_MORIBUND;
1671                break;
1672        case CSK_STATE_MORIBUND:
1673                csk->com.state = CSK_STATE_DEAD;
1674                cxgbit_put_csk(csk);
1675                break;
1676        case CSK_STATE_ABORTING:
1677                break;
1678        default:
1679                pr_info("%s: cpl_peer_close in bad state %d\n",
1680                        __func__, csk->com.state);
1681        }
1682
1683        __kfree_skb(skb);
1684}
1685
1686static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1687{
1688        pr_debug("%s: csk %p; tid %u; state %d\n",
1689                 __func__, csk, csk->tid, csk->com.state);
1690
1691        switch (csk->com.state) {
1692        case CSK_STATE_CLOSING:
1693                csk->com.state = CSK_STATE_MORIBUND;
1694                break;
1695        case CSK_STATE_MORIBUND:
1696                csk->com.state = CSK_STATE_DEAD;
1697                cxgbit_put_csk(csk);
1698                break;
1699        case CSK_STATE_ABORTING:
1700        case CSK_STATE_DEAD:
1701                break;
1702        default:
1703                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
1704                        __func__, csk->com.state);
1705        }
1706
1707        __kfree_skb(skb);
1708}
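
    /*
     * Close-path summary for the two handlers above: CPL_PEER_CLOSE in
     * ESTABLISHED defers to the rx thread and enters CLOSING; once a local
     * close has been issued, CPL_CLOSE_CON_RPL advances CLOSING -> MORIBUND;
     * whichever of the two messages arrives last completes MORIBUND -> DEAD
     * and drops the connection reference.
     */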
1709
1710static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1711{
1712        struct cpl_abort_req_rss *hdr = cplhdr(skb);
1713        unsigned int tid = GET_TID(hdr);
1714        struct sk_buff *rpl_skb;
1715        bool release = false;
1716        bool wakeup_thread = false;
1717        u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
1718
1719        pr_debug("%s: csk %p; tid %u; state %d\n",
1720                 __func__, csk, tid, csk->com.state);
1721
1722        if (cxgb_is_neg_adv(hdr->status)) {
1723                pr_err("%s: got negative advice %d on tid %u\n",
1724                       __func__, hdr->status, tid);
1725                goto rel_skb;
1726        }
1727
1728        switch (csk->com.state) {
1729        case CSK_STATE_CONNECTING:
1730        case CSK_STATE_MORIBUND:
1731                csk->com.state = CSK_STATE_DEAD;
1732                release = true;
1733                break;
1734        case CSK_STATE_ESTABLISHED:
1735                csk->com.state = CSK_STATE_DEAD;
1736                wakeup_thread = true;
1737                break;
1738        case CSK_STATE_CLOSING:
1739                csk->com.state = CSK_STATE_DEAD;
1740                if (!csk->conn)
1741                        release = true;
1742                break;
1743        case CSK_STATE_ABORTING:
1744                break;
1745        default:
1746                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
1747                        __func__, csk->com.state);
1748                csk->com.state = CSK_STATE_DEAD;
1749        }
1750
1751        __skb_queue_purge(&csk->txq);
1752
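            /* the hardware expects a FLOWC work request before any other
             * tx WR on this tid, including the abort reply sent below
             */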
1753        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1754                cxgbit_send_tx_flowc_wr(csk);
1755
1756        rpl_skb = __skb_dequeue(&csk->skbq);
1757
1758        cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
1759        cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1760
1761        if (wakeup_thread) {
1762                cxgbit_queue_rx_skb(csk, skb);
1763                return;
1764        }
1765
1766        if (release)
1767                cxgbit_put_csk(csk);
1768rel_skb:
1769        __kfree_skb(skb);
1770}
1771
1772static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1773{
1774        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1775
1776        pr_debug("%s: csk %p; tid %u; state %d\n",
1777                 __func__, csk, csk->tid, csk->com.state);
1778
1779        switch (csk->com.state) {
1780        case CSK_STATE_ABORTING:
1781                csk->com.state = CSK_STATE_DEAD;
1782                if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
1783                        cxgbit_wake_up(&csk->com.wr_wait, __func__,
1784                                       rpl->status);
1785                cxgbit_put_csk(csk);
1786                break;
1787        default:
1788                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
1789                        __func__, csk->com.state);
1790        }
1791
1792        __kfree_skb(skb);
1793}
1794
1795static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1796{
1797        const struct sk_buff *skb = csk->wr_pending_head;
1798        u32 credit = 0;
1799
1800        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1801                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1802                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1803                return true;
1804        }
1805
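            /* the tx path stores each pending WR's credit count in
             * skb->csum, hence the __force casts when summing them here
             */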
1806        while (skb) {
1807                credit += (__force u32)skb->csum;
1808                skb = cxgbit_skcb_tx_wr_next(skb);
1809        }
1810
1811        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1812                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1813                       csk, csk->tid, csk->wr_cred,
1814                       credit, csk->wr_max_cred);
1815
1816                return true;
1817        }
1818
1819        return false;
1820}
1821
1822static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
1823{
1824        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
1825        u32 credits = rpl->credits;
1826        u32 snd_una = ntohl(rpl->snd_una);
1827
1828        csk->wr_cred += credits;
1829        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
1830                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1831
1832        while (credits) {
1833                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
1834                u32 csum;
1835
1836                if (unlikely(!p)) {
1837                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
1838                               csk, csk->tid, credits,
1839                               csk->wr_cred, csk->wr_una_cred);
1840                        break;
1841                }
1842                csum = (__force u32)p->csum; /* p is known non-NULL here */

1843                if (unlikely(credits < csum)) {
1844                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
1845                                csk, csk->tid,
1846                                credits, csk->wr_cred, csk->wr_una_cred,
1847                                csum);
1848                        p->csum = (__force __wsum)(csum - credits);
1849                        break;
1850                }
1851
1852                cxgbit_sock_dequeue_wr(csk);
1853                credits -= csum;
1854                kfree_skb(p);
1855        }
1856
1857        if (unlikely(cxgbit_credit_err(csk))) {
1858                cxgbit_queue_rx_skb(csk, skb);
1859                return;
1860        }
1861
1862        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
1863                if (unlikely(before(snd_una, csk->snd_una))) {
1864                        pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
1865                                csk, csk->tid, snd_una,
1866                                csk->snd_una);
1867                        goto rel_skb;
1868                }
1869
1870                if (csk->snd_una != snd_una) {
1871                        csk->snd_una = snd_una;
1872                        dst_confirm(csk->dst);
1873                        wake_up(&csk->ack_waitq);
1874                }
1875        }
1876
1877        if (skb_queue_len(&csk->txq))
1878                cxgbit_push_tx_frames(csk);
1879
1880rel_skb:
1881        __kfree_skb(skb);
1882}
1883
1884static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1885{
1886        struct cxgbit_sock *csk;
1887        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1888        unsigned int tid = GET_TID(rpl);
1889        struct cxgb4_lld_info *lldi = &cdev->lldi;
1890        struct tid_info *t = lldi->tids;
1891
1892        csk = lookup_tid(t, tid);
1893        if (unlikely(!csk)) {
1894                pr_err("can't find connection for tid %u.\n", tid);
1895                goto rel_skb;
1896        }
1897
1898        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1899
1900        cxgbit_put_csk(csk);
1901rel_skb:
1902        __kfree_skb(skb);
1903}
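
    /*
     * The handler above pairs with the two TCB setters earlier in this file:
     * it completes their wr_wait and drops the reference taken with
     * cxgbit_get_csk() before the request was sent.
     */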
1904
1905static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1906{
1907        struct cxgbit_sock *csk;
1908        struct cpl_rx_data *cpl = cplhdr(skb);
1909        unsigned int tid = GET_TID(cpl);
1910        struct cxgb4_lld_info *lldi = &cdev->lldi;
1911        struct tid_info *t = lldi->tids;
1912
1913        csk = lookup_tid(t, tid);
1914        if (unlikely(!csk)) {
1915                pr_err("can't find conn. for tid %u.\n", tid);
1916                goto rel_skb;
1917        }
1918
1919        cxgbit_queue_rx_skb(csk, skb);
1920        return;
1921rel_skb:
1922        __kfree_skb(skb);
1923}
1924
1925static void
1926__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1927{
1928        spin_lock(&csk->lock);
1929        if (csk->lock_owner) {
1930                __skb_queue_tail(&csk->backlogq, skb);
1931                spin_unlock(&csk->lock);
1932                return;
1933        }
1934
1935        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
1936        spin_unlock(&csk->lock);
1937}
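
    /*
     * If the rx thread currently owns the socket (csk->lock_owner), the CPL
     * above is parked on backlogq and replayed when the owner releases the
     * lock; otherwise its backlog handler runs immediately under csk->lock.
     */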
1938
1939static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1940{
1941        cxgbit_get_csk(csk);
1942        __cxgbit_process_rx_cpl(csk, skb);
1943        cxgbit_put_csk(csk);
1944}
1945
1946static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1947{
1948        struct cxgbit_sock *csk;
1949        struct cpl_tx_data *cpl = cplhdr(skb);
1950        struct cxgb4_lld_info *lldi = &cdev->lldi;
1951        struct tid_info *t = lldi->tids;
1952        unsigned int tid = GET_TID(cpl);
1953        u8 opcode = cxgbit_skcb_rx_opcode(skb);
1954        bool ref = true;
1955
1956        switch (opcode) {
1957        case CPL_FW4_ACK:
1958                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
1959                ref = false;
1960                break;
1961        case CPL_PEER_CLOSE:
1962                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
1963                break;
1964        case CPL_CLOSE_CON_RPL:
1965                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
1966                break;
1967        case CPL_ABORT_REQ_RSS:
1968                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
1969                break;
1970        case CPL_ABORT_RPL_RSS:
1971                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
1972                break;
1973        default:
1974                goto rel_skb;
1975        }
1976
1977        csk = lookup_tid(t, tid);
1978        if (unlikely(!csk)) {
1979                pr_err("can't find conn. for tid %u.\n", tid);
1980                goto rel_skb;
1981        }
1982
1983        if (ref)
1984                cxgbit_process_rx_cpl(csk, skb);
1985        else
1986                __cxgbit_process_rx_cpl(csk, skb);
1987
1988        return;
1989rel_skb:
1990        __kfree_skb(skb);
1991}
1992
1993cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
1994        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
1995        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
1996        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
1997        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
1998        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
1999        [CPL_RX_DATA]           = cxgbit_rx_data,
2000        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
2001        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
2002        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
2003        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
2004        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
2005};
2006
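    /*
     * Editor's sketch (assumed shape, not driver code): the ULD rx hook in
     * cxgbit_main.c is expected to dispatch received CPLs through the table
     * above roughly like this, dropping opcodes that have no handler.
     */
    static inline void cxgbit_dispatch_cpl_example(struct cxgbit_device *cdev,
                                                   struct sk_buff *skb)
    {
            u8 opcode = cxgbit_skcb_rx_opcode(skb);

            if (opcode < NUM_CPL_CMDS && cxgbit_cplhandlers[opcode])
                    cxgbit_cplhandlers[opcode](cdev, skb);
            else
                    __kfree_skb(skb);
    }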