linux/drivers/target/iscsi/cxgbit/cxgbit_cm.c
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

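/*
 * Work-request wait helpers: a caller arms the completion with
 * cxgbit_init_wr_wait() before posting a request, the CPL reply handler
 * signals it through cxgbit_wake_up(), and the poster blocks in
 * cxgbit_wait_for_reply() until the reply arrives or the timeout (in
 * seconds) expires.
 */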
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
	if (ret == CPL_ERR_NONE)
		wr_waitp->ret = 0;
	else
		wr_waitp->ret = -EIO;

	if (wr_waitp->ret)
		pr_err("%s: err:%u\n", func, ret);

	complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}

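/*
 * Per-device hash table mapping a listening cxgbit_np to the server TID
 * (stid) allocated for it on that adapter.  The bucket index is derived
 * from the cnp pointer itself.
 */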
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}

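/*
 * Start a hardware listening server for an IPv6 address.  A non-wildcard
 * local address must first be installed in the adapter's CLIP (compressed
 * local IP) table; the entry is released again if server creation fails
 * for any reason other than a firmware timeout.
 */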
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				     &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				   &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;
	return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	int addr_type;

	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
			return true;
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		addr_type = ipv6_addr_type((const struct in6_addr *)
				&sin6->sin6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}

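/*
 * Program one adapter to listen on behalf of cnp: allocate a server TID
 * (stid), record the cnp -> stid mapping, then issue the address-family
 * specific server request.  On failure the mapping is removed, but the
 * stid is intentionally not freed after a firmware timeout, presumably
 * because a late reply could still reference it.
 */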
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}

static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);

	return count ? 0 : -1;
}

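/*
 * Entry point from the iSCSI target core for bringing up a network
 * portal.  A wildcard address (INADDR_ANY or the IPv6 any address) is
 * offloaded on every registered adapter; a specific address only on the
 * adapter that owns it.
 */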
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);
	kref_init(&cnp->kref);
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}

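/*
 * Called from the target login thread: sleep until cxgbit_pass_establish()
 * queues a fully established socket on np_accept_list, then hand the
 * first queued connection to the core.
 */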
int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}

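/*
 * Tear down the hardware listening server on one adapter: drop the
 * cnp -> stid mapping, ask the firmware to remove the server and wait
 * for its reply, then release the CLIP entry (IPv6 only) and the stid.
 */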
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;
	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_free_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	bool found = false;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cdev == cnp->com.cdev) {
			found = true;
			break;
		}
	}
	if (!found)
		goto out;

	__cxgbit_free_cdev_np(cdev, cnp);
out:
	mutex_unlock(&cdev_list_lock);
}

void cxgbit_free_np(struct iscsi_np *np)
{
	struct cxgbit_np *cnp = np->np_context;

	cnp->com.state = CSK_STATE_DEAD;
	if (cnp->com.cdev)
		cxgbit_free_cdev_np(cnp);
	else
		cxgbit_free_all_np(cnp);

	np->np_context = NULL;
	cxgbit_put_cnp(cnp);
}

static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
			      NULL, NULL);

	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_abort_req), 16);

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
			  csk->com.cdev, cxgbit_abort_arp_failure);

	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	__kfree_skb(skb);

	if (csk->com.state != CSK_STATE_ESTABLISHED)
		goto no_abort;

	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
	csk->com.state = CSK_STATE_ABORTING;

	cxgbit_send_abort_req(csk);

	return;

no_abort:
	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
	cxgbit_put_csk(csk);
}

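/*
 * Abort a connection from process context.  If another context currently
 * owns csk->lock, the request is deferred by queueing the skb on the
 * backlog queue with __cxgbit_abort_conn() as its callback; the lock
 * owner runs queued callbacks when it releases the lock.  The caller
 * then waits (generously, 600 s) for the abort reply.
 */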
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
		__skb_queue_tail(&csk->backlogq, skb);
	} else {
		__cxgbit_abort_conn(csk, skb);
	}
	spin_unlock_bh(&csk->lock);

	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
			      csk->tid, 600, __func__);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	if (release)
		cxgbit_put_csk(csk);
}

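/*
 * Derive the effective MSS from the firmware-negotiated TCP options:
 * start from the MTU table entry, subtract the IP and TCP header sizes,
 * account for the timestamp option, and clamp to a floor of 128 bytes.
 */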
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
			((csk->com.remote_addr.ss_family == AF_INET) ?
			sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
			sizeof(struct tcphdr);
	csk->mss = csk->emss;
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (csk->emss < 128)
		csk->emss = 128;
	if (csk->emss & 7)
		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	__skb_queue_purge(&csk->txq);
	__skb_queue_purge(&csk->rxq);
	__skb_queue_purge(&csk->backlogq);
	__skb_queue_purge(&csk->ppodq);
	__skb_queue_purge(&csk->skbq);

	while ((skb = cxgbit_sock_dequeue_wr(csk)))
		kfree_skb(skb);

	__kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					     &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
			 csk->com.local_addr.ss_family);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	cxgbit_put_cdev(cdev);

	kfree(csk);
}

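/*
 * Scale the default 256 KB send and receive windows linearly with the
 * port's link speed, in units of 10 Gb/s, so faster links get
 * proportionally larger TCP windows.
 */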
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;

	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;

	return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;

		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);

	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
#endif

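/*
 * Resolve the L2 path for a new passive connection and fill in the
 * per-connection transmit parameters: L2T entry, MTU, TX channel, SMT
 * index, TX/RSS queue selection (round-robin across the port's queues)
 * and the TCP windows.  Loopback destinations are first mapped back to
 * the port that owns the address.
 */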
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq / cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq / cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq / cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
				(cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq / cdev->lldi.nports;
		rxq_idx = (port_id * step) +
				(cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, tid, 0);
	cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
		struct l2t_entry *l2e)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	if (csk->com.state != CSK_STATE_ESTABLISHED) {
		__kfree_skb(skb);
		return;
	}

	cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns the number of credits sent.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -1;

	credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
		      RX_CREDITS_V(csk->rx_credits);

	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
			    credit_dack);

	csk->rx_credits = 0;

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock_bh(&csk->lock);
		return 0;
	}

	cxgbit_send_rx_credits(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

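/*
 * Pre-allocate the small control skbs (sized for the largest of the
 * flowc work request and the abort req/rpl CPLs) plus the LRO header
 * skb, so that sending an abort or the initial flowc later cannot fail
 * on memory allocation.
 */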
#define FLOWC_WR_NPARAMS_MIN	9
#define FLOWC_WR_NPARAMS_MAX	11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len, flowclen;
	u8 i;

	flowclen = offsetof(struct fw_flowc_wr,
			    mnemval[FLOWC_WR_NPARAMS_MAX]);

	len = max_t(u32, sizeof(struct cpl_abort_req),
		    sizeof(struct cpl_abort_rpl));

	len = max(len, flowclen);
	len = roundup(len, 16);

	for (i = 0; i < 3; i++) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
			goto out;
		__skb_queue_tail(&csk->skbq, skb);
	}

	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
	if (!skb)
		goto out;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	csk->lro_hskb = skb;

	return 0;
out:
	__skb_queue_purge(&csk->skbq);
	return -ENOMEM;
}

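/*
 * Build and send CPL_PASS_ACCEPT_RPL.  opt0 carries the window scale,
 * MSS index, L2T index, TX channel and receive buffer size; opt2 the
 * RSS queue, the negotiated TCP options, congestion control and the
 * initial send sequence number.  The window programmed in opt0 is
 * capped at RCV_BUFSIZ_M (in 1 KB units); the remainder is granted
 * later through RX_DATA_ACK credits.
 */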
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
		      req->tcpopt.tstamp,
		      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 =  TCAM_BYPASS_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(csk->l2t->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		DSCP_V(csk->tos >> 2) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;

	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	hlen = ntohl(req->hdr_len);

	if (is_t5(lldi->adapter_type))
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
	else
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = NULL;
	struct cxgbit_np *cnp;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	u16 peer_mss = ntohs(req->tcpopt.mss);
	unsigned short hdrs;

	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int ret;
	int iptype;

	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
		 __func__, cdev, stid, tid);

	cnp = lookup_stid(t, stid);
	if (!cnp) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto rel_skb;
	}

	if (cnp->com.state != CSK_STATE_LISTEN) {
		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
		       __func__);
		goto reject;
	}

	csk = lookup_tid(t, tid);
	if (csk) {
		pr_err("%s csk not null tid %u\n",
		       __func__, tid);
		goto rel_skb;
	}

	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
			peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n",
			 __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
				      *(__be32 *)local_ip,
				      *(__be32 *)peer_ip,
				      local_port, peer_port,
				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n",
			 __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
				       local_ip, peer_ip,
				       local_port, peer_port,
				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				       ((struct sockaddr_in6 *)
					&cnp->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
	if (!csk) {
		dst_release(dst);
		goto rel_skb;
	}

	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
				  dst, cdev);
	if (ret) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(csk);
		goto reject;
	}

	kref_init(&csk->kref);
	init_completion(&csk->com.wr_wait.completion);

	INIT_LIST_HEAD(&csk->accept_node);

	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
	if (peer_mss && csk->mtu > (peer_mss + hdrs))
		csk->mtu = peer_mss + hdrs;

	csk->com.state = CSK_STATE_CONNECTING;
	csk->com.cdev = cdev;
	csk->cnp = cnp;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->dst = dst;
	csk->tid = tid;
	csk->wr_cred = cdev->lldi.wr_cred -
			DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &csk->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&csk->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		cxgb4_clip_get(cdev->lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr,
			       1);

		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	skb_queue_head_init(&csk->rxq);
	skb_queue_head_init(&csk->txq);
	skb_queue_head_init(&csk->ppodq);
	skb_queue_head_init(&csk->backlogq);
	skb_queue_head_init(&csk->skbq);
	cxgbit_sock_reset_wr_list(csk);
	spin_lock_init(&csk->lock);
	init_waitqueue_head(&csk->waitq);
	init_waitqueue_head(&csk->ack_waitq);
	csk->lock_owner = false;

	if (cxgbit_alloc_csk_skb(csk)) {
		dst_release(dst);
		kfree(csk);
		goto rel_skb;
	}

	cxgbit_get_cdev(cdev);

	spin_lock(&cdev->cskq.lock);
	list_add_tail(&csk->list, &cdev->cskq.list);
	spin_unlock(&cdev->cskq.lock);
	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
	cxgbit_pass_accept_rpl(csk, req);
	goto rel_skb;

reject:
	cxgbit_release_tid(cdev, tid);
rel_skb:
	__kfree_skb(skb);
}

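/*
 * Size the FW_FLOWC_WR: the parameter count depends on whether a send
 * window scale was negotiated (and on DCB support), and the length is
 * rounded up to a whole number of 16-byte firmware credits.
 */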
static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
			   u32 *flowclenp)
{
	u32 nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;

	if (csk->snd_wscale)
		nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the flowc request.
	 * Pass back the nparams and actual flowc length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;
	return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = __skb_put_zero(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else {
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
	}
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u; rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}

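/*
 * Enable iSCSI header/data digest offload by rewriting the ULP submode
 * bits (bits 4-5) of the connection's TCB via CPL_SET_TCB_FIELD, then
 * wait for the firmware acknowledgement.
 */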
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

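/*
 * CPL_PASS_ESTABLISH completes the passive open: record the initial
 * send/receive sequence numbers, stash any receive window that did not
 * fit in opt0 as pending RX credits, and queue the socket on the
 * listener's accept list for cxgbit_accept_np().
 */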
1587static void
1588cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
1589{
1590        struct cpl_pass_establish *req = cplhdr(skb);
1591        struct tid_info *t = cdev->lldi.tids;
1592        unsigned int tid = GET_TID(req);
1593        struct cxgbit_sock *csk;
1594        struct cxgbit_np *cnp;
1595        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
1596        u32 snd_isn = be32_to_cpu(req->snd_isn);
1597        u32 rcv_isn = be32_to_cpu(req->rcv_isn);
1598
1599        csk = lookup_tid(t, tid);
1600        if (unlikely(!csk)) {
1601                pr_err("can't find connection for tid %u.\n", tid);
1602                goto rel_skb;
1603        }
1604        cnp = csk->cnp;
1605
1606        pr_debug("%s: csk %p; tid %u; cnp %p\n",
1607                 __func__, csk, tid, cnp);
1608
1609        csk->write_seq = snd_isn;
1610        csk->snd_una = snd_isn;
1611        csk->snd_nxt = snd_isn;
1612
1613        csk->rcv_nxt = rcv_isn;
1614
1615        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
1616                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
1617
1618        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1619        cxgbit_set_emss(csk, tcp_opt);
1620        dst_confirm(csk->dst);
1621        csk->com.state = CSK_STATE_ESTABLISHED;
1622        spin_lock_bh(&cnp->np_accept_lock);
1623        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
1624        spin_unlock_bh(&cnp->np_accept_lock);
1625        complete(&cnp->accept_comp);
1626rel_skb:
1627        __kfree_skb(skb);
1628}
1629
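    /*
     * Hand an skb to the connection's rx queue and wake any thread
     * sleeping on csk->waitq to process it.
     */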
1630static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1631{
1632        cxgbit_skcb_flags(skb) = 0;
1633        spin_lock_bh(&csk->rxq.lock);
1634        __skb_queue_tail(&csk->rxq, skb);
1635        spin_unlock_bh(&csk->rxq.lock);
1636        wake_up(&csk->waitq);
1637}
1638
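    /*
     * CPL_PEER_CLOSE: the peer sent a FIN.  While ESTABLISHED the skb
     * is passed up through the rx queue so the upper layer observes the
     * half-close; otherwise it only advances the close state machine
     * (CLOSING -> MORIBUND -> DEAD), dropping a reference once both
     * directions are shut down.
     */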
1639static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1640{
1641        pr_debug("%s: csk %p; tid %u; state %d\n",
1642                 __func__, csk, csk->tid, csk->com.state);
1643
1644        switch (csk->com.state) {
1645        case CSK_STATE_ESTABLISHED:
1646                csk->com.state = CSK_STATE_CLOSING;
1647                cxgbit_queue_rx_skb(csk, skb);
1648                return;
1649        case CSK_STATE_CLOSING:
1650                /* simultaneous close */
1651                csk->com.state = CSK_STATE_MORIBUND;
1652                break;
1653        case CSK_STATE_MORIBUND:
1654                csk->com.state = CSK_STATE_DEAD;
1655                cxgbit_put_csk(csk);
1656                break;
1657        case CSK_STATE_ABORTING:
1658                break;
1659        default:
1660                pr_info("%s: cpl_peer_close in bad state %d\n",
1661                        __func__, csk->com.state);
1662        }
1663
1664        __kfree_skb(skb);
1665}
1666
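    /*
     * CPL_CLOSE_CON_RPL: our FIN has been processed by the hardware.
     * Local-side counterpart of cxgbit_peer_close() for finishing the
     * shutdown state machine.
     */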
1667static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1668{
1669        pr_debug("%s: csk %p; tid %u; state %d\n",
1670                 __func__, csk, csk->tid, csk->com.state);
1671
1672        switch (csk->com.state) {
1673        case CSK_STATE_CLOSING:
1674                csk->com.state = CSK_STATE_MORIBUND;
1675                break;
1676        case CSK_STATE_MORIBUND:
1677                csk->com.state = CSK_STATE_DEAD;
1678                cxgbit_put_csk(csk);
1679                break;
1680        case CSK_STATE_ABORTING:
1681        case CSK_STATE_DEAD:
1682                break;
1683        default:
1684                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
1685                        __func__, csk->com.state);
1686        }
1687
1688        __kfree_skb(skb);
1689}
1690
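    /*
     * CPL_ABORT_REQ_RSS: the connection was reset (peer RST or a
     * hardware/firmware abort).  Negative advice is logged and ignored.
     * The socket is marked DEAD, pending tx is purged, and an abort
     * reply is built from an skb pre-allocated on csk->skbq so this
     * path cannot fail on allocation.  A FlowC WR goes out first if
     * none has been sent, as the firmware expects one before any other
     * work request on the connection.
     */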
1691static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1692{
1693        struct cpl_abort_req_rss *hdr = cplhdr(skb);
1694        unsigned int tid = GET_TID(hdr);
1695        struct sk_buff *rpl_skb;
1696        bool release = false;
1697        bool wakeup_thread = false;
1698        u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
1699
1700        pr_debug("%s: csk %p; tid %u; state %d\n",
1701                 __func__, csk, tid, csk->com.state);
1702
1703        if (cxgb_is_neg_adv(hdr->status)) {
1704                pr_err("%s: got negative advice %d on tid %u\n",
1705                       __func__, hdr->status, tid);
1706                goto rel_skb;
1707        }
1708
1709        switch (csk->com.state) {
1710        case CSK_STATE_CONNECTING:
1711        case CSK_STATE_MORIBUND:
1712                csk->com.state = CSK_STATE_DEAD;
1713                release = true;
1714                break;
1715        case CSK_STATE_ESTABLISHED:
1716                csk->com.state = CSK_STATE_DEAD;
1717                wakeup_thread = true;
1718                break;
1719        case CSK_STATE_CLOSING:
1720                csk->com.state = CSK_STATE_DEAD;
1721                if (!csk->conn)
1722                        release = true;
1723                break;
1724        case CSK_STATE_ABORTING:
1725                break;
1726        default:
1727                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
1728                        __func__, csk->com.state);
1729                csk->com.state = CSK_STATE_DEAD;
1730        }
1731
1732        __skb_queue_purge(&csk->txq);
1733
1734        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1735                cxgbit_send_tx_flowc_wr(csk);
1736
1737        rpl_skb = __skb_dequeue(&csk->skbq);
1738
1739        cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
1740        cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1741
1742        if (wakeup_thread) {
1743                cxgbit_queue_rx_skb(csk, skb);
1744                return;
1745        }
1746
1747        if (release)
1748                cxgbit_put_csk(csk);
1749rel_skb:
1750        __kfree_skb(skb);
1751}
1752
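    /*
     * CPL_ABORT_RPL_RSS: completion for an abort we initiated.  Mark
     * the socket DEAD, wake a waiter if CSK_ABORT_RPL_WAIT was armed,
     * and drop the abort path's reference.
     */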
1753static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1754{
1755        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1756
1757        pr_debug("%s: csk %p; tid %u; state %d\n",
1758                 __func__, csk, csk->tid, csk->com.state);
1759
1760        switch (csk->com.state) {
1761        case CSK_STATE_ABORTING:
1762                csk->com.state = CSK_STATE_DEAD;
1763                if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
1764                        cxgbit_wake_up(&csk->com.wr_wait, __func__,
1765                                       rpl->status);
1766                cxgbit_put_csk(csk);
1767                break;
1768        default:
1769                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
1770                        __func__, csk->com.state);
1771        }
1772
1773        __kfree_skb(skb);
1774}
1775
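    /*
     * Consistency check for WR credit accounting: credits still held by
     * the hardware (each pending skb caches its WR's credit count in
     * skb->csum) plus the credits we hold must add up to wr_max_cred.
     * Returns true if the books don't balance.
     */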
1776static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1777{
1778        const struct sk_buff *skb = csk->wr_pending_head;
1779        u32 credit = 0;
1780
1781        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1782                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1783                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1784                return true;
1785        }
1786
1787        while (skb) {
1788                credit += (__force u32)skb->csum;
1789                skb = cxgbit_skcb_tx_wr_next(skb);
1790        }
1791
1792        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1793                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1794                       csk, csk->tid, csk->wr_cred,
1795                       credit, csk->wr_max_cred);
1796
1797                return true;
1798        }
1799
1800        return false;
1801}
1802
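    /*
     * CPL_FW4_ACK: the firmware returns tx credits and, when SEQVAL is
     * set, an updated snd_una.  Retire fully acked skbs from the
     * pending-WR list, adjust a partially acked head in place, and kick
     * cxgbit_push_tx_frames() if the txq has backlog.  Runs with the
     * csk lock held via the backlog mechanism below.
     */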
1803static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
1804{
1805        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
1806        u32 credits = rpl->credits;
1807        u32 snd_una = ntohl(rpl->snd_una);
1808
1809        csk->wr_cred += credits;
1810        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
1811                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1812
1813        while (credits) {
1814                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
1815                u32 csum;
1816
1817                if (unlikely(!p)) {
1818                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
1819                               csk, csk->tid, credits,
1820                               csk->wr_cred, csk->wr_una_cred);
1821                        break;
1822                }
1823                csum = (__force u32)p->csum;
1824                if (unlikely(credits < csum)) {
1825                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
1826                                csk, csk->tid,
1827                                credits, csk->wr_cred, csk->wr_una_cred,
1828                                csum);
1829                        p->csum = (__force __wsum)(csum - credits);
1830                        break;
1831                }
1832
1833                cxgbit_sock_dequeue_wr(csk);
1834                credits -= csum;
1835                kfree_skb(p);
1836        }
1837
1838        if (unlikely(cxgbit_credit_err(csk))) {
1839                cxgbit_queue_rx_skb(csk, skb);
1840                return;
1841        }
1842
1843        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
1844                if (unlikely(before(snd_una, csk->snd_una))) {
1845                        pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
1846                                csk, csk->tid, snd_una,
1847                                csk->snd_una);
1848                        goto rel_skb;
1849                }
1850
1851                if (csk->snd_una != snd_una) {
1852                        csk->snd_una = snd_una;
1853                        dst_confirm(csk->dst);
1854                        wake_up(&csk->ack_waitq);
1855                }
1856        }
1857
1858        if (skb_queue_len(&csk->txq))
1859                cxgbit_push_tx_frames(csk);
1860
1861rel_skb:
1862        __kfree_skb(skb);
1863}
1864
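    /*
     * CPL_SET_TCB_RPL: completion for the CPL_SET_TCB_FIELD requests
     * issued by the connection setup paths above.  Wake the waiter with
     * the status and drop the reference taken before the request was
     * sent.
     */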
1865static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1866{
1867        struct cxgbit_sock *csk;
1868        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1869        unsigned int tid = GET_TID(rpl);
1870        struct cxgb4_lld_info *lldi = &cdev->lldi;
1871        struct tid_info *t = lldi->tids;
1872
1873        csk = lookup_tid(t, tid);
1874        if (unlikely(!csk)) {
1875                pr_err("can't find connection for tid %u.\n", tid);
1876                goto rel_skb;
1877        }
1878
1879        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1880
1881        cxgbit_put_csk(csk);
1882rel_skb:
1883        __kfree_skb(skb);
1884}
1885
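    /*
     * CPL_RX_DATA: inbound TCP payload.  Look up the connection by TID
     * and defer the skb to its rx queue for thread-context processing.
     */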
1886static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1887{
1888        struct cxgbit_sock *csk;
1889        struct cpl_rx_data *cpl = cplhdr(skb);
1890        unsigned int tid = GET_TID(cpl);
1891        struct cxgb4_lld_info *lldi = &cdev->lldi;
1892        struct tid_info *t = lldi->tids;
1893
1894        csk = lookup_tid(t, tid);
1895        if (unlikely(!csk)) {
1896                pr_err("can't find conn. for tid %u.\n", tid);
1897                goto rel_skb;
1898        }
1899
1900        cxgbit_queue_rx_skb(csk, skb);
1901        return;
1902rel_skb:
1903        __kfree_skb(skb);
1904}
1905
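    /*
     * Run a connection-scoped CPL handler under the csk lock.  If
     * another context owns the socket (lock_owner), park the skb on
     * backlogq instead; the owner replays it via the handler stashed in
     * cxgbit_skcb_rx_backlog_fn() when it releases the lock.
     * cxgbit_process_rx_cpl() wraps this with a temporary reference for
     * handlers that may drop the socket's last one.
     */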
1906static void
1907__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1908{
1909        spin_lock(&csk->lock);
1910        if (csk->lock_owner) {
1911                __skb_queue_tail(&csk->backlogq, skb);
1912                spin_unlock(&csk->lock);
1913                return;
1914        }
1915
1916        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
1917        spin_unlock(&csk->lock);
1918}
1919
1920static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1921{
1922        cxgbit_get_csk(csk);
1923        __cxgbit_process_rx_cpl(csk, skb);
1924        cxgbit_put_csk(csk);
1925}
1926
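    /*
     * Demultiplex connection-scoped CPL messages to their handlers.
     * The header is cast to cpl_tx_data only to reach the TID, which
     * sits at the same offset in all of these messages.  CPL_FW4_ACK
     * skips the extra reference (ref = false), presumably as a
     * fast-path shortcut for frequent credit returns.
     */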
1927static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1928{
1929        struct cxgbit_sock *csk;
1930        struct cpl_tx_data *cpl = cplhdr(skb);
1931        struct cxgb4_lld_info *lldi = &cdev->lldi;
1932        struct tid_info *t = lldi->tids;
1933        unsigned int tid = GET_TID(cpl);
1934        u8 opcode = cxgbit_skcb_rx_opcode(skb);
1935        bool ref = true;
1936
1937        switch (opcode) {
1938        case CPL_FW4_ACK:
1939                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
1940                ref = false;
1941                break;
1942        case CPL_PEER_CLOSE:
1943                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
1944                break;
1945        case CPL_CLOSE_CON_RPL:
1946                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
1947                break;
1948        case CPL_ABORT_REQ_RSS:
1949                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
1950                break;
1951        case CPL_ABORT_RPL_RSS:
1952                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
1953                break;
1954        default:
1955                goto rel_skb;
1956        }
1957
1958        csk = lookup_tid(t, tid);
1959        if (unlikely(!csk)) {
1960                pr_err("can't find conn. for tid %u.\n", tid);
1961                goto rel_skb;
1962        }
1963
1964        if (ref)
1965                cxgbit_process_rx_cpl(csk, skb);
1966        else
1967                __cxgbit_process_rx_cpl(csk, skb);
1968
1969        return;
1970rel_skb:
1971        __kfree_skb(skb);
1972}
1973
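    /*
     * Dispatch table, indexed by CPL opcode from the ULD rx path:
     * server-TID messages resolve to endpoint handlers directly, while
     * everything connection-scoped funnels through cxgbit_rx_cpl().
     */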
1974cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
1975        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
1976        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
1977        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
1978        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
1979        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
1980        [CPL_RX_DATA]           = cxgbit_rx_data,
1981        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
1982        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
1983        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
1984        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
1985        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
1986};
1987