linux/drivers/target/iscsi/cxgbit/cxgbit_cm.c
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

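/*
 * Firmware work-request completion plumbing: a cxgbit_wr_wait pairs a
 * completion with a return code.  cxgbit_init_wr_wait() re-arms it before
 * a CPL is posted, cxgbit_wake_up() is invoked from the reply handler,
 * and cxgbit_wait_for_reply() blocks (timeout given in seconds) until the
 * reply arrives or the wait times out.
 */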
static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
        if (ret == CPL_ERR_NONE)
                wr_waitp->ret = 0;
        else
                wr_waitp->ret = -EIO;

        if (wr_waitp->ret)
                pr_err("%s: err:%u\n", func, ret);

        complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
                      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
                      const char *func)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
        if (!ret) {
                pr_info("%s - Device %s not responding tid %u\n",
                        func, pci_name(cdev->lldi.pdev), tid);
                wr_waitp->ret = -ETIMEDOUT;
        }
out:
        if (wr_waitp->ret)
                pr_info("%s: FW reply %d tid %u\n",
                        pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
        return wr_waitp->ret;
}

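/*
 * Per-device hash table mapping a listening endpoint (cxgbit_np) to the
 * server TID (stid) allocated for it on that adapter.  Buckets are
 * derived from the cnp pointer itself; entries are singly linked lists
 * protected by cdev->np_lock.
 */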
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
                   unsigned int stid)
{
        struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                int bucket = cxgbit_np_hashfn(cnp);

                p->cnp = cnp;
                p->stid = stid;
                spin_lock(&cdev->np_lock);
                p->next = cdev->np_hash_tab[bucket];
                cdev->np_hash_tab[bucket] = p;
                spin_unlock(&cdev->np_lock);
        }

        return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p;

        spin_lock(&cdev->np_lock);
        for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

        spin_lock(&cdev->np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        *prev = p->next;
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
        struct cxgbit_np *cnp;

        cnp = container_of(kref, struct cxgbit_np, kref);
        kfree(cnp);
}

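/*
 * Start a hardware IPv6 listener: install a CLIP (compressed local IP)
 * entry for the local address unless it is the wildcard, post the
 * create-server CPL for this stid, then wait for the firmware's reply.
 * On failure the CLIP reference is dropped again, except on -ETIMEDOUT,
 * when the firmware presumably still holds it.
 */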
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                     &cnp->com.local_addr;
        int addr_type;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

        addr_type = ipv6_addr_type((const struct in6_addr *)
                                   &sin6->sin6_addr);
        if (addr_type != IPV6_ADDR_ANY) {
                ret = cxgb4_clip_get(cdev->lldi.ports[0],
                                     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
                if (ret) {
                        pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
                               sin6->sin6_addr.s6_addr, ret);
                        return -ENOMEM;
                }
        }

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server6(cdev->lldi.ports[0],
                                   stid, &sin6->sin6_addr,
                                   sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

                pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
                       ret, stid, sin6->sin6_addr.s6_addr,
                       ntohs(sin6->sin6_port));
        }

        return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)
                                   &cnp->com.local_addr;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server(cdev->lldi.ports[0],
                                  stid, sin->sin_addr.s_addr,
                                  sin->sin_port, 0,
                                  cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev,
                                            &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret)
                pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
                       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
        return ret;
}

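/* Map a net_device back to the cxgbit_device (adapter) that owns it. */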
struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;
                                return cdev;
                        }
                }
        }

        return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_BONDING) {
                pr_err("Bond devices are not supported. Interface:%s\n",
                       ndev->name);
                return NULL;
        }

        if (is_vlan_dev(ndev))
                return vlan_dev_real_dev(ndev);

        return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
        struct net_device *ndev;

        ndev = __ip_dev_find(&init_net, saddr, false);
        if (!ndev)
                return NULL;

        return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
        struct net_device *ndev = NULL;
        bool found = false;

        if (IS_ENABLED(CONFIG_IPV6)) {
                for_each_netdev_rcu(&init_net, ndev)
                        if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
                                found = true;
                                break;
                        }
        }
        if (!found)
                return NULL;
        return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        struct net_device *ndev = NULL;
        struct cxgbit_device *cdev = NULL;

        rcu_read_lock();
        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
        }
        if (!ndev)
                goto out;

        cdev = cxgbit_find_device(ndev, NULL);
out:
        rcu_read_unlock();
        return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        int addr_type;

        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                addr_type = ipv6_addr_type((const struct in6_addr *)
                                &sin6->sin6_addr);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

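/*
 * Set up a listener on one adapter: allocate an stid for the address
 * family, record the cnp -> stid mapping, then create the IPv4 or IPv6
 * hardware server.  On -ETIMEDOUT the stid is deliberately not freed,
 * presumably because the firmware may still reply for it.
 */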
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        int ss_family = cnp->com.local_addr.ss_family;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -EINVAL;
        }

        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_free_stid(cdev->lldi.tids, stid,
                                        ss_family);
                cxgbit_np_hash_del(cdev, cnp);
                return ret;
        }
        return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret = -1;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_np_cdev(cnp);
        if (!cdev)
                goto out;

        if (cxgbit_np_hash_find(cdev, cnp) >= 0)
                goto out;

        if (__cxgbit_setup_cdev_np(cdev, cnp))
                goto out;

        cnp->com.cdev = cdev;
        ret = 0;
out:
        mutex_unlock(&cdev_list_lock);
        return ret;
}

static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;
        u32 count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
                if (ret != 0)
                        continue;
                count++;
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;
}

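/*
 * iscsit transport setup_np entry point: allocate and initialize the
 * cxgbit listening context.  A wildcard local address is offloaded on
 * every registered adapter; otherwise only on the adapter that owns the
 * address.
 */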
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
        struct cxgbit_np *cnp;
        int ret;

        if ((ksockaddr->ss_family != AF_INET) &&
            (ksockaddr->ss_family != AF_INET6))
                return -EINVAL;

        cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
        if (!cnp)
                return -ENOMEM;

        init_waitqueue_head(&cnp->accept_wait);
        init_completion(&cnp->com.wr_wait.completion);
        init_completion(&cnp->accept_comp);
        INIT_LIST_HEAD(&cnp->np_accept_list);
        spin_lock_init(&cnp->np_accept_lock);
        kref_init(&cnp->kref);
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&cnp->com.local_addr, &np->np_sockaddr,
               sizeof(cnp->com.local_addr));

        cnp->np = np;
        cnp->com.cdev = NULL;

        if (cxgbit_inaddr_any(cnp))
                ret = cxgbit_setup_all_np(cnp);
        else
                ret = cxgbit_setup_cdev_np(cnp);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return -EINVAL;
        }

        np->np_context = cnp;
        cnp->com.state = CSK_STATE_LISTEN;
        return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
                     struct cxgbit_sock *csk)
{
        conn->login_family = np->np_sockaddr.ss_family;
        conn->login_sockaddr = csk->com.remote_addr;
        conn->local_sockaddr = csk->com.local_addr;
}

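/*
 * iscsit transport accept_np entry point: sleep until
 * cxgbit_pass_establish() queues an established socket on
 * np_accept_list, then hand the first one to the login thread.  Bails
 * out if the wait is interrupted or the np thread is being reset.
 */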
int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
        int ret = 0;

accept_wait:
        ret = wait_for_completion_interruptible(&cnp->accept_comp);
        if (ret)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                /**
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail
                 **/
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        spin_lock_bh(&cnp->np_accept_lock);
        if (list_empty(&cnp->np_accept_list)) {
                spin_unlock_bh(&cnp->np_accept_lock);
                goto accept_wait;
        }

        csk = list_first_entry(&cnp->np_accept_list,
                               struct cxgbit_sock,
                               accept_node);

        list_del_init(&csk->accept_node);
        spin_unlock_bh(&cnp->np_accept_lock);
        conn->context = csk;
        csk->conn = conn;

        cxgbit_set_conn_info(np, conn, csk);
        return 0;
}

static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        bool ipv6 = false;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL;
        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        if (cnp->np->np_sockaddr.ss_family == AF_INET6)
                ipv6 = true;

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);
        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);

        if (ret > 0)
                ret = net_xmit_errno(ret);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return ret;
        }

        ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                    0, 10, __func__);
        if (ret == -ETIMEDOUT)
                return ret;

        if (ipv6 && cnp->com.cdev) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr,
                                   1);
        }

        cxgb4_free_stid(cdev->lldi.tids, stid,
                        cnp->com.local_addr.ss_family);
        return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_free_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
        }
        mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        bool found = false;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cdev == cnp->com.cdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto out;

        __cxgbit_free_cdev_np(cdev, cnp);
out:
        mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

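/*
 * Tear down a listener: mark it dead, remove the hardware server(s),
 * then release any connections still parked on the accept list before
 * dropping the listener's own reference.
 */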
void cxgbit_free_np(struct iscsi_np *np)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk, *tmp;

        cnp->com.state = CSK_STATE_DEAD;
        if (cnp->com.cdev)
                cxgbit_free_cdev_np(cnp);
        else
                cxgbit_free_all_np(cnp);

        spin_lock_bh(&cnp->np_accept_lock);
        list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
                list_del_init(&csk->accept_node);
                __cxgbit_free_conn(csk);
        }
        spin_unlock_bh(&cnp->np_accept_lock);

        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
}

static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
                              NULL, NULL);

        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
        cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = handle;

        pr_debug("%s cxgbit_device %p\n", __func__, handle);
        kfree_skb(skb);
        cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbit_device *cdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        pr_debug("%s cdev %p\n", __func__, cdev);
        req->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_abort_req), 16);

        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        skb = __skb_dequeue(&csk->skbq);
        cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
                          csk->com.cdev, cxgbit_abort_arp_failure);

        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        __kfree_skb(skb);

        if (csk->com.state != CSK_STATE_ESTABLISHED)
                goto no_abort;

        set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
        csk->com.state = CSK_STATE_ABORTING;

        cxgbit_send_abort_req(csk);

        return;

no_abort:
        cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
        cxgbit_put_csk(csk);
}

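/*
 * Abort an established connection from process context.  If another
 * context currently owns csk->lock, the abort is deferred by queueing
 * the skb on the backlog for the lock owner to run; either way the
 * caller then waits for the abort reply from hardware.
 */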
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
        struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
                __skb_queue_tail(&csk->backlogq, skb);
        } else {
                __cxgbit_abort_conn(csk, skb);
        }
        spin_unlock_bh(&csk->lock);

        cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
                              csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
        struct iscsi_conn *conn = csk->conn;
        bool release = false;

        pr_debug("%s: state %d\n",
                 __func__, csk->com.state);

        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
                        csk->com.state = CSK_STATE_ABORTING;
                        cxgbit_send_abort_req(csk);
                }
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                cxgbit_send_halfclose(csk);
                break;
        case CSK_STATE_DEAD:
                release = true;
                break;
        default:
                pr_err("%s: csk %p; state %d\n",
                       __func__, csk, csk->com.state);
        }
        spin_unlock_bh(&csk->lock);

        if (release)
                cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
        __cxgbit_free_conn(conn->context);
}

static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
                        ((csk->com.remote_addr.ss_family == AF_INET) ?
                        sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                        sizeof(struct tcphdr);
        csk->mss = csk->emss;
        if (TCPOPT_TSTAMP_G(opt))
                csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (csk->emss < 128)
                csk->emss = 128;
        if (csk->emss & 7)
                pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
                        TCPOPT_MSS_G(opt), csk->mss, csk->emss);
        pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
                 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;

        __skb_queue_purge(&csk->txq);
        __skb_queue_purge(&csk->rxq);
        __skb_queue_purge(&csk->backlogq);
        __skb_queue_purge(&csk->ppodq);
        __skb_queue_purge(&csk->skbq);

        while ((skb = cxgbit_sock_dequeue_wr(csk)))
                kfree_skb(skb);

        __kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
        struct cxgbit_sock *csk;
        struct cxgbit_device *cdev;

        csk = container_of(kref, struct cxgbit_sock, kref);

        pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

        if (csk->com.local_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                             &csk->com.local_addr;
                cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
                                   (const u32 *)
                                   &sin6->sin6_addr.s6_addr, 1);
        }

        cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
                         csk->com.local_addr.ss_family);
        dst_release(csk->dst);
        cxgb4_l2t_release(csk->l2t);

        cdev = csk->com.cdev;
        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        cxgbit_free_skb(csk);
        cxgbit_put_cnp(csk->cnp);
        cxgbit_put_cdev(cdev);

        kfree(csk);
}

static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
        unsigned int linkspeed;
        u8 scale;

        linkspeed = pi->link_cfg.speed;
        scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
        csk->rcv_win = CXGBIT_10G_RCV_WIN;
        if (scale)
                csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
        csk->snd_win = CXGBIT_10G_SND_WIN;
        if (scale)
                csk->snd_win *= scale;

        pr_debug("%s snd_win %d rcv_win %d\n",
                 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
        return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
        if (!pri_mask)
                return 0;

        return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
        int ret;
        u8 caps;

        struct dcb_app iscsi_dcb_app = {
                .protocol = local_port
        };

        ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

        if (ret)
                return 0;

        if (caps & DCB_CAP_DCBX_VER_IEEE) {
                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
                ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                if (!ret) {
                        iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
                        ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                }
        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

                ret = dcb_getapp(ndev, &iscsi_dcb_app);
        }

        pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

        return cxgbit_select_priority(ret);
}
#endif

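/*
 * Bind a nascent connection to the hardware: resolve the neighbour for
 * the dst, take an L2T (layer-2 table) entry, and derive MTU, tx channel,
 * SMAC index, and tx/rx queue indices from the egress port.  Loopback
 * traffic is steered via the port that owns the peer address; otherwise
 * queues are picked round-robin per port through cdev->selectq[].
 */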
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
                    u16 local_port, struct dst_entry *dst,
                    struct cxgbit_device *cdev)
{
        struct neighbour *n;
        int ret, step;
        struct net_device *ndev;
        u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 priority = 0;
#endif

        n = dst_neigh_lookup(dst, peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        if (!(n->nud_state & NUD_VALID))
                neigh_event_send(n, NULL);

        ret = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                if (iptype == 4)
                        ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
                else if (IS_ENABLED(CONFIG_IPV6))
                        ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
                else
                        ndev = NULL;

                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
                                         n, ndev, 0);
                if (!csk->l2t)
                        goto out;
                csk->mtu = ndev->mtu;
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nchan;
                csk->txq_idx = cxgb4_port_idx(ndev) * step;
                step = cdev->lldi.nrxq /
                        cdev->lldi.nchan;
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                csk->rss_qid = cdev->lldi.rxq_ids[
                                cxgb4_port_idx(ndev) * step];
                csk->port_id = cxgb4_port_idx(ndev);
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        } else {
                ndev = cxgbit_get_real_dev(n->dev);
                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

#ifdef CONFIG_CHELSIO_T4_DCB
                if (cxgbit_get_iscsi_dcb_state(ndev))
                        priority = cxgbit_get_iscsi_dcb_priority(ndev,
                                                                 local_port);

                csk->dcb_priority = priority;

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
                if (!csk->l2t)
                        goto out;
                port_id = cxgb4_port_idx(ndev);
                csk->mtu = dst_mtu(dst);
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nports;
                csk->txq_idx = (port_id * step) +
                                (cdev->selectq[port_id][0]++ % step);
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                step = cdev->lldi.nrxq /
                        cdev->lldi.nports;
                rxq_idx = (port_id * step) +
                                (cdev->selectq[port_id][1]++ % step);
                csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
                csk->port_id = port_id;
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        }
        ret = 0;
out:
        rcu_read_unlock();
        neigh_release(n);
        return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
        u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_tid_release(skb, len, tid, 0);
        cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
                struct l2t_entry *l2e)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
                __kfree_skb(skb);
                return;
        }

        cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns the number of credits sent.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
        u32 credit_dack;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;

        credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
                      RX_CREDITS_V(csk->rx_credits);

        cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
                            credit_dack);

        csk->rx_credits = 0;

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock_bh(&csk->lock);
                return 0;
        }

        cxgbit_send_rx_credits(csk, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

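/*
 * Pre-allocate the skbs a connection needs for teardown and flow control
 * (abort req/rpl and the FLOWC work request) so those paths never have
 * to allocate under memory pressure, plus a zeroed header skb for LRO.
 */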
#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX    11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len, flowclen;
        u8 i;

        flowclen = offsetof(struct fw_flowc_wr,
                            mnemval[FLOWC_WR_NPARAMS_MAX]);

        len = max_t(u32, sizeof(struct cpl_abort_req),
                    sizeof(struct cpl_abort_rpl));

        len = max(len, flowclen);
        len = roundup(len, 16);

        for (i = 0; i < 3; i++) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (!skb)
                        goto out;
                __skb_queue_tail(&csk->skbq, skb);
        }

        skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
        if (!skb)
                goto out;

        memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
        csk->lro_hskb = skb;

        return 0;
out:
        __skb_queue_purge(&csk->skbq);
        return -ENOMEM;
}

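/*
 * Reply to a PASS_ACCEPT_REQ: build the opt0/opt2 TCB options (window
 * scale, MSS index, congestion control, ULP_MODE_ISCSI, ECN if the peer
 * negotiated it) and send the CPL_PASS_ACCEPT_RPL back through the L2T
 * entry, completing the hardware three-way handshake.
 */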
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
        struct sk_buff *skb;
        const struct tcphdr *tcph;
        struct cpl_t5_pass_accept_rpl *rpl5;
        struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        unsigned int len = roundup(sizeof(*rpl5), 16);
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2, hlen;
        u32 wscale;
        u32 win;

        pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                cxgbit_put_csk(csk);
                return;
        }

        rpl5 = __skb_put_zero(skb, len);

        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
        cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
                      req->tcpopt.tstamp,
                      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
        wscale = cxgb_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
         */
        win = csk->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
        opt0 =  TCAM_BYPASS_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(csk->l2t->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                DSCP_V(csk->tos >> 2) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(win);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

        if (!is_t5(lldi->adapter_type))
                opt2 |= RX_FC_DISABLE_F;

        if (req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN_F;
        if (req->tcpopt.sack)
                opt2 |= SACK_EN_F;
        if (wscale)
                opt2 |= WND_SCALE_EN_F;

        hlen = ntohl(req->hdr_len);

        if (is_t5(lldi->adapter_type))
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
        else
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

        if (tcph->ece && tcph->cwr)
                opt2 |= CCTRL_ECN_V(1);

        opt2 |= RX_COALESCE_V(3);
        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

        opt2 |= T5_ISS_F;
        rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

        opt2 |= T5_OPT_2_VALID_F;

        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
        t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
        cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

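/*
 * Handle an incoming SYN (CPL_PASS_ACCEPT_REQ): validate the listening
 * stid, find a route back to the peer, allocate and initialize the
 * cxgbit_sock, clamp the MTU to the peer's advertised MSS, insert the
 * tid, and answer with cxgbit_pass_accept_rpl().  Any failure either
 * releases the tid or silently drops the request.
 */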
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = NULL;
        struct cxgbit_np *cnp;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        u16 peer_mss = ntohs(req->tcpopt.mss);
        unsigned short hdrs;

        struct dst_entry *dst;
        __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int ret;
        int iptype;

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        cnp = lookup_stid(t, stid);
        if (!cnp) {
                pr_err("%s connect request on invalid stid %d\n",
                       __func__, stid);
                goto rel_skb;
        }

        if (cnp->com.state != CSK_STATE_LISTEN) {
                pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
                       __func__);
                goto reject;
        }

        csk = lookup_tid(t, tid);
        if (csk) {
                pr_err("%s csk not null tid %u\n",
                       __func__, tid);
                goto rel_skb;
        }

        cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
                        peer_ip, &local_port, &peer_port);

        /* Find output route */
        if (iptype == 4)  {
                pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
                                      *(__be32 *)local_ip,
                                      *(__be32 *)peer_ip,
                                      local_port, peer_port,
                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
                                       local_ip, peer_ip,
                                       local_port, peer_port,
                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                       ((struct sockaddr_in6 *)
                                        &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }

        csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
        if (!csk) {
                dst_release(dst);
                goto rel_skb;
        }

        ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
                                  dst, cdev);
        if (ret) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(csk);
                goto reject;
        }

        kref_init(&csk->kref);
        init_completion(&csk->com.wr_wait.completion);

        INIT_LIST_HEAD(&csk->accept_node);

        hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
                sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
        if (peer_mss && csk->mtu > (peer_mss + hdrs))
                csk->mtu = peer_mss + hdrs;

        csk->com.state = CSK_STATE_CONNECTING;
        csk->com.cdev = cdev;
        csk->cnp = cnp;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        csk->dst = dst;
        csk->tid = tid;
        csk->wr_cred = cdev->lldi.wr_cred -
                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
        csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;

        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
                                          &csk->com.local_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;

                sin = (struct sockaddr_in *)&csk->com.remote_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;

                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
                cxgb4_clip_get(cdev->lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr,
                               1);

                sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }

        skb_queue_head_init(&csk->rxq);
        skb_queue_head_init(&csk->txq);
        skb_queue_head_init(&csk->ppodq);
        skb_queue_head_init(&csk->backlogq);
        skb_queue_head_init(&csk->skbq);
        cxgbit_sock_reset_wr_list(csk);
        spin_lock_init(&csk->lock);
        init_waitqueue_head(&csk->waitq);
        csk->lock_owner = false;

        if (cxgbit_alloc_csk_skb(csk)) {
                dst_release(dst);
                kfree(csk);
                goto rel_skb;
        }

        cxgbit_get_cnp(cnp);
        cxgbit_get_cdev(cdev);

        spin_lock(&cdev->cskq.lock);
        list_add_tail(&csk->list, &cdev->cskq.list);
        spin_unlock(&cdev->cskq.lock);
        cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
        cxgbit_pass_accept_rpl(csk, req);
        goto rel_skb;

reject:
        cxgbit_release_tid(cdev, tid);
rel_skb:
        __kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
                           u32 *flowclenp)
{
        u32 nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;

        if (csk->snd_wscale)
                nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the flowc request.
         * Pass back the nparams and actual flowc length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;
        return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct fw_flowc_wr *flowc;
        u32 nparams, flowclen16, flowclen;
        struct sk_buff *skb;
        u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

        flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

        skb = __skb_dequeue(&csk->skbq);
        flowc = __skb_put_zero(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                            (csk->com.cdev->lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(csk->emss);

        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
                flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
        else
                flowc->mnemval[8].val = cpu_to_be32(16384);

        index = 9;

        if (csk->snd_wscale) {
                flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
                flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
                index++;
        }

#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == VLAN_NONE) {
                pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
                flowc->mnemval[index].val = cpu_to_be32(0);
        } else
                flowc->mnemval[index].val = cpu_to_be32(
                                (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

        pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
                 " rcv_seq = %u; snd_win = %u; emss = %u\n",
                 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
                 csk->rcv_nxt, csk->snd_win, csk->emss);
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, skb);
        return flowclen16;
}

static int
cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        spin_lock_bh(&csk->lock);
        if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
                spin_unlock_bh(&csk->lock);
                pr_err("%s: csk 0x%p, tid %u, state %u\n",
                       __func__, csk, csk->tid, csk->com.state);
                __kfree_skb(skb);
                return -1;
        }

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);
        cxgbit_ofld_send(csk->com.cdev, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

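/*
 * Program the negotiated iSCSI digest settings into the connection's TCB
 * via CPL_SET_TCB_FIELD: the two-bit field at bit 4 of the ULP submode
 * word enables header and/or data CRC offload.  The reply is awaited so
 * login only proceeds once hardware has switched over.
 */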
1510int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1511{
1512        struct sk_buff *skb;
1513        struct cpl_set_tcb_field *req;
1514        u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1515        u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1516        unsigned int len = roundup(sizeof(*req), 16);
1517        int ret;
1518
1519        skb = alloc_skb(len, GFP_KERNEL);
1520        if (!skb)
1521                return -ENOMEM;
1522
1523        /*  set up ulp submode */
1524        req = __skb_put_zero(skb, len);
1525
1526        INIT_TP_WR(req, csk->tid);
1527        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1528        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1529        req->word_cookie = htons(0);
1530        req->mask = cpu_to_be64(0x3 << 4);
1531        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1532                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
1533        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1534
1535        if (cxgbit_send_tcb_skb(csk, skb))
1536                return -1;
1537
1538        ret = cxgbit_wait_for_reply(csk->com.cdev,
1539                                    &csk->com.wr_wait,
1540                                    csk->tid, 5, __func__);
1541        if (ret)
1542                return -1;
1543
1544        return 0;
1545}
1546
1547int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1548{
1549        struct sk_buff *skb;
1550        struct cpl_set_tcb_field *req;
1551        unsigned int len = roundup(sizeof(*req), 16);
1552        int ret;
1553
1554        skb = alloc_skb(len, GFP_KERNEL);
1555        if (!skb)
1556                return -ENOMEM;
1557
1558        req = __skb_put_zero(skb, len);
1559
1560        INIT_TP_WR(req, csk->tid);
1561        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1562        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1563        req->word_cookie = htons(0);
1564        req->mask = cpu_to_be64(0x3 << 8);
1565        req->val = cpu_to_be64(pg_idx << 8);
1566        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1567
1568        if (cxgbit_send_tcb_skb(csk, skb))
1569                return -1;
1570
1571        ret = cxgbit_wait_for_reply(csk->com.cdev,
1572                                    &csk->com.wr_wait,
1573                                    csk->tid, 5, __func__);
1574        if (ret)
1575                return -1;
1576
1577        return 0;
1578}
1579
static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %u lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        __kfree_skb(skb);
}

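/*
 * CPL_CLOSE_LISTSRV_RPL: completion for tearing down a listening
 * server.  Mirrors cxgbit_pass_open_rpl(): resolve the stid, wake the
 * waiter and drop a reference on the listening endpoint.
 */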
static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %u lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        __kfree_skb(skb);
}

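/*
 * CPL_PASS_ESTABLISH: the three-way handshake of a passively opened
 * connection has completed.  Seed the send/receive sequence state from
 * the hardware ISNs, derive snd_wscale and the emss from the negotiated
 * TCP options, then park the csk on the listener's accept list and
 * signal accept_comp so the iSCSI login thread can pick it up.
 */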
static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        struct cxgbit_sock *csk;
        struct cxgbit_np *cnp;
        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
        u32 snd_isn = be32_to_cpu(req->snd_isn);
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        cnp = csk->cnp;

        pr_debug("%s: csk %p; tid %u; cnp %p\n",
                 __func__, csk, tid, cnp);

        csk->write_seq = snd_isn;
        csk->snd_una = snd_isn;
        csk->snd_nxt = snd_isn;

        csk->rcv_nxt = rcv_isn;

        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
        cxgbit_set_emss(csk, tcp_opt);
        dst_confirm(csk->dst);
        csk->com.state = CSK_STATE_ESTABLISHED;
        spin_lock_bh(&cnp->np_accept_lock);
        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
        spin_unlock_bh(&cnp->np_accept_lock);
        complete(&cnp->accept_comp);
rel_skb:
        __kfree_skb(skb);
}

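/*
 * Hand an skb to the connection's receive path: clear the per-skb
 * flags, append it to csk->rxq under the queue lock and wake whatever
 * sleeps on csk->waitq (the iSCSI rx thread).
 */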
static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_skcb_flags(skb) = 0;
        spin_lock_bh(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        spin_unlock_bh(&csk->rxq.lock);
        wake_up(&csk->waitq);
}

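/*
 * CPL_PEER_CLOSE: the peer sent a FIN.  In ESTABLISHED the skb is
 * queued to the rx path so the target stack sees the half-close; the
 * remaining states just walk the close/abort state machine, dropping a
 * csk reference once the connection reaches DEAD.
 */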
static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_CLOSING;
                cxgbit_queue_rx_skb(csk, skb);
                return;
        case CSK_STATE_CLOSING:
                /* simultaneous close */
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_peer_close in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

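/*
 * CPL_CLOSE_CON_RPL: the firmware has completed our half of the close.
 * Advance CLOSING -> MORIBUND -> DEAD, dropping a csk reference once
 * both directions are shut down.
 */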
static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
        case CSK_STATE_DEAD:
                break;
        default:
                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

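/*
 * CPL_ABORT_REQ_RSS: the hardware reports an incoming RST (or a
 * firmware-generated abort).  Negative advice is logged and ignored.
 * Otherwise mark the connection DEAD, purge pending tx, make sure a
 * flowc WR has been sent (the firmware appears to require one before
 * any other WR on the tid), and answer with a CPL_ABORT_RPL built in an
 * skb taken from the pre-allocated csk->skbq queue, so the reply cannot
 * fail on memory pressure.
 */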
static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *hdr = cplhdr(skb);
        unsigned int tid = GET_TID(hdr);
        struct sk_buff *rpl_skb;
        bool release = false;
        bool wakeup_thread = false;
        u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, tid, csk->com.state);

        if (cxgb_is_neg_adv(hdr->status)) {
                pr_err("%s: got neg advice %d on tid %u\n",
                       __func__, hdr->status, tid);
                goto rel_skb;
        }

        switch (csk->com.state) {
        case CSK_STATE_CONNECTING:
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                release = true;
                break;
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_DEAD;
                wakeup_thread = true;
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_DEAD;
                if (!csk->conn)
                        release = true;
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
                        __func__, csk->com.state);
                csk->com.state = CSK_STATE_DEAD;
        }

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        rpl_skb = __skb_dequeue(&csk->skbq);

        cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, rpl_skb);

        if (wakeup_thread) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (release)
                cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

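/*
 * CPL_ABORT_RPL_RSS: completion of an abort we initiated.  Only
 * meaningful in ABORTING; wake a waiter if one armed
 * CSK_ABORT_RPL_WAIT, then drop the connection reference.
 */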
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);

        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ABORTING:
                csk->com.state = CSK_STATE_DEAD;
                if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
                        cxgbit_wake_up(&csk->com.wr_wait, __func__,
                                       rpl->status);
                cxgbit_put_csk(csk);
                break;
        default:
                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

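/*
 * Sanity-check the tx credit accounting after an ack: the credits the
 * firmware still owes us (the sum of skb->csum over the pending WR
 * chain, where ->csum is reused to store each WR's credit cost) plus
 * the credits currently available must equal the channel total.
 * Returns true if the invariant is broken.
 */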
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
        const struct sk_buff *skb = csk->wr_pending_head;
        u32 credit = 0;

        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
                return true;
        }

        while (skb) {
                credit += (__force u32)skb->csum;
                skb = cxgbit_skcb_tx_wr_next(skb);
        }

        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
                       csk, csk->tid, csk->wr_cred,
                       credit, csk->wr_max_cred);

                return true;
        }

        return false;
}

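/*
 * CPL_FW4_ACK: the firmware has consumed 'credits' worth of tx work
 * requests and may also carry a new peer ack (snd_una).  Return the
 * credits, free fully acked skbs on the pending-WR list, validate the
 * accounting, advance snd_una when SEQVAL is set, and restart
 * transmission if the tx queue backed up while we were out of credits.
 */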
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
        u32 credits = rpl->credits;
        u32 snd_una = ntohl(rpl->snd_una);

        csk->wr_cred += credits;
        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

        while (credits) {
                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
                u32 csum;

                if (unlikely(!p)) {
                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
                               csk, csk->tid, credits,
                               csk->wr_cred, csk->wr_una_cred);
                        break;
                }

                csum = (__force u32)p->csum;
                if (unlikely(credits < csum)) {
                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
                                csk, csk->tid,
                                credits, csk->wr_cred, csk->wr_una_cred,
                                csum);
                        p->csum = (__force __wsum)(csum - credits);
                        break;
                }

                cxgbit_sock_dequeue_wr(csk);
                credits -= csum;
                kfree_skb(p);
        }

        if (unlikely(cxgbit_credit_err(csk))) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
                if (unlikely(before(snd_una, csk->snd_una))) {
                        pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
                                csk, csk->tid, snd_una,
                                csk->snd_una);
                        goto rel_skb;
                }

                if (csk->snd_una != snd_una) {
                        csk->snd_una = snd_una;
                        dst_confirm(csk->dst);
                }
        }

        if (skb_queue_len(&csk->txq))
                cxgbit_push_tx_frames(csk);

rel_skb:
        __kfree_skb(skb);
}

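/*
 * CPL_SET_TCB_RPL: completion for the TCB writes issued by
 * cxgbit_setup_conn_digest()/cxgbit_setup_conn_pgidx().  Wake the
 * waiter and drop the reference taken in cxgbit_send_tcb_skb().
 */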
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
        cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

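/*
 * CPL_RX_DATA: inbound payload for an offloaded connection; just hand
 * it to the per-connection rx queue for the iSCSI rx thread.
 */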
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_rx_data *cpl = cplhdr(skb);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_queue_rx_skb(csk, skb);
        return;
rel_skb:
        __kfree_skb(skb);
}

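/*
 * Run a control CPL against the connection state machine.  If another
 * context currently owns csk->lock (lock_owner set by the tx/rx worker
 * paths), the skb is deferred to backlogq and will be replayed by the
 * owner; the handler to run was stashed in the skb control block by
 * cxgbit_rx_cpl() below.  The cxgbit_process_rx_cpl() wrapper holds an
 * extra reference so the unlock stays safe even if the handler drops
 * the last connection reference.
 */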
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        spin_lock(&csk->lock);
        if (csk->lock_owner) {
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock(&csk->lock);
                return;
        }

        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
        spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_get_csk(csk);
        __cxgbit_process_rx_cpl(csk, skb);
        cxgbit_put_csk(csk);
}

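/*
 * Demultiplex the connection-level control CPLs: record the matching
 * backlog handler in the skb cb, resolve the tid and run the handler
 * under the socket lock.  CPL_FW4_ACK is processed without the extra
 * reference because cxgbit_fw4_ack() never drops a csk reference,
 * unlike the close/abort handlers, so the unlock in
 * __cxgbit_process_rx_cpl() remains safe.
 */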
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_tx_data *cpl = cplhdr(skb);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;
        unsigned int tid = GET_TID(cpl);
        u8 opcode = cxgbit_skcb_rx_opcode(skb);
        bool ref = true;

        switch (opcode) {
        case CPL_FW4_ACK:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
                ref = false;
                break;
        case CPL_PEER_CLOSE:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
                break;
        case CPL_CLOSE_CON_RPL:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
                break;
        case CPL_ABORT_REQ_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
                break;
        case CPL_ABORT_RPL_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
                break;
        default:
                goto rel_skb;
        }

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        if (ref)
                cxgbit_process_rx_cpl(csk, skb);
        else
                __cxgbit_process_rx_cpl(csk, skb);

        return;
rel_skb:
        __kfree_skb(skb);
}

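/*
 * Top-level CPL dispatch table, indexed by opcode.  Listen-side and
 * per-tid lookup messages get dedicated handlers; everything that must
 * run under the per-connection lock is funnelled through
 * cxgbit_rx_cpl().
 */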
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
        [CPL_RX_DATA]           = cxgbit_rx_data,
        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
};