linux/drivers/target/iscsi/cxgbit/cxgbit_cm.c
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
        if (ret == CPL_ERR_NONE)
                wr_waitp->ret = 0;
        else
                wr_waitp->ret = -EIO;

        if (wr_waitp->ret)
                pr_err("%s: err:%u\n", func, ret);

        complete(&wr_waitp->completion);
}

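/*
 * wr_wait handshake: callers arm the completion with
 * cxgbit_init_wr_wait(), post a work request to the adapter, and block
 * in cxgbit_wait_for_reply() below. The CPL reply handler for the
 * request calls cxgbit_wake_up(), which records 0 or -EIO and completes
 * the waiter. A timeout here means the firmware never replied.
 */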
static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
                      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
                      const char *func)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
        if (!ret) {
                pr_info("%s - Device %s not responding tid %u\n",
                        func, pci_name(cdev->lldi.pdev), tid);
                wr_waitp->ret = -ETIMEDOUT;
        }
out:
        if (wr_waitp->ret)
                pr_info("%s: FW reply %d tid %u\n",
                        pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
        return wr_waitp->ret;
}

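/*
 * Per-device hash table mapping a listening cxgbit_np to the server tid
 * (stid) allocated for it on that adapter. The key is the cnp pointer
 * itself; ">> 10" discards the low pointer bits, which carry little
 * entropy for slab-allocated objects.
 */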
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
                   unsigned int stid)
{
        struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                int bucket = cxgbit_np_hashfn(cnp);

                p->cnp = cnp;
                p->stid = stid;
                spin_lock(&cdev->np_lock);
                p->next = cdev->np_hash_tab[bucket];
                cdev->np_hash_tab[bucket] = p;
                spin_unlock(&cdev->np_lock);
        }

        return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p;

        spin_lock(&cdev->np_lock);
        for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

        spin_lock(&cdev->np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        *prev = p->next;
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
        struct cxgbit_np *cnp;

        cnp = container_of(kref, struct cxgbit_np, kref);
        kfree(cnp);
}

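/*
 * Program an IPv6 hardware server. A non-wildcard local address must
 * first be installed in the adapter's CLIP table; the server entry is
 * then created with cxgb4_create_server6() and is confirmed only once
 * the firmware's CPL_PASS_OPEN_RPL arrives (see cxgbit_pass_open_rpl()
 * below).
 */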
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                     &cnp->com.local_addr;
        int addr_type;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

        addr_type = ipv6_addr_type((const struct in6_addr *)
                                   &sin6->sin6_addr);
        if (addr_type != IPV6_ADDR_ANY) {
                ret = cxgb4_clip_get(cdev->lldi.ports[0],
                                     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
                if (ret) {
                        pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
                               sin6->sin6_addr.s6_addr, ret);
                        return -ENOMEM;
                }
        }

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server6(cdev->lldi.ports[0],
                                   stid, &sin6->sin6_addr,
                                   sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

                pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
                       ret, stid, sin6->sin6_addr.s6_addr,
                       ntohs(sin6->sin6_port));
        }

        return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)
                                   &cnp->com.local_addr;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server(cdev->lldi.ports[0],
                                  stid, sin->sin_addr.s_addr,
                                  sin->sin_port, 0,
                                  cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev,
                                            &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret)
                pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
                       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
        return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;
                                return cdev;
                        }
                }
        }

        return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_BONDING) {
                pr_err("Bond devices are not supported. Interface:%s\n",
                       ndev->name);
                return NULL;
        }

        if (is_vlan_dev(ndev))
                return vlan_dev_real_dev(ndev);

        return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
        struct net_device *ndev;

        ndev = __ip_dev_find(&init_net, saddr, false);
        if (!ndev)
                return NULL;

        return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
        struct net_device *ndev = NULL;
        bool found = false;

        if (IS_ENABLED(CONFIG_IPV6)) {
                for_each_netdev_rcu(&init_net, ndev)
                        if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
                                found = true;
                                break;
                        }
        }
        if (!found)
                return NULL;
        return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        struct net_device *ndev = NULL;
        struct cxgbit_device *cdev = NULL;

        rcu_read_lock();
        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
        }
        if (!ndev)
                goto out;

        cdev = cxgbit_find_device(ndev, NULL);
out:
        rcu_read_unlock();
        return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        int addr_type;

        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                addr_type = ipv6_addr_type((const struct in6_addr *)
                                &sin6->sin6_addr);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

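/*
 * Set up one listening server on a single adapter: allocate an stid,
 * record the cnp -> stid mapping, then program the hardware server for
 * the address family. On failure the mapping is removed, but an stid
 * whose creation timed out is deliberately not freed - the firmware may
 * still own it.
 */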
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        int ss_family = cnp->com.local_addr.ss_family;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -EINVAL;
        }

        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_free_stid(cdev->lldi.tids, stid,
                                        ss_family);
                cxgbit_np_hash_del(cdev, cnp);
                return ret;
        }
        return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret = -1;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_np_cdev(cnp);
        if (!cdev)
                goto out;

        if (cxgbit_np_hash_find(cdev, cnp) >= 0)
                goto out;

        if (__cxgbit_setup_cdev_np(cdev, cnp))
                goto out;

        cnp->com.cdev = cdev;
        ret = 0;
out:
        mutex_unlock(&cdev_list_lock);
        return ret;
}

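/*
 * Wildcard (INADDR_ANY/in6addr_any) listeners are replicated on every
 * registered adapter. Setup succeeds if at least one device accepts the
 * server; -ETIMEDOUT from any device aborts the walk, since that
 * adapter is presumed to be unresponsive.
 */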
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;
        u32 count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
                if (ret != 0)
                        continue;
                count++;
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;
}

int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
        struct cxgbit_np *cnp;
        int ret;

        if ((ksockaddr->ss_family != AF_INET) &&
            (ksockaddr->ss_family != AF_INET6))
                return -EINVAL;

        cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
        if (!cnp)
                return -ENOMEM;

        init_waitqueue_head(&cnp->accept_wait);
        init_completion(&cnp->com.wr_wait.completion);
        init_completion(&cnp->accept_comp);
        INIT_LIST_HEAD(&cnp->np_accept_list);
        spin_lock_init(&cnp->np_accept_lock);
        kref_init(&cnp->kref);
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&cnp->com.local_addr, &np->np_sockaddr,
               sizeof(cnp->com.local_addr));

        cnp->np = np;
        cnp->com.cdev = NULL;

        if (cxgbit_inaddr_any(cnp))
                ret = cxgbit_setup_all_np(cnp);
        else
                ret = cxgbit_setup_cdev_np(cnp);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return -EINVAL;
        }

        np->np_context = cnp;
        cnp->com.state = CSK_STATE_LISTEN;
        return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
                     struct cxgbit_sock *csk)
{
        conn->login_family = np->np_sockaddr.ss_family;
        conn->login_sockaddr = csk->com.remote_addr;
        conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
        int ret = 0;

accept_wait:
        ret = wait_for_completion_interruptible(&cnp->accept_comp);
        if (ret)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                /*
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail
                 */
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        spin_lock_bh(&cnp->np_accept_lock);
        if (list_empty(&cnp->np_accept_list)) {
                spin_unlock_bh(&cnp->np_accept_lock);
                goto accept_wait;
        }

        csk = list_first_entry(&cnp->np_accept_list,
                               struct cxgbit_sock,
                               accept_node);

        list_del_init(&csk->accept_node);
        spin_unlock_bh(&cnp->np_accept_lock);
        conn->context = csk;
        csk->conn = conn;

        cxgbit_set_conn_info(np, conn, csk);
        return 0;
}

static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        bool ipv6 = false;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL;
        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        if (cnp->np->np_sockaddr.ss_family == AF_INET6)
                ipv6 = true;

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);
        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);

        if (ret > 0)
                ret = net_xmit_errno(ret);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return ret;
        }

        ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                    0, 10, __func__);
        if (ret == -ETIMEDOUT)
                return ret;

        if (ipv6 && cnp->com.cdev) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr,
                                   1);
        }

        cxgb4_free_stid(cdev->lldi.tids, stid,
                        cnp->com.local_addr.ss_family);
        return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_free_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
        }
        mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        bool found = false;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cdev == cnp->com.cdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto out;

        __cxgbit_free_cdev_np(cdev, cnp);
out:
        mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk, *tmp;

        cnp->com.state = CSK_STATE_DEAD;
        if (cnp->com.cdev)
                cxgbit_free_cdev_np(cnp);
        else
                cxgbit_free_all_np(cnp);

        spin_lock_bh(&cnp->np_accept_lock);
        list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
                list_del_init(&csk->accept_node);
                __cxgbit_free_conn(csk);
        }
        spin_unlock_bh(&cnp->np_accept_lock);

        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
}

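/*
 * Connection teardown helpers. A graceful close sends CPL_CLOSE_CON_REQ
 * (a TCP half-close); an abortive close sends CPL_ABORT_REQ via the L2T
 * entry, with an ARP-failure handler that downgrades the abort to
 * CPL_ABORT_NO_RST and re-sends it on the offload queue.
 */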
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
                              NULL, NULL);

        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
        cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = handle;

        pr_debug("%s cxgbit_device %p\n", __func__, handle);
        kfree_skb(skb);
        cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbit_device *cdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        pr_debug("%s cdev %p\n", __func__, cdev);
        req->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_abort_req), 16);

        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        skb = __skb_dequeue(&csk->skbq);
        cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
                          csk->com.cdev, cxgbit_abort_arp_failure);

        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        __kfree_skb(skb);

        if (csk->com.state != CSK_STATE_ESTABLISHED)
                goto no_abort;

        set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
        csk->com.state = CSK_STATE_ABORTING;

        cxgbit_send_abort_req(csk);

        return;

no_abort:
        cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
        cxgbit_put_csk(csk);
}

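/*
 * Abort from process context. If another context currently owns the
 * sock lock, the abort is deferred by queueing the skb on the backlog
 * queue with __cxgbit_abort_conn() as its callback; the lock owner runs
 * the backlog when it releases the lock. The caller then waits (up to
 * 600 seconds) for the abort to be acknowledged.
 */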
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
        struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
                __skb_queue_tail(&csk->backlogq, skb);
        } else {
                __cxgbit_abort_conn(csk, skb);
        }
        spin_unlock_bh(&csk->lock);

        cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
                              csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
        struct iscsi_conn *conn = csk->conn;
        bool release = false;

        pr_debug("%s: state %d\n",
                 __func__, csk->com.state);

        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
                        csk->com.state = CSK_STATE_ABORTING;
                        cxgbit_send_abort_req(csk);
                }
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                cxgbit_send_halfclose(csk);
                break;
        case CSK_STATE_DEAD:
                release = true;
                break;
        default:
                pr_err("%s: csk %p; state %d\n",
                       __func__, csk, csk->com.state);
        }
        spin_unlock_bh(&csk->lock);

        if (release)
                cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
        __cxgbit_free_conn(conn->context);
}

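/*
 * Derive the effective MSS from the firmware MTU table entry negotiated
 * in the TCP options, minus IP and TCP header overhead and, when
 * timestamps are enabled, the 12 bytes of timestamp option; emss is
 * clamped to a floor of 128 bytes.
 */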
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
                        ((csk->com.remote_addr.ss_family == AF_INET) ?
                        sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                        sizeof(struct tcphdr);
        csk->mss = csk->emss;
        if (TCPOPT_TSTAMP_G(opt))
                csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (csk->emss < 128)
                csk->emss = 128;
        if (csk->emss & 7)
                pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
                        TCPOPT_MSS_G(opt), csk->mss, csk->emss);
        pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
                 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;

        __skb_queue_purge(&csk->txq);
        __skb_queue_purge(&csk->rxq);
        __skb_queue_purge(&csk->backlogq);
        __skb_queue_purge(&csk->ppodq);
        __skb_queue_purge(&csk->skbq);

        while ((skb = cxgbit_sock_dequeue_wr(csk)))
                kfree_skb(skb);

        __kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
        struct cxgbit_sock *csk;
        struct cxgbit_device *cdev;

        csk = container_of(kref, struct cxgbit_sock, kref);

        pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

        if (csk->com.local_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                             &csk->com.local_addr;
                cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
                                   (const u32 *)
                                   &sin6->sin6_addr.s6_addr, 1);
        }

        cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
                         csk->com.local_addr.ss_family);
        dst_release(csk->dst);
        cxgb4_l2t_release(csk->l2t);

        cdev = csk->com.cdev;
        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        cxgbit_free_skb(csk);
        cxgbit_put_cnp(csk->cnp);
        cxgbit_put_cdev(cdev);

        kfree(csk);
}

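/*
 * Scale the default 256KB send and receive windows linearly with link
 * speed in units of 10Gbps, e.g. a 40G port advertises windows four
 * times larger than a 10G port.
 */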
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
        unsigned int linkspeed;
        u8 scale;

        linkspeed = pi->link_cfg.speed;
        scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
        csk->rcv_win = CXGBIT_10G_RCV_WIN;
        if (scale)
                csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
        csk->snd_win = CXGBIT_10G_SND_WIN;
        if (scale)
                csk->snd_win *= scale;

        pr_debug("%s snd_win %d rcv_win %d\n",
                 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
        return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
        if (!pri_mask)
                return 0;

        return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
        int ret;
        u8 caps;

        struct dcb_app iscsi_dcb_app = {
                .protocol = local_port
        };

        ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

        if (ret)
                return 0;

        if (caps & DCB_CAP_DCBX_VER_IEEE) {
                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
                ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                if (!ret) {
                        iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
                        ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                }
        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

                ret = dcb_getapp(ndev, &iscsi_dcb_app);
        }

        pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

        return cxgbit_select_priority(ret);
}
#endif

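/*
 * Bind a nascent connection to hardware resources: resolve the
 * neighbour for the dst, take an L2T (layer-2/next-hop MAC) entry, and
 * pick per-port tx/rx queue indices. Loopback peers are mapped back
 * onto a real port's netdev. With DCB enabled, the L2T entry is
 * allocated with the iSCSI DCB priority so egress frames carry that
 * VLAN priority.
 */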
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
                    u16 local_port, struct dst_entry *dst,
                    struct cxgbit_device *cdev)
{
        struct neighbour *n;
        int ret, step;
        struct net_device *ndev;
        u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 priority = 0;
#endif

        n = dst_neigh_lookup(dst, peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        if (!(n->nud_state & NUD_VALID))
                neigh_event_send(n, NULL);

        ret = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                if (iptype == 4)
                        ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
                else if (IS_ENABLED(CONFIG_IPV6))
                        ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
                else
                        ndev = NULL;

                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
                                         n, ndev, 0);
                if (!csk->l2t)
                        goto out;
                csk->mtu = ndev->mtu;
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nchan;
                csk->txq_idx = cxgb4_port_idx(ndev) * step;
                step = cdev->lldi.nrxq /
                        cdev->lldi.nchan;
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                csk->rss_qid = cdev->lldi.rxq_ids[
                                cxgb4_port_idx(ndev) * step];
                csk->port_id = cxgb4_port_idx(ndev);
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        } else {
                ndev = cxgbit_get_real_dev(n->dev);
                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

#ifdef CONFIG_CHELSIO_T4_DCB
                if (cxgbit_get_iscsi_dcb_state(ndev))
                        priority = cxgbit_get_iscsi_dcb_priority(ndev,
                                                                 local_port);

                csk->dcb_priority = priority;

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
                if (!csk->l2t)
                        goto out;
                port_id = cxgb4_port_idx(ndev);
                csk->mtu = dst_mtu(dst);
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nports;
                csk->txq_idx = (port_id * step) +
                                (cdev->selectq[port_id][0]++ % step);
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                step = cdev->lldi.nrxq /
                        cdev->lldi.nports;
                rxq_idx = (port_id * step) +
                                (cdev->selectq[port_id][1]++ % step);
                csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
                csk->port_id = port_id;
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        }
        ret = 0;
out:
        rcu_read_unlock();
        neigh_release(n);
        return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
        u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_tid_release(skb, len, tid, 0);
        cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
                struct l2t_entry *l2e)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
                __kfree_skb(skb);
                return;
        }

        cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host -> HW
 * Return RX credits to the hardware through an RX_DATA_ACK CPL message.
 * The credits are posted directly unless another context owns the sock
 * lock, in which case the skb is deferred to the backlog queue.
 * Returns 0 on success, -1 if the skb allocation fails.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
        u32 credit_dack;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;

        credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
                      RX_CREDITS_V(csk->rx_credits);

        cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
                            credit_dack);

        csk->rx_credits = 0;

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock_bh(&csk->lock);
                return 0;
        }

        cxgbit_send_rx_credits(csk, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

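/*
 * Pre-allocate the skbs this connection must be able to send even under
 * memory pressure: three fixed-size buffers on csk->skbq, each large
 * enough for a FLOWC work request or an abort req/rpl, plus one zeroed
 * skb kept aside for LRO header assembly.
 */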
#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX    11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len, flowclen;
        u8 i;

        flowclen = offsetof(struct fw_flowc_wr,
                            mnemval[FLOWC_WR_NPARAMS_MAX]);

        len = max_t(u32, sizeof(struct cpl_abort_req),
                    sizeof(struct cpl_abort_rpl));

        len = max(len, flowclen);
        len = roundup(len, 16);

        for (i = 0; i < 3; i++) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (!skb)
                        goto out;
                __skb_queue_tail(&csk->skbq, skb);
        }

        skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
        if (!skb)
                goto out;

        memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
        csk->lro_hskb = skb;

        return 0;
out:
        __skb_queue_purge(&csk->skbq);
        return -ENOMEM;
}

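/*
 * Build and send CPL_PASS_ACCEPT_RPL, which commits the hardware to the
 * connection: opt0 carries the window scale, MSS index, L2T index, tx
 * channel and ULP_MODE_ISCSI; opt2 selects the RSS queue and negotiated
 * TCP options. The receive window advertised in opt0 is capped at
 * RCV_BUFSIZ_M units of 1KB; the remainder is granted later through
 * RX_DATA_ACK credits.
 */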
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
        struct sk_buff *skb;
        const struct tcphdr *tcph;
        struct cpl_t5_pass_accept_rpl *rpl5;
        struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        unsigned int len = roundup(sizeof(*rpl5), 16);
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2, hlen;
        u32 wscale;
        u32 win;

        pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                cxgbit_put_csk(csk);
                return;
        }

        rpl5 = __skb_put_zero(skb, len);

        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
        cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
                      req->tcpopt.tstamp,
                      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
        wscale = cxgb_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
         */
        win = csk->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
        opt0 =  TCAM_BYPASS_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(csk->l2t->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                DSCP_V(csk->tos >> 2) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(win);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

        if (!is_t5(lldi->adapter_type))
                opt2 |= RX_FC_DISABLE_F;

        if (req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN_F;
        if (req->tcpopt.sack)
                opt2 |= SACK_EN_F;
        if (wscale)
                opt2 |= WND_SCALE_EN_F;

        hlen = ntohl(req->hdr_len);

        if (is_t5(lldi->adapter_type))
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
        else
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

        if (tcph->ece && tcph->cwr)
                opt2 |= CCTRL_ECN_V(1);

        opt2 |= RX_COALESCE_V(3);
        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

        opt2 |= T5_ISS_F;
        rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

        opt2 |= T5_OPT_2_VALID_F;

        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
        t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
        cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

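/*
 * Handle an incoming SYN (CPL_PASS_ACCEPT_REQ): look up the listening
 * cnp by stid, extract the 4-tuple, find a route, and allocate and
 * initialize a cxgbit_sock for the child connection before replying
 * with cxgbit_pass_accept_rpl(). Failures either release the tid
 * (reject) or silently drop the request.
 */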
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = NULL;
        struct cxgbit_np *cnp;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        u16 peer_mss = ntohs(req->tcpopt.mss);
        unsigned short hdrs;

        struct dst_entry *dst;
        __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int ret;
        int iptype;

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        cnp = lookup_stid(t, stid);
        if (!cnp) {
                pr_err("%s connect request on invalid stid %d\n",
                       __func__, stid);
                goto rel_skb;
        }

        if (cnp->com.state != CSK_STATE_LISTEN) {
                pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
                       __func__);
                goto reject;
        }

        csk = lookup_tid(t, tid);
        if (csk) {
                pr_err("%s csk not null tid %u\n",
                       __func__, tid);
                goto rel_skb;
        }

        cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
                        peer_ip, &local_port, &peer_port);

        /* Find output route */
        if (iptype == 4)  {
                pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
                                      *(__be32 *)local_ip,
                                      *(__be32 *)peer_ip,
                                      local_port, peer_port,
                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
                                       local_ip, peer_ip,
                                       local_port, peer_port,
                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                       ((struct sockaddr_in6 *)
                                        &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }

        csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
        if (!csk) {
                dst_release(dst);
                goto rel_skb;
        }

        ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
                                  dst, cdev);
        if (ret) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(csk);
                goto reject;
        }

        kref_init(&csk->kref);
        init_completion(&csk->com.wr_wait.completion);

        INIT_LIST_HEAD(&csk->accept_node);

        hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
                sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
        if (peer_mss && csk->mtu > (peer_mss + hdrs))
                csk->mtu = peer_mss + hdrs;

        csk->com.state = CSK_STATE_CONNECTING;
        csk->com.cdev = cdev;
        csk->cnp = cnp;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        csk->dst = dst;
        csk->tid = tid;
        csk->wr_cred = cdev->lldi.wr_cred -
                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
        csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;

        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
                                          &csk->com.local_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;

                sin = (struct sockaddr_in *)&csk->com.remote_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;

                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
                cxgb4_clip_get(cdev->lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr,
                               1);

                sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }

        skb_queue_head_init(&csk->rxq);
        skb_queue_head_init(&csk->txq);
        skb_queue_head_init(&csk->ppodq);
        skb_queue_head_init(&csk->backlogq);
        skb_queue_head_init(&csk->skbq);
        cxgbit_sock_reset_wr_list(csk);
        spin_lock_init(&csk->lock);
        init_waitqueue_head(&csk->waitq);
        init_waitqueue_head(&csk->ack_waitq);
        csk->lock_owner = false;

        if (cxgbit_alloc_csk_skb(csk)) {
                dst_release(dst);
                kfree(csk);
                goto rel_skb;
        }

        cxgbit_get_cnp(cnp);
        cxgbit_get_cdev(cdev);

        spin_lock(&cdev->cskq.lock);
        list_add_tail(&csk->list, &cdev->cskq.list);
        spin_unlock(&cdev->cskq.lock);
        cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
        cxgbit_pass_accept_rpl(csk, req);
        goto rel_skb;

reject:
        cxgbit_release_tid(cdev, tid);
rel_skb:
        __kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
                           u32 *flowclenp)
{
        u32 nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;

        if (csk->snd_wscale)
                nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the flowc request.
         * Pass back the nparams and actual flowc length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;
        return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct fw_flowc_wr *flowc;
        u32 nparams, flowclen16, flowclen;
        struct sk_buff *skb;
        u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

        flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

        skb = __skb_dequeue(&csk->skbq);
        flowc = __skb_put_zero(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                            (csk->com.cdev->lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(csk->emss);

        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
                flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
        else
                flowc->mnemval[8].val = cpu_to_be32(16384);

        index = 9;

        if (csk->snd_wscale) {
                flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
                flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
                index++;
        }

#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == VLAN_NONE) {
                pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
                flowc->mnemval[index].val = cpu_to_be32(0);
        } else
                flowc->mnemval[index].val = cpu_to_be32(
                                (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

        pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
                 " rcv_seq = %u; snd_win = %u; emss = %u\n",
                 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
                 csk->rcv_nxt, csk->snd_win, csk->emss);
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, skb);
        return flowclen16;
}

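/*
 * The two helpers below tweak per-connection TCB fields through
 * CPL_SET_TCB_FIELD and block for the firmware reply: a 2-bit field at
 * bit 4 enables header/data digest (CRC) offload, and a 2-bit field at
 * bit 8 programs the DDP page-size index.
 */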
1491int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1492{
1493        struct sk_buff *skb;
1494        struct cpl_set_tcb_field *req;
1495        u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1496        u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1497        unsigned int len = roundup(sizeof(*req), 16);
1498        int ret;
1499
1500        skb = alloc_skb(len, GFP_KERNEL);
1501        if (!skb)
1502                return -ENOMEM;
1503
1504        /*  set up ulp submode */
1505        req = __skb_put_zero(skb, len);
1506
1507        INIT_TP_WR(req, csk->tid);
1508        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1509        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1510        req->word_cookie = htons(0);
1511        req->mask = cpu_to_be64(0x3 << 4);
1512        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1513                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
1514        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1515
1516        cxgbit_get_csk(csk);
1517        cxgbit_init_wr_wait(&csk->com.wr_wait);
1518
1519        cxgbit_ofld_send(csk->com.cdev, skb);
1520
1521        ret = cxgbit_wait_for_reply(csk->com.cdev,
1522                                    &csk->com.wr_wait,
1523                                    csk->tid, 5, __func__);
1524        if (ret)
1525                return -1;
1526
1527        return 0;
1528}
1529
1530int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1531{
1532        struct sk_buff *skb;
1533        struct cpl_set_tcb_field *req;
1534        unsigned int len = roundup(sizeof(*req), 16);
1535        int ret;
1536
1537        skb = alloc_skb(len, GFP_KERNEL);
1538        if (!skb)
1539                return -ENOMEM;
1540
1541        req = __skb_put_zero(skb, len);
1542
1543        INIT_TP_WR(req, csk->tid);
1544        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1545        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1546        req->word_cookie = htons(0);
1547        req->mask = cpu_to_be64(0x3 << 8);
1548        req->val = cpu_to_be64(pg_idx << 8);
1549        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1550
1551        cxgbit_get_csk(csk);
1552        cxgbit_init_wr_wait(&csk->com.wr_wait);
1553
1554        cxgbit_ofld_send(csk->com.cdev, skb);
1555
1556        ret = cxgbit_wait_for_reply(csk->com.cdev,
1557                                    &csk->com.wr_wait,
1558                                    csk->tid, 5, __func__);
1559        if (ret)
1560                return -1;
1561
1562        return 0;
1563}
1564
1565static void
1566cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1567{
1568        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1569        struct tid_info *t = cdev->lldi.tids;
1570        unsigned int stid = GET_TID(rpl);
1571        struct cxgbit_np *cnp = lookup_stid(t, stid);
1572
1573        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1574                 __func__, cnp, stid, rpl->status);
1575
1576        if (!cnp) {
1577                pr_info("%s stid %d lookup failure\n", __func__, stid);
1578                goto rel_skb;
1579        }
1580
1581        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1582        cxgbit_put_cnp(cnp);
1583rel_skb:
1584        __kfree_skb(skb);
1585}
1586
static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %u lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        __kfree_skb(skb);
}

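/*
 * Final CPL of the passive-open handshake: snapshot the initial send
 * and receive sequence numbers, mark the connection ESTABLISHED and
 * queue it on the listener's accept list.
 */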
static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        struct cxgbit_sock *csk;
        struct cxgbit_np *cnp;
        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
        u32 snd_isn = be32_to_cpu(req->snd_isn);
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        cnp = csk->cnp;

        pr_debug("%s: csk %p; tid %u; cnp %p\n",
                 __func__, csk, tid, cnp);

        csk->write_seq = snd_isn;
        csk->snd_una = snd_isn;
        csk->snd_nxt = snd_isn;

        csk->rcv_nxt = rcv_isn;
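        /*
         * The hardware receive buffer is programmed in 1KB units and
         * capped at RCV_BUFSIZ_M; any surplus of the advertised receive
         * window over that cap is tracked in software as rx_credits.
         */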
        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
        cxgbit_set_emss(csk, tcp_opt);
        dst_confirm(csk->dst);
        csk->com.state = CSK_STATE_ESTABLISHED;
        spin_lock_bh(&cnp->np_accept_lock);
        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
        spin_unlock_bh(&cnp->np_accept_lock);
        complete(&cnp->accept_comp);
rel_skb:
        __kfree_skb(skb);
}

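/* Hand an skb to the connection's rx queue and wake the rx thread. */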
static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_skcb_flags(skb) = 0;
        spin_lock_bh(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        spin_unlock_bh(&csk->rxq.lock);
        wake_up(&csk->waitq);
}

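/*
 * Peer sent a FIN: in ESTABLISHED the skb is forwarded to the
 * connection thread; in the later close states the state machine is
 * advanced here and the skb is freed.
 */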
static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_CLOSING;
                cxgbit_queue_rx_skb(csk, skb);
                return;
        case CSK_STATE_CLOSING:
                /* simultaneous close */
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_peer_close in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

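/*
 * Reply to our own close request; advances the close state machine
 * (CLOSING -> MORIBUND -> DEAD) and drops a socket reference once
 * both directions are shut down.
 */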
static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
        case CSK_STATE_DEAD:
                break;
        default:
                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

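/*
 * Peer or hardware aborted the connection.  Negative advice is logged
 * and ignored; otherwise pending tx is purged, an abort reply is sent
 * and the socket is either released or handed to the connection thread.
 */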
static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *hdr = cplhdr(skb);
        unsigned int tid = GET_TID(hdr);
        struct sk_buff *rpl_skb;
        bool release = false;
        bool wakeup_thread = false;
        u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, tid, csk->com.state);

        if (cxgb_is_neg_adv(hdr->status)) {
                pr_err("%s: got negative advice %d on tid %u\n",
                       __func__, hdr->status, tid);
                goto rel_skb;
        }

        switch (csk->com.state) {
        case CSK_STATE_CONNECTING:
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                release = true;
                break;
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_DEAD;
                wakeup_thread = true;
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_DEAD;
                if (!csk->conn)
                        release = true;
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
                        __func__, csk->com.state);
                csk->com.state = CSK_STATE_DEAD;
        }

        __skb_queue_purge(&csk->txq);

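        /*
         * The firmware expects a FLOWC work request as the first WR on
         * an offloaded connection, so issue one now if nothing has been
         * transmitted yet.
         */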
        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        rpl_skb = __skb_dequeue(&csk->skbq);

        cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, rpl_skb);

        if (wakeup_thread) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (release)
                cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

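/*
 * Reply to an abort we initiated: wake any waiter and drop the abort
 * path's reference on the socket.
 */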
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);

        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ABORTING:
                csk->com.state = CSK_STATE_DEAD;
                if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
                        cxgbit_wake_up(&csk->com.wr_wait, __func__,
                                       rpl->status);
                cxgbit_put_csk(csk);
                break;
        default:
                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

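/*
 * Sanity-check tx credit accounting: the credits currently held plus
 * the credits consumed by work requests still in flight must add up
 * to the per-connection maximum.
 */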
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
        const struct sk_buff *skb = csk->wr_pending_head;
        u32 credit = 0;

        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
                return true;
        }

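        /*
         * The tx path stashes each work request's credit cost in
         * skb->csum, so walking the pending list recovers the total
         * still in flight.
         */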
        while (skb) {
                credit += (__force u32)skb->csum;
                skb = cxgbit_skcb_tx_wr_next(skb);
        }

        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
                       csk, csk->tid, csk->wr_cred,
                       credit, csk->wr_max_cred);

                return true;
        }

        return false;
}

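/*
 * Firmware acked one or more tx work requests: return their credits,
 * free the completed skbs, advance snd_una, and restart transmission
 * if data is still queued.
 */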
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
        u32 credits = rpl->credits;
        u32 snd_una = ntohl(rpl->snd_una);

        csk->wr_cred += credits;
        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

        while (credits) {
                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
                u32 csum;

                if (unlikely(!p)) {
                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
                               csk, csk->tid, credits,
                               csk->wr_cred, csk->wr_una_cred);
                        break;
                }

                csum = (__force u32)p->csum;
                if (unlikely(credits < csum)) {
                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
                                csk, csk->tid,
                                credits, csk->wr_cred, csk->wr_una_cred,
                                csum);
                        p->csum = (__force __wsum)(csum - credits);
                        break;
                }

                cxgbit_sock_dequeue_wr(csk);
                credits -= csum;
                kfree_skb(p);
        }

        if (unlikely(cxgbit_credit_err(csk))) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
                if (unlikely(before(snd_una, csk->snd_una))) {
                        pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
                                csk, csk->tid, snd_una,
                                csk->snd_una);
                        goto rel_skb;
                }

                if (csk->snd_una != snd_una) {
                        csk->snd_una = snd_una;
                        dst_confirm(csk->dst);
                        wake_up(&csk->ack_waitq);
                }
        }

        if (skb_queue_len(&csk->txq))
                cxgbit_push_tx_frames(csk);

rel_skb:
        __kfree_skb(skb);
}

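/*
 * Completion for a CPL_SET_TCB_FIELD request: wake the waiter blocked
 * on the matching request and drop the reference taken when it was
 * issued.
 */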
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
        cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

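/* Ingress TCP payload (CPL_RX_DATA): queue it for the rx thread. */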
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_rx_data *cpl = cplhdr(skb);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_queue_rx_skb(csk, skb);
        return;
rel_skb:
        __kfree_skb(skb);
}

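/*
 * Run a CPL handler under the socket lock.  If the connection thread
 * currently owns the socket, defer the skb to the backlog queue for
 * the lock owner to replay later.
 */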
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        spin_lock(&csk->lock);
        if (csk->lock_owner) {
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock(&csk->lock);
                return;
        }

        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
        spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_get_csk(csk);
        __cxgbit_process_rx_cpl(csk, skb);
        cxgbit_put_csk(csk);
}

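/*
 * Demultiplex connection-scoped CPLs: stash the handler in the skb
 * control block and run it under the socket lock, taking an extra
 * socket reference except on the CPL_FW4_ACK fast path.
 */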
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_tx_data *cpl = cplhdr(skb);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;
        unsigned int tid = GET_TID(cpl);
        u8 opcode = cxgbit_skcb_rx_opcode(skb);
        bool ref = true;

        switch (opcode) {
        case CPL_FW4_ACK:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
                ref = false;
                break;
        case CPL_PEER_CLOSE:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
                break;
        case CPL_CLOSE_CON_RPL:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
                break;
        case CPL_ABORT_REQ_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
                break;
        case CPL_ABORT_RPL_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
                break;
        default:
                goto rel_skb;
        }

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        if (ref)
                cxgbit_process_rx_cpl(csk, skb);
        else
                __cxgbit_process_rx_cpl(csk, skb);

        return;
rel_skb:
        __kfree_skb(skb);
}

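/* Top-level dispatch table, indexed by CPL opcode. */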
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
        [CPL_RX_DATA]           = cxgbit_rx_data,
        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
};