linux/drivers/target/iscsi/cxgbit/cxgbit_cm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
        if (ret == CPL_ERR_NONE)
                wr_waitp->ret = 0;
        else
                wr_waitp->ret = -EIO;

        if (wr_waitp->ret)
                pr_err("%s: err:%u\n", func, ret);

        complete(&wr_waitp->completion);
}

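/*
 * Wait up to @timeout seconds for the firmware to answer the work
 * request tracked by @wr_waitp.  Returns 0 on success, -EIO if the
 * device is not up, or -ETIMEDOUT if no reply arrives in time.
 */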
static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
                      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
                      const char *func)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
        if (!ret) {
                pr_info("%s - Device %s not responding tid %u\n",
                        func, pci_name(cdev->lldi.pdev), tid);
                wr_waitp->ret = -ETIMEDOUT;
        }
out:
        if (wr_waitp->ret)
                pr_info("%s: FW reply %d tid %u\n",
                        pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
        return wr_waitp->ret;
}

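/*
 * Per-device hash table mapping a listening cxgbit_np to the server
 * TID (stid) allocated for it on that adapter; the bucket index is
 * derived from the cnp pointer value itself.
 */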
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
                   unsigned int stid)
{
        struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                int bucket = cxgbit_np_hashfn(cnp);

                p->cnp = cnp;
                p->stid = stid;
                spin_lock(&cdev->np_lock);
                p->next = cdev->np_hash_tab[bucket];
                cdev->np_hash_tab[bucket] = p;
                spin_unlock(&cdev->np_lock);
        }

        return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p;

        spin_lock(&cdev->np_lock);
        for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

        spin_lock(&cdev->np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        *prev = p->next;
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
        struct cxgbit_np *cnp;

        cnp = container_of(kref, struct cxgbit_np, kref);
        kfree(cnp);
}

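/*
 * Program an IPv6 hardware listening server for @cnp on @stid.  For a
 * non-wildcard address a CLIP (compressed local IP) table entry is
 * claimed first and released again on failure, except on timeout when
 * the firmware reply is still pending.
 */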
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                     &cnp->com.local_addr;
        int addr_type;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

        addr_type = ipv6_addr_type((const struct in6_addr *)
                                   &sin6->sin6_addr);
        if (addr_type != IPV6_ADDR_ANY) {
                ret = cxgb4_clip_get(cdev->lldi.ports[0],
                                     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
                if (ret) {
                        pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
                               sin6->sin6_addr.s6_addr, ret);
                        return -ENOMEM;
                }
        }

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server6(cdev->lldi.ports[0],
                                   stid, &sin6->sin6_addr,
                                   sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

                pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
                       ret, stid, sin6->sin6_addr.s6_addr,
                       ntohs(sin6->sin6_port));
        }

        return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)
                                   &cnp->com.local_addr;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server(cdev->lldi.ports[0],
                                  stid, sin->sin_addr.s_addr,
                                  sin->sin_port, 0,
                                  cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev,
                                            &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret)
                pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
                       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
        return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;
                                return cdev;
                        }
                }
        }

        return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_BONDING) {
                pr_err("Bond devices are not supported. Interface:%s\n",
                       ndev->name);
                return NULL;
        }

        if (is_vlan_dev(ndev))
                return vlan_dev_real_dev(ndev);

        return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
        struct net_device *ndev;

        ndev = __ip_dev_find(&init_net, saddr, false);
        if (!ndev)
                return NULL;

        return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
        struct net_device *ndev = NULL;
        bool found = false;

        if (IS_ENABLED(CONFIG_IPV6)) {
                for_each_netdev_rcu(&init_net, ndev)
                        if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
                                found = true;
                                break;
                        }
        }
        if (!found)
                return NULL;
        return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        struct net_device *ndev = NULL;
        struct cxgbit_device *cdev = NULL;

        rcu_read_lock();
        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
        }
        if (!ndev)
                goto out;

        cdev = cxgbit_find_device(ndev, NULL);
out:
        rcu_read_unlock();
        return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        int addr_type;

        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                addr_type = ipv6_addr_type((const struct in6_addr *)
                                &sin6->sin6_addr);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

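/*
 * Set up a hardware listener for @cnp on one adapter: allocate an
 * stid, record the cnp->stid mapping, then program the IPv4 or IPv6
 * server.  On failure the mapping is removed; the stid is kept only
 * on timeout, since the firmware may still hold a reference to it.
 */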
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        int ss_family = cnp->com.local_addr.ss_family;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -EINVAL;
        }

        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_free_stid(cdev->lldi.tids, stid,
                                        ss_family);
                cxgbit_np_hash_del(cdev, cnp);
                return ret;
        }
        return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret = -1;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_np_cdev(cnp);
        if (!cdev)
                goto out;

        if (cxgbit_np_hash_find(cdev, cnp) >= 0)
                goto out;

        if (__cxgbit_setup_cdev_np(cdev, cnp))
                goto out;

        cnp->com.cdev = cdev;
        ret = 0;
out:
        mutex_unlock(&cdev_list_lock);
        return ret;
}

static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;
        u32 count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
                if (ret != 0)
                        continue;
                count++;
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;
}

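/*
 * Entry point from the iSCSI target core for bringing up a network
 * portal.  A wildcard address is programmed on every registered
 * adapter, a specific address only on the adapter that owns it.
 */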
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
        struct cxgbit_np *cnp;
        int ret;

        if ((ksockaddr->ss_family != AF_INET) &&
            (ksockaddr->ss_family != AF_INET6))
                return -EINVAL;

        cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
        if (!cnp)
                return -ENOMEM;

        init_waitqueue_head(&cnp->accept_wait);
        init_completion(&cnp->com.wr_wait.completion);
        init_completion(&cnp->accept_comp);
        INIT_LIST_HEAD(&cnp->np_accept_list);
        spin_lock_init(&cnp->np_accept_lock);
        kref_init(&cnp->kref);
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&cnp->com.local_addr, &np->np_sockaddr,
               sizeof(cnp->com.local_addr));

        cnp->np = np;
        cnp->com.cdev = NULL;

        if (cxgbit_inaddr_any(cnp))
                ret = cxgbit_setup_all_np(cnp);
        else
                ret = cxgbit_setup_cdev_np(cnp);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return -EINVAL;
        }

        np->np_context = cnp;
        cnp->com.state = CSK_STATE_LISTEN;
        return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
                     struct cxgbit_sock *csk)
{
        conn->login_family = np->np_sockaddr.ss_family;
        conn->login_sockaddr = csk->com.remote_addr;
        conn->local_sockaddr = csk->com.local_addr;
}

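/*
 * Called from the iSCSI login thread: sleep until a hardware-accepted
 * connection appears on the listener's accept list, then attach it to
 * @conn.  Returns -ENODEV if interrupted or if the np thread is being
 * reset or shut down.
 */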
int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
        int ret = 0;

accept_wait:
        ret = wait_for_completion_interruptible(&cnp->accept_comp);
        if (ret)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                /*
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail
                 */
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        spin_lock_bh(&cnp->np_accept_lock);
        if (list_empty(&cnp->np_accept_list)) {
                spin_unlock_bh(&cnp->np_accept_lock);
                goto accept_wait;
        }

        csk = list_first_entry(&cnp->np_accept_list,
                               struct cxgbit_sock,
                               accept_node);

        list_del_init(&csk->accept_node);
        spin_unlock_bh(&cnp->np_accept_lock);
        conn->context = csk;
        csk->conn = conn;

        cxgbit_set_conn_info(np, conn, csk);
        return 0;
}

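/*
 * Tear down the hardware listener for @cnp on one adapter: drop the
 * stid mapping, ask the firmware to remove the server and wait for
 * its reply, then release the CLIP entry (IPv6 only) and the stid.
 */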
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        bool ipv6 = false;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL;
        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        if (cnp->np->np_sockaddr.ss_family == AF_INET6)
                ipv6 = true;

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);
        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);

        if (ret > 0)
                ret = net_xmit_errno(ret);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return ret;
        }

        ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                    0, 10, __func__);
        if (ret == -ETIMEDOUT)
                return ret;

        if (ipv6 && cnp->com.cdev) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr,
                                   1);
        }

        cxgb4_free_stid(cdev->lldi.tids, stid,
                        cnp->com.local_addr.ss_family);
        return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_free_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
        }
        mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        bool found = false;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cdev == cnp->com.cdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto out;

        __cxgbit_free_cdev_np(cdev, cnp);
out:
        mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk, *tmp;

        cnp->com.state = CSK_STATE_DEAD;
        if (cnp->com.cdev)
                cxgbit_free_cdev_np(cnp);
        else
                cxgbit_free_all_np(cnp);

        spin_lock_bh(&cnp->np_accept_lock);
        list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
                list_del_init(&csk->accept_node);
                __cxgbit_free_conn(csk);
        }
        spin_unlock_bh(&cnp->np_accept_lock);

        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
}

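/*
 * Queue a CPL_CLOSE_CON_REQ (TCP FIN) on the socket's tx queue and
 * kick the tx path.  Best effort: skb allocation failure is ignored.
 */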
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
                              NULL, NULL);

        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
        cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = handle;

        pr_debug("%s csk %p\n", __func__, csk);
        kfree_skb(skb);
        cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbit_device *cdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        pr_debug("%s cdev %p\n", __func__, cdev);
        req->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(cdev, skb);
}

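/*
 * Issue a CPL_ABORT_REQ (TCP RST) for the connection.  Pending tx data
 * is dropped first, and a FLOWC WR is sent if none has gone out yet.
 * The skb comes from the pre-allocated csk->skbq pool, so this cannot
 * fail on memory.
 */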
static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_abort_req), 16);

        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        skb = __skb_dequeue(&csk->skbq);
        cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
                          csk->com.cdev, cxgbit_abort_arp_failure);

        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        __kfree_skb(skb);

        if (csk->com.state != CSK_STATE_ESTABLISHED)
                goto no_abort;

        set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
        csk->com.state = CSK_STATE_ABORTING;

        cxgbit_send_abort_req(csk);

        return;

no_abort:
        cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
        cxgbit_put_csk(csk);
}

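/*
 * Abort the connection and wait (up to 600 seconds) for the abort to
 * complete.  If another context currently owns the socket lock, the
 * abort is deferred to the backlog queue rather than run directly.
 */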
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
        struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
                __skb_queue_tail(&csk->backlogq, skb);
        } else {
                __cxgbit_abort_conn(csk, skb);
        }
        spin_unlock_bh(&csk->lock);

        cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
                              csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
        struct iscsi_conn *conn = csk->conn;
        bool release = false;

        pr_debug("%s: state %d\n",
                 __func__, csk->com.state);

        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
                        csk->com.state = CSK_STATE_ABORTING;
                        cxgbit_send_abort_req(csk);
                }
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                cxgbit_send_halfclose(csk);
                break;
        case CSK_STATE_DEAD:
                release = true;
                break;
        default:
                pr_err("%s: csk %p; state %d\n",
                       __func__, csk, csk->com.state);
        }
        spin_unlock_bh(&csk->lock);

        if (release)
                cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
        __cxgbit_free_conn(conn->context);
}

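/*
 * Derive the effective MSS from the negotiated hardware MTU index:
 * subtract IP/TCP header overhead and any timestamp option space,
 * and enforce a floor of 128 bytes.
 */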
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
                        ((csk->com.remote_addr.ss_family == AF_INET) ?
                        sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                        sizeof(struct tcphdr);
        csk->mss = csk->emss;
        if (TCPOPT_TSTAMP_G(opt))
                csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (csk->emss < 128)
                csk->emss = 128;
        if (csk->emss & 7)
                pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
                        TCPOPT_MSS_G(opt), csk->mss, csk->emss);
        pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
                 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;

        __skb_queue_purge(&csk->txq);
        __skb_queue_purge(&csk->rxq);
        __skb_queue_purge(&csk->backlogq);
        __skb_queue_purge(&csk->ppodq);
        __skb_queue_purge(&csk->skbq);

        while ((skb = cxgbit_sock_dequeue_wr(csk)))
                kfree_skb(skb);

        __kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
        struct cxgbit_sock *csk;
        struct cxgbit_device *cdev;

        csk = container_of(kref, struct cxgbit_sock, kref);

        pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

        if (csk->com.local_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                             &csk->com.local_addr;
                cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
                                   (const u32 *)
                                   &sin6->sin6_addr.s6_addr, 1);
        }

        cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
                         csk->com.local_addr.ss_family);
        dst_release(csk->dst);
        cxgb4_l2t_release(csk->l2t);

        cdev = csk->com.cdev;
        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        cxgbit_free_skb(csk);
        cxgbit_put_cnp(csk->cnp);
        cxgbit_put_cdev(cdev);

        kfree(csk);
}

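/*
 * Size the TCP send and receive windows from the port's link speed,
 * scaling a 256KB base window by multiples of 10Gbps.
 */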
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
        unsigned int linkspeed;
        u8 scale;

        linkspeed = pi->link_cfg.speed;
        scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
        csk->rcv_win = CXGBIT_10G_RCV_WIN;
        if (scale)
                csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
        csk->snd_win = CXGBIT_10G_SND_WIN;
        if (scale)
                csk->snd_win *= scale;

        pr_debug("%s snd_win %d rcv_win %d\n",
                 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
        return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
        if (!pri_mask)
                return 0;

        return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
        int ret;
        u8 caps;

        struct dcb_app iscsi_dcb_app = {
                .protocol = local_port
        };

        ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

        if (ret)
                return 0;

        if (caps & DCB_CAP_DCBX_VER_IEEE) {
                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
                ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                if (!ret) {
                        iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
                        ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                }
        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

                ret = dcb_getapp(ndev, &iscsi_dcb_app);
        }

        pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

        return cxgbit_select_priority(ret);
}
#endif

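/*
 * Resolve the L2 path for a new connection: look up the neighbour for
 * @peer_ip on @dst, take an L2T entry, and derive the per-connection
 * parameters (MTU, tx channel, SMAC index, tx/rx queue indices, TCP
 * windows) from the egress port.  Loopback and DCB-prioritized links
 * take separate paths.
 */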
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
                    u16 local_port, struct dst_entry *dst,
                    struct cxgbit_device *cdev)
{
        struct neighbour *n;
        int ret, step;
        struct net_device *ndev;
        u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 priority = 0;
#endif

        n = dst_neigh_lookup(dst, peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        if (!(n->nud_state & NUD_VALID))
                neigh_event_send(n, NULL);

        ret = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                if (iptype == 4)
                        ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
                else if (IS_ENABLED(CONFIG_IPV6))
                        ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
                else
                        ndev = NULL;

                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
                                         n, ndev, 0);
                if (!csk->l2t)
                        goto out;
                csk->mtu = ndev->mtu;
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nchan;
                csk->txq_idx = cxgb4_port_idx(ndev) * step;
                step = cdev->lldi.nrxq /
                        cdev->lldi.nchan;
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                csk->rss_qid = cdev->lldi.rxq_ids[
                                cxgb4_port_idx(ndev) * step];
                csk->port_id = cxgb4_port_idx(ndev);
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        } else {
                ndev = cxgbit_get_real_dev(n->dev);
                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

#ifdef CONFIG_CHELSIO_T4_DCB
                if (cxgbit_get_iscsi_dcb_state(ndev))
                        priority = cxgbit_get_iscsi_dcb_priority(ndev,
                                                                 local_port);

                csk->dcb_priority = priority;

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
                if (!csk->l2t)
                        goto out;
                port_id = cxgb4_port_idx(ndev);
                csk->mtu = dst_mtu(dst);
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                               ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                        cdev->lldi.nports;
                csk->txq_idx = (port_id * step) +
                                (cdev->selectq[port_id][0]++ % step);
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                step = cdev->lldi.nrxq /
                        cdev->lldi.nports;
                rxq_idx = (port_id * step) +
                                (cdev->selectq[port_id][1]++ % step);
                csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
                csk->port_id = port_id;
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        }
        ret = 0;
out:
        rcu_read_unlock();
        neigh_release(n);
        return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
        u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_tid_release(skb, len, tid, 0);
        cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
                struct l2t_entry *l2e)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
                __kfree_skb(skb);
                return;
        }

        cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns 0 on success, -1 if the skb allocation fails.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
        u32 credit_dack;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;

        credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
                      RX_CREDITS_V(csk->rx_credits);

        cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
                            credit_dack);

        csk->rx_credits = 0;

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock_bh(&csk->lock);
                return 0;
        }

        cxgbit_send_rx_credits(csk, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

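/*
 * Pre-allocate the skbs the connection will need in contexts where
 * allocation must not fail: three skbs sized for the largest of an
 * abort req/rpl or a full FLOWC WR, plus one LRO header skb.
 */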
#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX    11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len, flowclen;
        u8 i;

        flowclen = offsetof(struct fw_flowc_wr,
                            mnemval[FLOWC_WR_NPARAMS_MAX]);

        len = max_t(u32, sizeof(struct cpl_abort_req),
                    sizeof(struct cpl_abort_rpl));

        len = max(len, flowclen);
        len = roundup(len, 16);

        for (i = 0; i < 3; i++) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (!skb)
                        goto out;
                __skb_queue_tail(&csk->skbq, skb);
        }

        skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
        if (!skb)
                goto out;

        memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
        csk->lro_hskb = skb;

        return 0;
out:
        __skb_queue_purge(&csk->skbq);
        return -ENOMEM;
}

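/*
 * Build and send the CPL_PASS_ACCEPT_RPL completing the passive open:
 * encode the MSS index, window scale, receive window, L2T index and
 * iSCSI ULP mode into opt0/opt2 and hand the skb to the L2T tx path.
 */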
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
        struct sk_buff *skb;
        const struct tcphdr *tcph;
        struct cpl_t5_pass_accept_rpl *rpl5;
        struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        unsigned int len = roundup(sizeof(*rpl5), 16);
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2, hlen;
        u32 wscale;
        u32 win;

        pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                cxgbit_put_csk(csk);
                return;
        }

        rpl5 = __skb_put_zero(skb, len);

        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
        cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
                      req->tcpopt.tstamp,
                      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
        wscale = cxgb_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
         */
        win = csk->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
        opt0 =  TCAM_BYPASS_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(csk->l2t->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                DSCP_V(csk->tos >> 2) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(win);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

        if (!is_t5(lldi->adapter_type))
                opt2 |= RX_FC_DISABLE_F;

        if (req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN_F;
        if (req->tcpopt.sack)
                opt2 |= SACK_EN_F;
        if (wscale)
                opt2 |= WND_SCALE_EN_F;

        hlen = ntohl(req->hdr_len);

        if (is_t5(lldi->adapter_type))
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
        else
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

        if (tcph->ece && tcph->cwr)
                opt2 |= CCTRL_ECN_V(1);

        opt2 |= RX_COALESCE_V(3);
        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

        opt2 |= T5_ISS_F;
        rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

        opt2 |= T5_OPT_2_VALID_F;

        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
        t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
        cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

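/*
 * Handle an incoming connection request (CPL_PASS_ACCEPT_REQ):
 * validate the listening stid, find a route back to the peer,
 * allocate and initialize a cxgbit_sock, insert it into the tid table
 * and answer with CPL_PASS_ACCEPT_RPL.  Invalid or unroutable
 * requests are rejected by releasing the tid.
 */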
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = NULL;
        struct cxgbit_np *cnp;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        u16 peer_mss = ntohs(req->tcpopt.mss);
        unsigned short hdrs;

        struct dst_entry *dst;
        __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int ret;
        int iptype;

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        cnp = lookup_stid(t, stid);
        if (!cnp) {
                pr_err("%s connect request on invalid stid %d\n",
                       __func__, stid);
                goto rel_skb;
        }

        if (cnp->com.state != CSK_STATE_LISTEN) {
                pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
                       __func__);
                goto reject;
        }

        csk = lookup_tid(t, tid);
        if (csk) {
                pr_err("%s csk not null tid %u\n",
                       __func__, tid);
                goto rel_skb;
        }

        cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
                        peer_ip, &local_port, &peer_port);

        /* Find output route */
        if (iptype == 4) {
                pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n",
                         __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
                                      *(__be32 *)local_ip,
                                      *(__be32 *)peer_ip,
                                      local_port, peer_port,
                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n",
                         __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
                                       local_ip, peer_ip,
                                       local_port, peer_port,
                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                       ((struct sockaddr_in6 *)
                                        &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }

        csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
        if (!csk) {
                dst_release(dst);
                goto rel_skb;
        }

        ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
                                  dst, cdev);
        if (ret) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(csk);
                goto reject;
        }

        kref_init(&csk->kref);
        init_completion(&csk->com.wr_wait.completion);

        INIT_LIST_HEAD(&csk->accept_node);

        hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
                sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
        if (peer_mss && csk->mtu > (peer_mss + hdrs))
                csk->mtu = peer_mss + hdrs;

        csk->com.state = CSK_STATE_CONNECTING;
        csk->com.cdev = cdev;
        csk->cnp = cnp;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        csk->dst = dst;
        csk->tid = tid;
        csk->wr_cred = cdev->lldi.wr_cred -
                        DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
        csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;

        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
                                          &csk->com.local_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;

                sin = (struct sockaddr_in *)&csk->com.remote_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;

                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
                cxgb4_clip_get(cdev->lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr,
                               1);

                sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }

        skb_queue_head_init(&csk->rxq);
        skb_queue_head_init(&csk->txq);
        skb_queue_head_init(&csk->ppodq);
        skb_queue_head_init(&csk->backlogq);
        skb_queue_head_init(&csk->skbq);
        cxgbit_sock_reset_wr_list(csk);
        spin_lock_init(&csk->lock);
        init_waitqueue_head(&csk->waitq);
        csk->lock_owner = false;

        if (cxgbit_alloc_csk_skb(csk)) {
                dst_release(dst);
                kfree(csk);
                goto rel_skb;
        }

        cxgbit_get_cnp(cnp);
        cxgbit_get_cdev(cdev);

        spin_lock(&cdev->cskq.lock);
        list_add_tail(&csk->list, &cdev->cskq.list);
        spin_unlock(&cdev->cskq.lock);
        cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
        cxgbit_pass_accept_rpl(csk, req);
        goto rel_skb;

reject:
        cxgbit_release_tid(cdev, tid);
rel_skb:
        __kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
                           u32 *flowclenp)
{
        u32 nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;

        if (csk->snd_wscale)
                nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the flowc request.
         * Pass back the nparams and actual flowc length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;
        return flowclen16;
}

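/*
 * Send the FW_FLOWC_WR that must precede any tx data on an offloaded
 * connection, advertising queue ids, sequence numbers, window sizes,
 * MSS and, where applicable, window scale and DCB priority to the
 * firmware.  Returns the number of 16-byte credits consumed.
 */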
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct fw_flowc_wr *flowc;
        u32 nparams, flowclen16, flowclen;
        struct sk_buff *skb;
        u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

        flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

        skb = __skb_dequeue(&csk->skbq);
        flowc = __skb_put_zero(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                            (csk->com.cdev->lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(csk->emss);

        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
                flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
        else
                flowc->mnemval[8].val = cpu_to_be32(16384);

        index = 9;

        if (csk->snd_wscale) {
                flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
                flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
                index++;
        }

#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == VLAN_NONE) {
                pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
                flowc->mnemval[index].val = cpu_to_be32(0);
        } else
                flowc->mnemval[index].val = cpu_to_be32(
                                (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

        pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
                 " rcv_seq = %u; snd_win = %u; emss = %u\n",
                 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
                 csk->rcv_nxt, csk->snd_win, csk->emss);
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, skb);
        return flowclen16;
}

static int
cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        spin_lock_bh(&csk->lock);
        if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
                spin_unlock_bh(&csk->lock);
                pr_err("%s: csk 0x%p, tid %u, state %u\n",
                       __func__, csk, csk->tid, csk->com.state);
                __kfree_skb(skb);
                return -1;
        }

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);
        cxgbit_ofld_send(csk->com.cdev, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

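/*
 * Enable header/data digest (CRC) offload for the connection by
 * setting the ULP submode bits in the hardware TCB through
 * CPL_SET_TCB_FIELD, then wait for the firmware's reply.
 */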
1507int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
1508{
1509        struct sk_buff *skb;
1510        struct cpl_set_tcb_field *req;
1511        u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
1512        u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
1513        unsigned int len = roundup(sizeof(*req), 16);
1514        int ret;
1515
1516        skb = alloc_skb(len, GFP_KERNEL);
1517        if (!skb)
1518                return -ENOMEM;
1519
1520        /*  set up ulp submode */
1521        req = __skb_put_zero(skb, len);
1522
1523        INIT_TP_WR(req, csk->tid);
1524        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1525        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1526        req->word_cookie = htons(0);
1527        req->mask = cpu_to_be64(0x3 << 4);
1528        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1529                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
1530        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1531
1532        if (cxgbit_send_tcb_skb(csk, skb))
1533                return -1;
1534
1535        ret = cxgbit_wait_for_reply(csk->com.cdev,
1536                                    &csk->com.wr_wait,
1537                                    csk->tid, 5, __func__);
1538        if (ret)
1539                return -1;
1540
1541        return 0;
1542}
1543
1544int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
1545{
1546        struct sk_buff *skb;
1547        struct cpl_set_tcb_field *req;
1548        unsigned int len = roundup(sizeof(*req), 16);
1549        int ret;
1550
1551        skb = alloc_skb(len, GFP_KERNEL);
1552        if (!skb)
1553                return -ENOMEM;
1554
1555        req = __skb_put_zero(skb, len);
1556
1557        INIT_TP_WR(req, csk->tid);
1558        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1559        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
1560        req->word_cookie = htons(0);
1561        req->mask = cpu_to_be64(0x3 << 8);
1562        req->val = cpu_to_be64(pg_idx << 8);
1563        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
1564
1565        if (cxgbit_send_tcb_skb(csk, skb))
1566                return -1;
1567
1568        ret = cxgbit_wait_for_reply(csk->com.cdev,
1569                                    &csk->com.wr_wait,
1570                                    csk->tid, 5, __func__);
1571        if (ret)
1572                return -1;
1573
1574        return 0;
1575}
1576
1577static void
1578cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1579{
1580        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1581        struct tid_info *t = cdev->lldi.tids;
1582        unsigned int stid = GET_TID(rpl);
1583        struct cxgbit_np *cnp = lookup_stid(t, stid);
1584
1585        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1586                 __func__, cnp, stid, rpl->status);
1587
1588        if (!cnp) {
1589                pr_info("%s stid %d lookup failure\n", __func__, stid);
1590                goto rel_skb;
1591        }
1592
1593        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1594        cxgbit_put_cnp(cnp);
1595rel_skb:
1596        __kfree_skb(skb);
1597}
1598
1599static void
1600cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1601{
1602        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1603        struct tid_info *t = cdev->lldi.tids;
1604        unsigned int stid = GET_TID(rpl);
1605        struct cxgbit_np *cnp = lookup_stid(t, stid);
1606
1607        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
1608                 __func__, cnp, stid, rpl->status);
1609
1610        if (!cnp) {
1611                pr_info("%s stid %u lookup failure\n", __func__, stid);
1612                goto rel_skb;
1613        }
1614
1615        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
1616        cxgbit_put_cnp(cnp);
1617rel_skb:
1618        __kfree_skb(skb);
1619}
1620
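    /*
     * CPL_PASS_ESTABLISH ends the passive-open handshake: the handler
     * seeds the send and receive sequence space from the ISNs in the CPL,
     * derives emss and the send window scale from tcp_opt, then parks the
     * now-ESTABLISHED csk on the listener's accept list and signals
     * accept_comp for the thread blocked in the accept path.
     */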
1621static void
1622cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
1623{
1624        struct cpl_pass_establish *req = cplhdr(skb);
1625        struct tid_info *t = cdev->lldi.tids;
1626        unsigned int tid = GET_TID(req);
1627        struct cxgbit_sock *csk;
1628        struct cxgbit_np *cnp;
1629        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
1630        u32 snd_isn = be32_to_cpu(req->snd_isn);
1631        u32 rcv_isn = be32_to_cpu(req->rcv_isn);
1632
1633        csk = lookup_tid(t, tid);
1634        if (unlikely(!csk)) {
1635                pr_err("can't find connection for tid %u.\n", tid);
1636                goto rel_skb;
1637        }
1638        cnp = csk->cnp;
1639
1640        pr_debug("%s: csk %p; tid %u; cnp %p\n",
1641                 __func__, csk, tid, cnp);
1642
1643        csk->write_seq = snd_isn;
1644        csk->snd_una = snd_isn;
1645        csk->snd_nxt = snd_isn;
1646
1647        csk->rcv_nxt = rcv_isn;
1648
1649        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
1650                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
1651
1652        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
1653        cxgbit_set_emss(csk, tcp_opt);
1654        dst_confirm(csk->dst);
1655        csk->com.state = CSK_STATE_ESTABLISHED;
1656        spin_lock_bh(&cnp->np_accept_lock);
1657        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
1658        spin_unlock_bh(&cnp->np_accept_lock);
1659        complete(&cnp->accept_comp);
1660rel_skb:
1661        __kfree_skb(skb);
1662}
1663
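    /*
     * Hand an skb to the connection's receive path: queue it on csk->rxq
     * under the queue lock and wake whatever is sleeping on csk->waitq,
     * presumably the iSCSI target rx thread given the wait/wake pairing.
     */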
1664static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
1665{
1666        cxgbit_skcb_flags(skb) = 0;
1667        spin_lock_bh(&csk->rxq.lock);
1668        __skb_queue_tail(&csk->rxq, skb);
1669        spin_unlock_bh(&csk->rxq.lock);
1670        wake_up(&csk->waitq);
1671}
1672
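    /*
     * Close state machine, as reconstructed from the handlers below:
     *
     *   ESTABLISHED -> CLOSING    peer sent FIN first (CPL_PEER_CLOSE)
     *   CLOSING     -> MORIBUND   our FIN acked, or simultaneous close
     *   MORIBUND    -> DEAD       final message seen; drop the reference
     *
     * CPL_ABORT_REQ_RSS can force any live state straight to DEAD.
     */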
1673static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
1674{
1675        pr_debug("%s: csk %p; tid %u; state %d\n",
1676                 __func__, csk, csk->tid, csk->com.state);
1677
1678        switch (csk->com.state) {
1679        case CSK_STATE_ESTABLISHED:
1680                csk->com.state = CSK_STATE_CLOSING;
1681                cxgbit_queue_rx_skb(csk, skb);
1682                return;
1683        case CSK_STATE_CLOSING:
1684                /* simultaneous close */
1685                csk->com.state = CSK_STATE_MORIBUND;
1686                break;
1687        case CSK_STATE_MORIBUND:
1688                csk->com.state = CSK_STATE_DEAD;
1689                cxgbit_put_csk(csk);
1690                break;
1691        case CSK_STATE_ABORTING:
1692                break;
1693        default:
1694                pr_info("%s: cpl_peer_close in bad state %d\n",
1695                        __func__, csk->com.state);
1696        }
1697
1698        __kfree_skb(skb);
1699}
1700
1701static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1702{
1703        pr_debug("%s: csk %p; tid %u; state %d\n",
1704                 __func__, csk, csk->tid, csk->com.state);
1705
1706        switch (csk->com.state) {
1707        case CSK_STATE_CLOSING:
1708                csk->com.state = CSK_STATE_MORIBUND;
1709                break;
1710        case CSK_STATE_MORIBUND:
1711                csk->com.state = CSK_STATE_DEAD;
1712                cxgbit_put_csk(csk);
1713                break;
1714        case CSK_STATE_ABORTING:
1715        case CSK_STATE_DEAD:
1716                break;
1717        default:
1718                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
1719                        __func__, csk->com.state);
1720        }
1721
1722        __kfree_skb(skb);
1723}
1724
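    /*
     * Peer RST or firmware-generated abort. Negative advice is logged and
     * ignored; otherwise the connection goes to DEAD, pending tx is
     * purged, and CPL_ABORT_RPL is built from the pre-allocated csk->skbq
     * reserve so the mandatory reply cannot fail on allocation. For a
     * connection that was still ESTABLISHED the skb is handed to the rx
     * thread, which presumably performs the final teardown.
     */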
1725static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1726{
1727        struct cpl_abort_req_rss *hdr = cplhdr(skb);
1728        unsigned int tid = GET_TID(hdr);
1729        struct sk_buff *rpl_skb;
1730        bool release = false;
1731        bool wakeup_thread = false;
1732        u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
1733
1734        pr_debug("%s: csk %p; tid %u; state %d\n",
1735                 __func__, csk, tid, csk->com.state);
1736
1737        if (cxgb_is_neg_adv(hdr->status)) {
1738                pr_err("%s: got negative advice %d on tid %u\n",
1739                       __func__, hdr->status, tid);
1740                goto rel_skb;
1741        }
1742
1743        switch (csk->com.state) {
1744        case CSK_STATE_CONNECTING:
1745        case CSK_STATE_MORIBUND:
1746                csk->com.state = CSK_STATE_DEAD;
1747                release = true;
1748                break;
1749        case CSK_STATE_ESTABLISHED:
1750                csk->com.state = CSK_STATE_DEAD;
1751                wakeup_thread = true;
1752                break;
1753        case CSK_STATE_CLOSING:
1754                csk->com.state = CSK_STATE_DEAD;
1755                if (!csk->conn)
1756                        release = true;
1757                break;
1758        case CSK_STATE_ABORTING:
1759                break;
1760        default:
1761                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
1762                        __func__, csk->com.state);
1763                csk->com.state = CSK_STATE_DEAD;
1764        }
1765
1766        __skb_queue_purge(&csk->txq);
1767
1768        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
1769                cxgbit_send_tx_flowc_wr(csk);
1770
1771        rpl_skb = __skb_dequeue(&csk->skbq);
1772
1773        cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
1774        cxgbit_ofld_send(csk->com.cdev, rpl_skb);
1775
1776        if (wakeup_thread) {
1777                cxgbit_queue_rx_skb(csk, skb);
1778                return;
1779        }
1780
1781        if (release)
1782                cxgbit_put_csk(csk);
1783rel_skb:
1784        __kfree_skb(skb);
1785}
1786
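    /*
     * Reply to an abort this side initiated: the connection should already
     * be ABORTING, so this completes the transition to DEAD and, when
     * CSK_ABORT_RPL_WAIT is set, wakes the waiter that requested
     * notification.
     */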
1787static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
1788{
1789        struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1790
1791        pr_debug("%s: csk %p; tid %u; state %d\n",
1792                 __func__, csk, csk->tid, csk->com.state);
1793
1794        switch (csk->com.state) {
1795        case CSK_STATE_ABORTING:
1796                csk->com.state = CSK_STATE_DEAD;
1797                if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
1798                        cxgbit_wake_up(&csk->com.wr_wait, __func__,
1799                                       rpl->status);
1800                cxgbit_put_csk(csk);
1801                break;
1802        default:
1803                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
1804                        __func__, csk->com.state);
1805        }
1806
1807        __kfree_skb(skb);
1808}
1809
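    /*
     * Flow-control sanity check. Each pending WR stashes its credit count
     * in skb->csum (an overloaded field, hence the __force casts), so the
     * credits currently available plus those still in flight must add up
     * to the per-connection maximum; anything else is a bookkeeping error.
     */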
1810static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
1811{
1812        const struct sk_buff *skb = csk->wr_pending_head;
1813        u32 credit = 0;
1814
1815        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
1816                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
1817                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
1818                return true;
1819        }
1820
1821        while (skb) {
1822                credit += (__force u32)skb->csum;
1823                skb = cxgbit_skcb_tx_wr_next(skb);
1824        }
1825
1826        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
1827                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
1828                       csk, csk->tid, csk->wr_cred,
1829                       credit, csk->wr_max_cred);
1830
1831                return true;
1832        }
1833
1834        return false;
1835}
1836
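    /*
     * CPL_FW4_ACK returns tx credits and, when the SEQVAL flag is set, a
     * new snd_una. Fully acked WRs are popped off the pending list by
     * walking the per-skb credit counts; a partially acked WR keeps the
     * remainder in skb->csum. On an accounting mismatch the skb is queued
     * to the rx thread, presumably so the connection can be torn down
     * outside this path.
     */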
1837static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
1838{
1839        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
1840        u32 credits = rpl->credits;
1841        u32 snd_una = ntohl(rpl->snd_una);
1842
1843        csk->wr_cred += credits;
1844        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
1845                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
1846
1847        while (credits) {
1848                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
1849                u32 csum;
1850
1851                if (unlikely(!p)) {
1852                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
1853                               csk, csk->tid, credits,
1854                               csk->wr_cred, csk->wr_una_cred);
1855                        break;
1856                }
1857
1858                csum = (__force u32)p->csum;
1859                if (unlikely(credits < csum)) {
1860                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
1861                                csk, csk->tid,
1862                                credits, csk->wr_cred, csk->wr_una_cred,
1863                                csum);
1864                        p->csum = (__force __wsum)(csum - credits);
1865                        break;
1866                }
1867
1868                cxgbit_sock_dequeue_wr(csk);
1869                credits -= csum;
1870                kfree_skb(p);
1871        }
1872
1873        if (unlikely(cxgbit_credit_err(csk))) {
1874                cxgbit_queue_rx_skb(csk, skb);
1875                return;
1876        }
1877
1878        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
1879                if (unlikely(before(snd_una, csk->snd_una))) {
1880                pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
1881                                csk, csk->tid, snd_una,
1882                                csk->snd_una);
1883                        goto rel_skb;
1884                }
1885
1886                if (csk->snd_una != snd_una) {
1887                        csk->snd_una = snd_una;
1888                        dst_confirm(csk->dst);
1889                }
1890        }
1891
1892        if (skb_queue_len(&csk->txq))
1893                cxgbit_push_tx_frames(csk);
1894
1895rel_skb:
1896        __kfree_skb(skb);
1897}
1898
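    /*
     * Completion side of the CPL_SET_TCB_FIELD requests issued by the
     * digest/pgidx helpers above: look the tid back up and complete the
     * waiter. The put pairs with the get taken in cxgbit_send_tcb_skb().
     */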
1899static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1900{
1901        struct cxgbit_sock *csk;
1902        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1903        unsigned int tid = GET_TID(rpl);
1904        struct cxgb4_lld_info *lldi = &cdev->lldi;
1905        struct tid_info *t = lldi->tids;
1906
1907        csk = lookup_tid(t, tid);
1908        if (unlikely(!csk)) {
1909                pr_err("can't find connection for tid %u.\n", tid);
1910                goto rel_skb;
1911        }
1912
1913        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
1914
1915        cxgbit_put_csk(csk);
1916rel_skb:
1917        __kfree_skb(skb);
1918}
1919
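    /*
     * Plain TCP payload (CPL_RX_DATA) is not interpreted here; it is
     * looked up by tid and queued for the rx thread to examine.
     */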
1920static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
1921{
1922        struct cxgbit_sock *csk;
1923        struct cpl_rx_data *cpl = cplhdr(skb);
1924        unsigned int tid = GET_TID(cpl);
1925        struct cxgb4_lld_info *lldi = &cdev->lldi;
1926        struct tid_info *t = lldi->tids;
1927
1928        csk = lookup_tid(t, tid);
1929        if (unlikely(!csk)) {
1930                pr_err("can't find connection for tid %u.\n", tid);
1931                goto rel_skb;
1932        }
1933
1934        cxgbit_queue_rx_skb(csk, skb);
1935        return;
1936rel_skb:
1937        __kfree_skb(skb);
1938}
1939
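    /*
     * Run a CPL handler with the sock lock contended: if another context
     * currently owns csk->lock (csk->lock_owner), park the skb on
     * csk->backlogq, which the owner is expected to drain on unlock;
     * otherwise invoke the handler stashed in the skb control block
     * immediately.
     */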
1940static void
1941__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1942{
1943        spin_lock(&csk->lock);
1944        if (csk->lock_owner) {
1945                __skb_queue_tail(&csk->backlogq, skb);
1946                spin_unlock(&csk->lock);
1947                return;
1948        }
1949
1950        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
1951        spin_unlock(&csk->lock);
1952}
1953
1954static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
1955{
1956        cxgbit_get_csk(csk);
1957        __cxgbit_process_rx_cpl(csk, skb);
1958        cxgbit_put_csk(csk);
1959}
1960
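    /*
     * Backlog-capable CPLs: record the real handler in the skb control
     * block and feed it through the machinery above. FW4_ACK skips the
     * temporary csk reference; the other opcodes are terminal messages
     * whose handlers may drop the last reference, so a hold is taken
     * around the call. (The cpl_tx_data cast looks odd in an rx path, but
     * only the TID field common to all CPL headers is read from it.)
     */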
1961static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
1962{
1963        struct cxgbit_sock *csk;
1964        struct cpl_tx_data *cpl = cplhdr(skb);
1965        struct cxgb4_lld_info *lldi = &cdev->lldi;
1966        struct tid_info *t = lldi->tids;
1967        unsigned int tid = GET_TID(cpl);
1968        u8 opcode = cxgbit_skcb_rx_opcode(skb);
1969        bool ref = true;
1970
1971        switch (opcode) {
1972        case CPL_FW4_ACK:
1973                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
1974                ref = false;
1975                break;
1976        case CPL_PEER_CLOSE:
1977                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
1978                break;
1979        case CPL_CLOSE_CON_RPL:
1980                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
1981                break;
1982        case CPL_ABORT_REQ_RSS:
1983                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
1984                break;
1985        case CPL_ABORT_RPL_RSS:
1986                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
1987                break;
1988        default:
1989                goto rel_skb;
1990        }
1991
1992        csk = lookup_tid(t, tid);
1993        if (unlikely(!csk)) {
1994                pr_err("can't find connection for tid %u.\n", tid);
1995                goto rel_skb;
1996        }
1997
1998        if (ref)
1999                cxgbit_process_rx_cpl(csk, skb);
2000        else
2001                __cxgbit_process_rx_cpl(csk, skb);
2002
2003        return;
2004rel_skb:
2005        __kfree_skb(skb);
2006}
2007
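    /*
     * Opcode dispatch table consumed by the driver's uld rx path. A
     * minimal sketch of how a caller might use it (illustrative only; the
     * real dispatch lives elsewhere in the driver):
     *
     *     u8 opcode = cxgbit_skcb_rx_opcode(skb);
     *
     *     if (opcode < NUM_CPL_CMDS && cxgbit_cplhandlers[opcode])
     *             cxgbit_cplhandlers[opcode](cdev, skb);
     *     else
     *             pr_err("unhandled opcode %u\n", opcode);
     */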
2008cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
2009        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
2010        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
2011        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
2012        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
2013        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
2014        [CPL_RX_DATA]           = cxgbit_rx_data,
2015        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
2016        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
2017        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
2018        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
2019        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
2020};
2021