linux/drivers/target/iscsi/cxgbit/cxgbit_cm.c
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
	if (ret == CPL_ERR_NONE)
		wr_waitp->ret = 0;
	else
		wr_waitp->ret = -EIO;

	if (wr_waitp->ret)
		pr_err("%s: err:%u\n", func, ret);

	complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}

/* Returns whether a CPL status conveys negative advice. */
static int cxgbit_is_neg_adv(unsigned int status)
{
	return status == CPL_ERR_RTX_NEG_ADVICE ||
		status == CPL_ERR_PERSIST_NEG_ADVICE ||
		status == CPL_ERR_KEEPALV_NEG_ADVICE;
}

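/*
 * Hash a listening endpoint into the per-device np hash table: shift
 * the pointer right by 10 bits to discard low-order allocator
 * alignment bits, then mask into one of NP_INFO_HASH_SIZE buckets.
 */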
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}

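/*
 * Start an IPv6 hardware listener: claim a CLIP (Compressed Local IP)
 * table entry for the local address (unless it is the wildcard
 * address), ask the firmware to create the server via
 * cxgb4_create_server6() and wait for its reply.
 */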
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				     &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				   &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;
	return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}

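/*
 * Return true when the endpoint listens on a wildcard address
 * (INADDR_ANY or the IPv6 unspecified address); such a listener must
 * be instantiated on every registered adapter.
 */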
static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	int addr_type;

	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
			return true;
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		addr_type = ipv6_addr_type((const struct in6_addr *)
				&sin6->sin6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}

static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}

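/*
 * Wildcard listen: create a hardware server on every registered
 * cxgbit device. A firmware timeout aborts the walk; otherwise the
 * listen succeeds if at least one device accepted it.
 */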
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);

	return count ? 0 : -1;
}

int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);
	kref_init(&cnp->kref);
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}

static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;
	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_free_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	bool found = false;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cdev == cnp->com.cdev) {
			found = true;
			break;
		}
	}
	if (!found)
		goto out;

	__cxgbit_free_cdev_np(cdev, cnp);
out:
	mutex_unlock(&cdev_list_lock);
}

void cxgbit_free_np(struct iscsi_np *np)
{
	struct cxgbit_np *cnp = np->np_context;

	cnp->com.state = CSK_STATE_DEAD;
	if (cnp->com.cdev)
		cxgbit_free_cdev_np(cnp);
	else
		cxgbit_free_all_np(cnp);

	np->np_context = NULL;
	cxgbit_put_cnp(cnp);
}

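/*
 * Queue a CPL_CLOSE_CON_REQ to half-close the offloaded connection,
 * the offload equivalent of sending a FIN.
 */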
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_close_con_req *req;
	unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	req = (struct cpl_close_con_req *)__skb_put(skb, len);
	memset(req, 0, len);

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    csk->tid));
	req->rsvd = 0;

	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct cpl_abort_req *req;
	unsigned int len = roundup(sizeof(*req), 16);
	struct sk_buff *skb;

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	req = (struct cpl_abort_req *)__skb_put(skb, len);
	memset(req, 0, len);

	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
						    csk->tid));
	req->cmd = CPL_ABORT_SEND_RST;
	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	struct cxgbit_sock *csk = conn->context;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	if (release)
		cxgbit_put_csk(csk);
}

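/*
 * Derive the effective MSS from the negotiated hardware MTU table
 * entry, subtracting IP/TCP header overhead and, when timestamps were
 * negotiated, the padded timestamp option.
 */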
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
			((csk->com.remote_addr.ss_family == AF_INET) ?
			sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
			sizeof(struct tcphdr);
	csk->mss = csk->emss;
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (csk->emss < 128)
		csk->emss = 128;
	if (csk->emss & 7)
		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	__skb_queue_purge(&csk->txq);
	__skb_queue_purge(&csk->rxq);
	__skb_queue_purge(&csk->backlogq);
	__skb_queue_purge(&csk->ppodq);
	__skb_queue_purge(&csk->skbq);

	while ((skb = cxgbit_sock_dequeue_wr(csk)))
		kfree_skb(skb);

	__kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					     &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	cxgbit_put_cdev(cdev);

	kfree(csk);
}

static void
cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
		      __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
		      __be16 *peer_port)
{
	u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
	struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
	struct tcphdr *tcp = (struct tcphdr *)
			      ((u8 *)(req + 1) + eth_len + ip_len);

	if (ip->version == 4) {
		pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
			 __func__,
			 ntohl(ip->saddr), ntohl(ip->daddr),
			 ntohs(tcp->source),
			 ntohs(tcp->dest));
		*iptype = 4;
		memcpy(peer_ip, &ip->saddr, 4);
		memcpy(local_ip, &ip->daddr, 4);
	} else {
		pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
			 __func__,
			 ip6->saddr.s6_addr, ip6->daddr.s6_addr,
			 ntohs(tcp->source),
			 ntohs(tcp->dest));
		*iptype = 6;
		memcpy(peer_ip, ip6->saddr.s6_addr, 16);
		memcpy(local_ip, ip6->daddr.s6_addr, 16);
	}

	*peer_port = tcp->source;
	*local_port = tcp->dest;
}

static int
cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
{
	u8 i;

	egress_dev = cxgbit_get_real_dev(egress_dev);
	for (i = 0; i < cdev->lldi.nports; i++)
		if (cdev->lldi.ports[i] == egress_dev)
			return 1;
	return 0;
}

static struct dst_entry *
cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
		   __be16 local_port, __be16 peer_port, u8 tos,
		   __u32 sin6_scope_id)
{
	struct dst_entry *dst = NULL;

	if (IS_ENABLED(CONFIG_IPV6)) {
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		memcpy(&fl6.daddr, peer_ip, 16);
		memcpy(&fl6.saddr, local_ip, 16);
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
		/*
		 * ip6_route_output() never returns NULL; failure is
		 * reported through dst->error.
		 */
		if (dst->error ||
		    (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
		     !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
			dst_release(dst);
			dst = NULL;
		}
	}

	return dst;
}

static struct dst_entry *
cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
		  __be16 local_port, __be16 peer_port, u8 tos)
{
	struct rtable *rt;
	struct flowi4 fl4;
	struct neighbour *n;

	rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
				   local_ip,
				   peer_port, local_port, IPPROTO_TCP,
				   tos, 0);
	if (IS_ERR(rt))
		return NULL;
	n = dst_neigh_lookup(&rt->dst, &peer_ip);
	if (!n) {
		/* Drop the route reference taken above to avoid a leak. */
		dst_release(&rt->dst);
		return NULL;
	}
	if (!cxgbit_our_interface(cdev, n->dev) &&
	    !(n->dev->flags & IFF_LOOPBACK)) {
		neigh_release(n);
		dst_release(&rt->dst);
		return NULL;
	}
	neigh_release(n);
	return &rt->dst;
}

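/*
 * Size the TCP send and receive windows from the port's link speed:
 * 256KB per 10Gbps of bandwidth, e.g. 1MB on a 40G port.
 */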
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;

	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;

	return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);

	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;

		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);

	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
#endif

static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
		step = cdev->lldi.ntxq /
			cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq /
			cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[
				cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
		step = cdev->lldi.ntxq /
			cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
				(cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq /
			cdev->lldi.nports;
		rxq_idx = (port_id * step) +
				(cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
	struct cpl_tid_release *req;
	unsigned int len = roundup(sizeof(*req), 16);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	req = (struct cpl_tid_release *)__skb_put(skb, len);
	memset(req, 0, len);

	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
		   CPL_TID_RELEASE, tid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
		struct l2t_entry *l2e)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

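/*
 * Pick the hardware MTU table index that best fits the path MTU while
 * keeping the TCP payload 8-byte aligned after the IP/TCP (and
 * optional timestamp) headers.
 */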
static void
cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
		unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				   sizeof(struct tcphdr) +
				   (use_ts ? round_up(TCPOLEN_TIMESTAMP,
				    4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	if (csk->com.state != CSK_STATE_ESTABLISHED) {
		__kfree_skb(skb);
		return;
	}

	cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Return RX credits to the hardware through an RX_DATA_ACK CPL message.
 * Returns 0 on success, -1 if the message could not be allocated.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_rx_data_ack *req;
	unsigned int len = roundup(sizeof(*req), 16);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -1;

	req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
	memset(req, 0, len);

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
				       RX_CREDITS_V(csk->rx_credits));

	csk->rx_credits = 0;

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock_bh(&csk->lock);
		return 0;
	}

	cxgbit_send_rx_credits(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}

#define FLOWC_WR_NPARAMS_MIN    9
#define FLOWC_WR_NPARAMS_MAX    11
static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len, flowclen;
	u8 i;

	flowclen = offsetof(struct fw_flowc_wr,
			    mnemval[FLOWC_WR_NPARAMS_MAX]);

	len = max_t(u32, sizeof(struct cpl_abort_req),
		    sizeof(struct cpl_abort_rpl));

	len = max(len, flowclen);
	len = roundup(len, 16);

	for (i = 0; i < 3; i++) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
			goto out;
		__skb_queue_tail(&csk->skbq, skb);
	}

	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
	if (!skb)
		goto out;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	csk->lro_hskb = skb;

	return 0;
out:
	__skb_queue_purge(&csk->skbq);
	return -ENOMEM;
}

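/*
 * Smallest window-scale shift that lets a 16-bit window advertise
 * "win", capped at 14 per RFC 1323. E.g. a 256KB (262144 byte) window
 * needs wscale 3, since 65535 << 2 still falls just short of it.
 */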
static u32 cxgbit_compute_wscale(u32 win)
{
	u32 wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len);
	memset(rpl5, 0, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
			req->tcpopt.tstamp,
			(csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgbit_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 =  TCAM_BYPASS_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(csk->l2t->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		DSCP_V(csk->tos >> 2) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
		RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	hlen = ntohl(req->hdr_len);
	tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
		IP_HDR_LEN_G(hlen);

	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

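/*
 * Handle CPL_PASS_ACCEPT_REQ: an incoming SYN matched one of our
 * hardware listeners. Look up the parent endpoint by stid, resolve a
 * route and L2T entry for the peer, allocate and initialize a
 * cxgbit_sock, insert it by tid and send the accept reply.
 */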
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = NULL;
	struct cxgbit_np *cnp;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	u16 peer_mss = ntohs(req->tcpopt.mss);
	unsigned short hdrs;

	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int ret;
	int iptype;

	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
		 __func__, cdev, stid, tid);

	cnp = lookup_stid(t, stid);
	if (!cnp) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto rel_skb;
	}

	if (cnp->com.state != CSK_STATE_LISTEN) {
		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
		       __func__);
		goto reject;
	}

	csk = lookup_tid(t, tid);
	if (csk) {
		pr_err("%s csk not null tid %u\n",
		       __func__, tid);
		goto rel_skb;
	}

	cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
			      &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4)  {
		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
					*(__be32 *)peer_ip,
					local_port, peer_port,
					PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
					 local_port, peer_port,
					 PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
					 ((struct sockaddr_in6 *)
					 &cnp->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
	if (!csk) {
		dst_release(dst);
		goto rel_skb;
	}

	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
				  dst, cdev);
	if (ret) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(csk);
		goto reject;
	}

	kref_init(&csk->kref);
	init_completion(&csk->com.wr_wait.completion);

	INIT_LIST_HEAD(&csk->accept_node);

	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
	if (peer_mss && csk->mtu > (peer_mss + hdrs))
		csk->mtu = peer_mss + hdrs;

	csk->com.state = CSK_STATE_CONNECTING;
	csk->com.cdev = cdev;
	csk->cnp = cnp;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->dst = dst;
	csk->tid = tid;
	csk->wr_cred = cdev->lldi.wr_cred -
			DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &csk->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&csk->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		cxgb4_clip_get(cdev->lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr,
			       1);

		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	skb_queue_head_init(&csk->rxq);
	skb_queue_head_init(&csk->txq);
	skb_queue_head_init(&csk->ppodq);
	skb_queue_head_init(&csk->backlogq);
	skb_queue_head_init(&csk->skbq);
	cxgbit_sock_reset_wr_list(csk);
	spin_lock_init(&csk->lock);
	init_waitqueue_head(&csk->waitq);
	init_waitqueue_head(&csk->ack_waitq);
	csk->lock_owner = false;

	if (cxgbit_alloc_csk_skb(csk)) {
		dst_release(dst);
		kfree(csk);
		goto rel_skb;
	}

	cxgbit_get_cdev(cdev);

	spin_lock(&cdev->cskq.lock);
	list_add_tail(&csk->list, &cdev->cskq.list);
	spin_unlock(&cdev->cskq.lock);

	cxgb4_insert_tid(t, csk, tid);
	cxgbit_pass_accept_rpl(csk, req);
	goto rel_skb;

reject:
	cxgbit_release_tid(cdev, tid);
rel_skb:
	__kfree_skb(skb);
}

static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
			   u32 *flowclenp)
{
	u32 nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;

	if (csk->snd_wscale)
		nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif
	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the flowc request.
	 * Pass back the nparams and actual flowc length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;
	return flowclen16;
}

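/*
 * Send the FW_FLOWC_WR that must precede any payload on an offloaded
 * connection, seeding the firmware with the connection's channel,
 * ingress queue, sequence numbers, send window and MSS. Returns the
 * number of 16-byte credits the work request consumed.
 */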
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
	memset(flowc, 0, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}

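/*
 * Program iSCSI digest offload through CPL_SET_TCB_FIELD: the two-bit
 * field at bits 5:4 of TCB word 0 (the ULP submode) selects header
 * and/or data CRC processing negotiated at login time.
 */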
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;
        u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
        u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
        unsigned int len = roundup(sizeof(*req), 16);
        int ret;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* set up ulp submode */
        req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
        memset(req, 0, len);

        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 4);
        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        cxgbit_ofld_send(csk->com.cdev, skb);

        ret = cxgbit_wait_for_reply(csk->com.cdev,
                                    &csk->com.wr_wait,
                                    csk->tid, 5, __func__);
        if (ret)
                return -1;

        return 0;
}

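/*
 * Program the DDP page-size index for this connection.  Same
 * CPL_SET_TCB_FIELD flow as cxgbit_setup_conn_digest(), but the
 * mask/val pair targets the two page-size bits at 9:8 of the same
 * TCB word.  The reference taken before sending is again released by
 * cxgbit_set_tcb_rpl().
 */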
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;
        unsigned int len = roundup(sizeof(*req), 16);
        int ret;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
        memset(req, 0, len);

        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 8);
        req->val = cpu_to_be64(pg_idx << 8);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        cxgbit_ofld_send(csk->com.cdev, skb);

        ret = cxgbit_wait_for_reply(csk->com.cdev,
                                    &csk->com.wr_wait,
                                    csk->tid, 5, __func__);
        if (ret)
                return -1;

        return 0;
}

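/*
 * Reply to a CPL_PASS_OPEN_REQ (listening server open).  Look up the
 * cxgbit_np by server tid and complete the waiter with the hardware
 * status; cxgbit_wake_up() maps any non-CPL_ERR_NONE status to -EIO.
 */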
static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        /* the handler owns the skb; returning without freeing leaked it */
        __kfree_skb(skb);
}

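/*
 * Reply to a CPL_CLOSE_LISTSRV_REQ (listening server close); mirrors
 * cxgbit_pass_open_rpl() above.
 */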
static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        /* free the skb on both paths, as the other handlers do */
        __kfree_skb(skb);
}

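/*
 * Final CPL of a passive open.  Seed the send/receive sequence state
 * from the ISNs supplied by hardware, compute the effective MSS from
 * the negotiated TCP options, then move the connection to
 * ESTABLISHED and queue it on the np accept list, waking anyone
 * blocked in accept.
 */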
static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        struct cxgbit_sock *csk;
        struct cxgbit_np *cnp;
        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
        u32 snd_isn = be32_to_cpu(req->snd_isn);
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        cnp = csk->cnp;

        pr_debug("%s: csk %p; tid %u; cnp %p\n",
                 __func__, csk, tid, cnp);

        csk->write_seq = snd_isn;
        csk->snd_una = snd_isn;
        csk->snd_nxt = snd_isn;

        csk->rcv_nxt = rcv_isn;

        if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
                csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
        cxgbit_set_emss(csk, tcp_opt);
        dst_confirm(csk->dst);
        csk->com.state = CSK_STATE_ESTABLISHED;
        spin_lock_bh(&cnp->np_accept_lock);
        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
        spin_unlock_bh(&cnp->np_accept_lock);
        complete(&cnp->accept_comp);
rel_skb:
        __kfree_skb(skb);
}

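/* Queue an ingress skb for the connection's rx thread and wake it. */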
static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_skcb_flags(skb) = 0;
        spin_lock_bh(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        spin_unlock_bh(&csk->rxq.lock);
        wake_up(&csk->waitq);
}

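/*
 * Peer sent a FIN.  In ESTABLISHED the skb is pushed up to the rx
 * thread so the upper layer sees the half-close; otherwise only the
 * connection state machine advances (MORIBUND on simultaneous close,
 * DEAD once both directions are shut down).
 */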
static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_CLOSING;
                cxgbit_queue_rx_skb(csk, skb);
                return;
        case CSK_STATE_CLOSING:
                /* simultaneous close */
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_peer_close in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

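/*
 * Hardware acknowledged our CPL_CLOSE_CON_REQ; advance the close
 * state machine and drop a reference once the connection is fully
 * dead.
 */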
static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
        case CSK_STATE_DEAD:
                break;
        default:
                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

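/*
 * Peer aborted the connection (RST).  Negative advice is ignored.
 * Otherwise the connection goes DEAD, pending TX is purged, and a
 * CPL_ABORT_RPL carrying CPL_ABORT_NO_RST is returned on a
 * pre-allocated skb from csk->skbq.  Firmware expects a FLOWC WR
 * ahead of any other TX WR on a tid, hence the CSK_TX_DATA_SENT
 * check before sending the reply.
 */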
static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_abort_req_rss *hdr = cplhdr(skb);
        unsigned int tid = GET_TID(hdr);
        struct cpl_abort_rpl *rpl;
        struct sk_buff *rpl_skb;
        bool release = false;
        bool wakeup_thread = false;
        unsigned int len = roundup(sizeof(*rpl), 16);

        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, tid, csk->com.state);

        if (cxgbit_is_neg_adv(hdr->status)) {
                pr_err("%s: got negative advice %d on tid %u\n",
                       __func__, hdr->status, tid);
                goto rel_skb;
        }

        switch (csk->com.state) {
        case CSK_STATE_CONNECTING:
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                release = true;
                break;
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_DEAD;
                wakeup_thread = true;
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_DEAD;
                if (!csk->conn)
                        release = true;
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_abort_req_rss in bad state %d\n",
                        __func__, csk->com.state);
                csk->com.state = CSK_STATE_DEAD;
        }

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        rpl_skb = __skb_dequeue(&csk->skbq);
        /* the TX queue must be set on the skb actually being sent */
        set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, csk->txq_idx);

        rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
        memset(rpl, 0, len);

        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        rpl->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(csk->com.cdev, rpl_skb);

        if (wakeup_thread) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (release)
                cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

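/*
 * Hardware acknowledged an abort we initiated; release the csk once
 * the ABORTING -> DEAD transition completes.
 */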
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ABORTING:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        default:
                pr_info("%s: cpl_abort_rpl_rss in state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

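/*
 * TX-credit accounting sanity check.  Each skb on the wr_pending
 * list stashes the credits its WR consumed in skb->csum; the credits
 * currently available plus the credits still in flight must always
 * equal wr_max_cred.
 */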
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
        const struct sk_buff *skb = csk->wr_pending_head;
        u32 credit = 0;

        if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u > %u\n",
                       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
                return true;
        }

        while (skb) {
                credit += skb->csum;
                skb = cxgbit_skcb_tx_wr_next(skb);
        }

        if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
                pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
                       csk, csk->tid, csk->wr_cred,
                       credit, csk->wr_max_cred);

                return true;
        }

        return false;
}

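/*
 * TX completion.  Firmware returns WR credits and, when SEQVAL is
 * set, the current snd_una.  Fully-acked WR skbs are dequeued from
 * the pending list and freed, a partially-acked head skb has its
 * remaining credit count (skb->csum) adjusted, and any backlogged TX
 * is pushed.  On inconsistent credit state the skb is handed to the
 * rx thread for error handling.
 */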
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
        u32 credits = rpl->credits;
        u32 snd_una = ntohl(rpl->snd_una);

        csk->wr_cred += credits;
        if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
                csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

        while (credits) {
                struct sk_buff *p = cxgbit_sock_peek_wr(csk);

                if (unlikely(!p)) {
                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
                               csk, csk->tid, credits,
                               csk->wr_cred, csk->wr_una_cred);
                        break;
                }

                if (unlikely(credits < p->csum)) {
                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
                                csk, csk->tid,
                                credits, csk->wr_cred, csk->wr_una_cred,
                                p->csum);
                        p->csum -= credits;
                        break;
                }

                cxgbit_sock_dequeue_wr(csk);
                credits -= p->csum;
                kfree_skb(p);
        }

        if (unlikely(cxgbit_credit_err(csk))) {
                cxgbit_queue_rx_skb(csk, skb);
                return;
        }

        if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
                if (unlikely(before(snd_una, csk->snd_una))) {
                        pr_warn("csk 0x%p,%u, snd_una %u/%u.\n",
                                csk, csk->tid, snd_una,
                                csk->snd_una);
                        goto rel_skb;
                }

                if (csk->snd_una != snd_una) {
                        csk->snd_una = snd_una;
                        dst_confirm(csk->dst);
                        wake_up(&csk->ack_waitq);
                }
        }

        if (skb_queue_len(&csk->txq))
                cxgbit_push_tx_frames(csk);

rel_skb:
        __kfree_skb(skb);
}

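/*
 * Reply to the CPL_SET_TCB_FIELD issued by cxgbit_setup_conn_digest()
 * or cxgbit_setup_conn_pgidx(); completes the waiter and drops the
 * reference those functions took before sending.
 */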
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                /* don't call cxgbit_put_csk() on a NULL csk */
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
        cxgbit_put_csk(csk);
rel_skb:
        __kfree_skb(skb);
}

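/* Ingress payload: route CPL_RX_DATA to the owning connection. */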
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_rx_data *cpl = cplhdr(skb);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        cxgbit_queue_rx_skb(csk, skb);
        return;
rel_skb:
        __kfree_skb(skb);
}

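/*
 * Run a connection-scoped CPL under csk->lock.  If the lock owner is
 * active, the skb is deferred to the backlog queue instead and is
 * replayed later via the backlog_fn stashed in its control block.
 */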
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        spin_lock(&csk->lock);
        if (csk->lock_owner) {
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock(&csk->lock);
                return;
        }

        cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
        spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_get_csk(csk);
        __cxgbit_process_rx_cpl(csk, skb);
        cxgbit_put_csk(csk);
}

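/*
 * Demultiplex connection-scoped CPLs.  The per-opcode handler is
 * recorded in the skb control block so a deferred skb can still be
 * dispatched from the backlog queue.  CPL_FW4_ACK is processed
 * without the extra get/put reference pair used for the other
 * opcodes.
 */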
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk;
        struct cpl_tx_data *cpl = cplhdr(skb);
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct tid_info *t = lldi->tids;
        unsigned int tid = GET_TID(cpl);
        u8 opcode = cxgbit_skcb_rx_opcode(skb);
        bool ref = true;

        switch (opcode) {
        case CPL_FW4_ACK:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
                ref = false;
                break;
        case CPL_PEER_CLOSE:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
                break;
        case CPL_CLOSE_CON_RPL:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
                break;
        case CPL_ABORT_REQ_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
                break;
        case CPL_ABORT_RPL_RSS:
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
                break;
        default:
                goto rel_skb;
        }

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        if (ref)
                cxgbit_process_rx_cpl(csk, skb);
        else
                __cxgbit_process_rx_cpl(csk, skb);

        return;
rel_skb:
        __kfree_skb(skb);
}

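/* CPL dispatch table, indexed by the opcode of the ingress message. */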
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_PASS_OPEN_RPL]     = cxgbit_pass_open_rpl,
        [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
        [CPL_PASS_ACCEPT_REQ]   = cxgbit_pass_accept_req,
        [CPL_PASS_ESTABLISH]    = cxgbit_pass_establish,
        [CPL_SET_TCB_RPL]       = cxgbit_set_tcb_rpl,
        [CPL_RX_DATA]           = cxgbit_rx_data,
        [CPL_FW4_ACK]           = cxgbit_rx_cpl,
        [CPL_PEER_CLOSE]        = cxgbit_rx_cpl,
        [CPL_CLOSE_CON_RPL]     = cxgbit_rx_cpl,
        [CPL_ABORT_REQ_RSS]     = cxgbit_rx_cpl,
        [CPL_ABORT_RPL_RSS]     = cxgbit_rx_cpl,
};
