linux/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:  Karen Xie (kxie@chelsio.com)
 *              Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

#define DRV_MODULE_NAME         "cxgb4i"
#define DRV_MODULE_DESC         "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION      "0.9.5-ko"
#define DRV_MODULE_RELDATE      "Apr. 2015"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
                "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .lro = false,
        .add = t4_uld_add,
        .rx_handler = t4_uld_rx_handler,
        .state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
        .module         = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .proc_name      = DRV_MODULE_NAME,
        .can_queue      = CXGB4I_SCSI_HOST_QDEPTH,
        .queuecommand   = iscsi_queuecommand,
        .change_queue_depth = scsi_change_queue_depth,
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
        .eh_timed_out   = iscsi_eh_cmd_timed_out,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc   = iscsi_target_alloc,
        .dma_boundary   = PAGE_SIZE - 1,
        .this_id        = -1,
        .track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
        .owner          = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                                CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                                CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible        = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
        .create_session = cxgbi_create_session,
        .destroy_session        = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn    = cxgbi_create_conn,
        .bind_conn              = cxgbi_bind_conn,
        .destroy_conn   = iscsi_tcp_conn_teardown,
        .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_conn_stop,
        .get_conn_param = iscsi_conn_get_param,
        .set_param      = cxgbi_set_conn_param,
        .get_stats      = cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu       = iscsi_conn_send_pdu,
        /* task */
        .init_task      = iscsi_tcp_task_init,
        .xmit_task      = iscsi_tcp_task_xmit,
        .cleanup_task   = cxgbi_cleanup_task,
        /* pdu */
        .alloc_pdu      = cxgbi_conn_alloc_pdu,
        .init_pdu       = cxgbi_conn_init_pdu,
        .xmit_pdu       = cxgbi_conn_xmit_pdu,
        .parse_pdu_itt  = cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param   = cxgbi_get_ep_param,
        .ep_connect     = cxgbi_ep_connect,
        .ep_poll        = cxgbi_ep_poll,
        .ep_disconnect  = cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static int
cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);

static struct notifier_block cxgb4_dcb_change = {
        .notifier_call = cxgb4_dcb_change_notify,
};
#endif

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message-passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */

#define RCV_BUFSIZ_MASK         0x3FFU
#define MAX_IMM_TX_PKT_LEN      256

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
        int len = skb->len;

        if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
                len += sizeof(struct fw_ofld_tx_data_wr);

        return len <= MAX_IMM_TX_PKT_LEN;
}

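/*
 * send_act_open_req - send an active open request (IPv4)
 * Builds the chip-specific CPL_ACT_OPEN_REQ (T4, T5 or T6 layout) in the
 * pre-allocated skb and hands it to the L2T layer for transmission.
 */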
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(csk->mss_idx) |
                L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(csk->rcv_win >> 10);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
                RSS_QUEUE_V(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req *req =
                                (struct cpl_act_open_req *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t));
                opt2 |= RX_FC_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req *req =
                                (struct cpl_t5_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be64(FILTER_TUPLE_V(
                                cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t)));
                req->rsvd = cpu_to_be32(isn);
                opt2 |= T5_ISS_VALID;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else {
                struct cpl_t6_act_open_req *req =
                                (struct cpl_t6_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                            qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be64(FILTER_TUPLE_V(
                                cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t)));
                req->rsvd = cpu_to_be32(isn);

                opt2 |= T5_ISS_VALID;
                opt2 |= RX_FC_DISABLE_F;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);
                req->rsvd2 = cpu_to_be32(0);
                req->opt3 = cpu_to_be32(0);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                          "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                          csk, &req->local_ip, ntohs(req->local_port),
                          &req->peer_ip, ntohs(req->peer_port),
                          csk->atid, csk->rss_qid);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

        pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
                       csk->state, csk->flags, csk->atid, csk->rss_qid);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                               struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(csk->mss_idx) |
                L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(csk->rcv_win >> 10);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
                RSS_QUEUE_V(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req6 *req =
                            (struct cpl_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;

                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                    8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                    8);

                req->opt0 = cpu_to_be64(opt0);

                opt2 |= RX_FC_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t));
        } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req6 *req =
                                (struct cpl_t5_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;
                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                        8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                        8);
                req->opt0 = cpu_to_be64(opt0);

                opt2 |= T5_OPT_2_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));
        } else {
                struct cpl_t6_act_open_req6 *req =
                                (struct cpl_t6_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;
                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                        8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                        8);
                req->opt0 = cpu_to_be64(opt0);

                opt2 |= RX_FC_DISABLE_F;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));

                req->rsvd2 = cpu_to_be32(0);
                req->opt3 = cpu_to_be32(0);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

        pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
                CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
                csk->flags, csk->atid,
                &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
                csk->rss_qid);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

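/*
 * send_close_req - initiate a graceful close
 * Queues a CPL_CLOSE_CON_REQ on the connection's write queue; the request
 * is pushed out with any pending tx data once the connection is established.
 */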
static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u.\n",
                csk, csk->state, csk->flags, csk->tid);
        csk->cpl_close = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
        req->rsvd = 0;

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}

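/*
 * abort_arp_failure - ARP failure handler for an abort request
 * ARP resolution is not required to tear the connection down; clear the
 * "send RST" command and push the abort out through the offload queue.
 */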
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
        struct cpl_abort_req *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
                csk, csk->state, csk->flags, csk->tid);
        req = (struct cpl_abort_req *)skb->data;
        req->cmd = CPL_ABORT_NO_RST;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

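/*
 * send_abort_req - abort the connection
 * Moves the socket to CTP_ABORTING, purges the write queue and sends a
 * CPL_ABORT_REQ (with RST) carrying the current snd_nxt. A flowc WR is
 * sent first if no tx data has gone out on this tid yet.
 */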
static void send_abort_req(struct cxgbi_sock *csk)
{
        struct cpl_abort_req *req;
        struct sk_buff *skb = csk->cpl_abort_req;

        if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                return;

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                send_tx_flowc_wr(csk);
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }

        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        req->cmd = CPL_ABORT_SEND_RST;
        t4_set_arp_err_handler(skb, csk, abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host -> ASIC.
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                      csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
                                       | RX_FORCE_ACK_F);
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
        unsigned int flits, cnt;

        if (is_ofld_imm(skb))
                return DIV_ROUND_UP(skb->len, 8);
        flits = skb_transport_offset(skb) / 8;
        cnt = skb_shinfo(skb)->nr_frags;
        if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
}

#define FLOWC_WR_NPARAMS_MIN    9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
        int nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;
#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the FlowC request.
         * Pass back the nparams and actual FlowC length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;

        return flowclen16;
}

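/*
 * send_tx_flowc_wr - send the per-connection flowc work request
 * The flowc WR tells the firmware the tx channel, rss queue, initial
 * sequence numbers, send window and MSS for this tid. It must precede any
 * tx data and itself consumes tx credits; the number of 16-byte credits
 * used is returned to the caller. The WR allocation is assumed to succeed
 * (the skb is dereferenced without a NULL check).
 */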
static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;
        struct fw_flowc_wr *flowc;
        int nparams, flowclen16, flowclen;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif
        flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
        skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
        flowc->op_to_nparams =
                htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 =
                htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = htonl(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = htonl(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = htonl(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = htonl(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = htonl(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = htonl(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = htonl(csk->advmss);
        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        flowc->mnemval[8].val = htonl(16384);
#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == CPL_L2T_VLAN_NONE) {
                pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
                                    csk->tid);
                flowc->mnemval[9].val = cpu_to_be32(0);
        } else {
                flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
                                        VLAN_PRIO_SHIFT);
        }
#endif

        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
                csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
                csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
                csk->advmss);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

        return flowclen16;
}

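/*
 * make_tx_data_wr - prepend a FW_OFLD_TX_DATA_WR header to a tx skb
 * For small packets the payload is carried as immediate data inside the
 * WR; otherwise the header only describes the payload that follows. The
 * iSCSI ULP submode (header/data digest) is encoded in tunnel_to_proxy.
 */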
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
                                   int dlen, int len, u32 credits, int compl)
{
        struct fw_ofld_tx_data_wr *req;
        unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
        unsigned int wr_ulp_mode = 0, val;
        bool imm = is_ofld_imm(skb);

        req = __skb_push(skb, sizeof(*req));

        if (imm) {
                req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL_F |
                                        FW_WR_IMMDLEN_V(dlen));
                req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
                                                FW_WR_LEN16_V(credits));
        } else {
                req->op_to_immdlen =
                        cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL_F |
                                        FW_WR_IMMDLEN_V(0));
                req->flowid_len16 =
                        cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
                                        FW_WR_LEN16_V(credits));
        }
        if (submode)
                wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
                                FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
        val = skb_peek(&csk->write_queue) ? 0 : 1;
        req->tunnel_to_proxy = htonl(wr_ulp_mode |
                                     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
        req->plen = htonl(len);
        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
        kfree_skb(skb);
}

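/*
 * push_tx_frames - push queued tx skbs to the hardware
 * Drains the write queue while tx credits last, charging each skb the
 * credits its WR will consume (stashed in skb->csum for the completion
 * path) and adding the FW_OFLD_TX_DATA_WR header where needed.
 */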
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
                         1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int dlen = skb->len;
                int len = skb->len;
                unsigned int credits_needed;
                int flowclen16 = 0;

                skb_reset_transport_header(skb);
                if (is_ofld_imm(skb))
                        credits_needed = DIV_ROUND_UP(dlen, 16);
                else
                        credits_needed = DIV_ROUND_UP(
                                                8 * calc_tx_flits_ofld(skb),
                                                16);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
                        credits_needed += DIV_ROUND_UP(
                                        sizeof(struct fw_ofld_tx_data_wr),
                                        16);

                /*
                 * Assumes the initial number of credits is large enough to
                 * support the fw_flowc_wr plus the largest possible first
                 * payload.
                 */
                if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                        flowclen16 = send_tx_flowc_wr(csk);
                        csk->wr_cred -= flowclen16;
                        csk->wr_una_cred += flowclen16;
                        cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
                }

                if (csk->wr_cred < credits_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                "csk 0x%p, skb %u/%u, wr %d < %u.\n",
                                csk, skb->len, skb->data_len,
                                credits_needed, csk->wr_cred);
                        break;
                }
                __skb_unlink(skb, &csk->write_queue);
                set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
                skb->csum = credits_needed + flowclen16;
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, credits_needed,
                        csk->wr_cred, csk->wr_una_cred);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                        make_tx_data_wr(csk, skb, dlen, len, credits_needed,
                                        req_completion);
                        csk->snd_nxt += len;
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
                           (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
                        struct cpl_close_con_req *req =
                                (struct cpl_close_con_req *)skb->data;
                        req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
                }
                total_size += skb->truesize;
                t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
                        csk, csk->state, csk->flags, csk->tid, skb, len);

                cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
        }
        return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb4_free_atid(lldi->tids, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
                cxgbi_sock_put(csk);
        }
}

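/*
 * do_act_establish - process CPL_ACT_ESTABLISH
 * The active open completed: move the connection from its atid to the
 * hardware-assigned tid, record the receive ISN and negotiated MSS, and
 * kick off any tx data that was queued while connecting.
 */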
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
        unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
                goto rel_skb;
        }

        if (csk->atid != atid) {
                pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
                        atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
                goto rel_skb;
        }

        pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       atid, tid, csk, csk->state, csk->flags, rcv_isn);

        module_put(cdev->owner);

        cxgbi_sock_get(csk);
        csk->tid = tid;
        cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

        free_atid(csk);

        spin_lock_bh(&csk->lock);
        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        /*
         * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
         * pass through opt0.
         */
        if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

        csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
        if (TCPOPT_TSTAMP_G(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
                        csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 0);
                cxgbi_conn_tx_open(csk);
        }
        spin_unlock_bh(&csk->lock);

rel_skb:
        __kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

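/*
 * csk_act_open_retry_timer - retry an active open
 * Timer armed when the open failed with CPL_ERR_CONN_EXIST; re-allocates
 * the chip-specific request and resends it through the address family's
 * open routine.
 */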
static void csk_act_open_retry_timer(struct timer_list *t)
{
        struct sk_buff *skb = NULL;
        struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
                                   struct l2t_entry *);
        int t4 = is_t4(lldi->adapter_type), size, size6;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (t4) {
                size = sizeof(struct cpl_act_open_req);
                size6 = sizeof(struct cpl_act_open_req6);
        } else {
                size = sizeof(struct cpl_t5_act_open_req);
                size6 = sizeof(struct cpl_t5_act_open_req6);
        }

        if (csk->csk_family == AF_INET) {
                send_act_open_func = send_act_open_req;
                skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                send_act_open_func = send_act_open_req6;
                skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
        }

        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
                                       cxgbi_sock_act_open_req_arp_failure);
                send_act_open_func(csk, skb, csk->l2t);
        }

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}

static inline bool is_neg_adv(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
                status == CPL_ERR_KEEPALV_NEG_ADVICE ||
                status == CPL_ERR_PERSIST_NEG_ADVICE;
}

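/*
 * do_act_open_rpl - process CPL_ACT_OPEN_RPL
 * A non-zero status means the active open failed: either schedule a retry
 * (for CPL_ERR_CONN_EXIST) or fail the open with the matching errno.
 */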
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
                TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
        unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
                goto rel_skb;
        }

        pr_info_ipaddr("tid %u/%u, status %u.\n"
                       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
                       atid, tid, status, csk, csk->state, csk->flags);

        if (is_neg_adv(status))
                goto rel_skb;

        module_put(cdev->owner);

        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
                cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
                                 csk->csk_family);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != csk_act_open_retry_timer) {
                csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                        act_open_rpl_status_to_errno(status));

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

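/*
 * do_peer_close - process CPL_PEER_CLOSE
 * The peer sent a FIN; advance the connection state machine accordingly.
 */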
static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
        __kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                                                int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN: /* fall through */
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ?
                        -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

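/*
 * do_abort_req_rss - process CPL_ABORT_REQ_RSS
 * The hardware or peer aborted the connection: reply with CPL_ABORT_RPL
 * and, unless our own abort is still pending, map the abort reason to an
 * errno and close the socket.
 */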
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        int rst_status = CPL_ABORT_NO_RST;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid, req->status);

        if (is_neg_adv(req->status))
                goto rel_skb;

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                send_tx_flowc_wr(csk);
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }

        cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
        cxgbi_sock_set_state(csk, CTP_ABORTING);

        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk)
                goto rel_skb;

        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
                       (&csk->saddr), (&csk->daddr), csk,
                       csk->state, csk->flags, csk->tid, rpl->status);

        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;

        cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk) {
                pr_err("can't find connection for tid %u.\n", tid);
        } else {
                /* not expecting this, reset the connection. */
                pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
                spin_lock_bh(&csk->lock);
                send_abort_req(csk);
                spin_unlock_bh(&csk->lock);
        }
        __kfree_skb(skb);
}

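/*
 * do_rx_iscsi_hdr - process CPL_ISCSI_HDR
 * Receives the iSCSI PDU header (or, on a later CPL for the same PDU, its
 * data). The first skb of a PDU becomes skb_ulp_lhdr and is validated
 * against the TCP sequence number and the length advertised by the
 * hardware.
 */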
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
        unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, skb, skb->len,
                pdu_len_ddp);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
        cxgbi_skcb_flags(skb) = 0;

        skb_reset_transport_header(skb);
        __skb_pull(skb, sizeof(*cpl));
        __pskb_trim(skb, ntohs(cpl->len));

        if (!csk->skb_ulp_lhdr) {
                unsigned char *bhs;
                unsigned int hlen, dlen, plen;

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
                        csk, csk->state, csk->flags, csk->tid, skb);
                csk->skb_ulp_lhdr = skb;
                cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

                if ((CHELSIO_CHIP_VERSION(lldi->adapter_type) <= CHELSIO_T5) &&
                    (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt)) {
                        pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
                                csk->tid, cxgbi_skcb_tcp_seq(skb),
                                csk->rcv_nxt);
                        goto abort_conn;
                }

                bhs = skb->data;
                hlen = ntohs(cpl->len);
                dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

                plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
                if (is_t4(lldi->adapter_type))
                        plen -= 40;

                if ((hlen + dlen) != plen) {
                        pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
                                "mismatch %u != %u + %u, seq 0x%x.\n",
                                csk->tid, plen, hlen, dlen,
                                cxgbi_skcb_tcp_seq(skb));
                        goto abort_conn;
                }

                cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
                if (dlen)
                        cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
                csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
                        csk, skb, *bhs, hlen, dlen,
                        ntohl(*((unsigned int *)(bhs + 16))),
                        ntohl(*((unsigned int *)(bhs + 24))));

        } else {
                struct sk_buff *lskb = csk->skb_ulp_lhdr;

                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
                        csk, csk->state, csk->flags, skb, lskb);
        }

        __skb_queue_tail(&csk->receive_queue, skb);
        spin_unlock_bh(&csk->lock);
        return;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}

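/*
 * do_rx_iscsi_data - process the data part of a received iSCSI PDU
 * Queues the payload skb behind the PDU header skb (skb_ulp_lhdr) on the
 * receive queue and marks the header skb as having its data received.
 */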
static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        struct sk_buff *lskb;
        u32 tid = GET_TID(cpl);
        u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
                  csk, csk->state, csk->flags, csk->tid, skb,
                  skb->len, pdu_len_ddp);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                          "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                          csk, csk->state, csk->flags, csk->tid);

                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
        cxgbi_skcb_flags(skb) = 0;

        skb_reset_transport_header(skb);
        __skb_pull(skb, sizeof(*cpl));
        __pskb_trim(skb, ntohs(cpl->len));

        if (!csk->skb_ulp_lhdr)
                csk->skb_ulp_lhdr = skb;

        lskb = csk->skb_ulp_lhdr;
        cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
                  csk, csk->state, csk->flags, skb, lskb);

        __skb_queue_tail(&csk->receive_queue, skb);
        spin_unlock_bh(&csk->lock);
        return;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}

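/*
 * cxgb4i_process_ddpvld - record DDP completion status on the header skb
 * Translates the ddpvld status bits (header/data CRC errors, padding
 * errors, payload DDP'ed) into skb flags for the PDU completion path.
 */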
1332static void
1333cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
1334                      struct sk_buff *skb, u32 ddpvld)
1335{
1336        if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
1337                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1338                        csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1339                cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
1340        }
1341
1342        if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1343                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1344                        csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1345                cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
1346        }
1347
1348        if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1349                log_debug(1 << CXGBI_DBG_PDU_RX,
1350                          "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1351                          csk, skb, ddpvld);
1352                cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
1353        }
1354
1355        if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1356            !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1357                log_debug(1 << CXGBI_DBG_PDU_RX,
1358                          "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1359                          csk, skb, ddpvld);
1360                cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
1361        }
1362}
1363
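    /*
     * CPL_RX_DATA_DDP/CPL_RX_ISCSI_DDP: per-PDU completion.  Closes out
     * the PDU started by csk->skb_ulp_lhdr, records the data digest and
     * any ddpvld error flags on that header skb, marks it
     * SKCBF_RX_STATUS and hands the completed PDU to libcxgbi via
     * cxgbi_conn_pdu_ready().
     */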
1364static void do_rx_data_ddp(struct cxgbi_device *cdev,
1365                                  struct sk_buff *skb)
1366{
1367        struct cxgbi_sock *csk;
1368        struct sk_buff *lskb;
1369        struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
1370        unsigned int tid = GET_TID(rpl);
1371        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1372        struct tid_info *t = lldi->tids;
1373        u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1374
1375        csk = lookup_tid(t, tid);
1376        if (unlikely(!csk)) {
1377                pr_err("can't find connection for tid %u.\n", tid);
1378                goto rel_skb;
1379        }
1380
1381        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1382                "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1383                csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
1384
1385        spin_lock_bh(&csk->lock);
1386
1387        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1388                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1389                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1390                        csk, csk->state, csk->flags, csk->tid);
1391                if (csk->state != CTP_ABORTING)
1392                        goto abort_conn;
1393                else
1394                        goto discard;
1395        }
1396
1397        if (!csk->skb_ulp_lhdr) {
1398                pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1399                goto abort_conn;
1400        }
1401
1402        lskb = csk->skb_ulp_lhdr;
1403        csk->skb_ulp_lhdr = NULL;
1404
1405        cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
1406
1407        if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
1408                pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
1409                        csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1410
1411        cxgb4i_process_ddpvld(csk, lskb, ddpvld);
1412
1413        log_debug(1 << CXGBI_DBG_PDU_RX,
1414                "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1415                csk, lskb, cxgbi_skcb_flags(lskb));
1416
1417        cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
1418        cxgbi_conn_pdu_ready(csk);
1419        spin_unlock_bh(&csk->lock);
1420        goto rel_skb;
1421
1422abort_conn:
1423        send_abort_req(csk);
1424discard:
1425        spin_unlock_bh(&csk->lock);
1426rel_skb:
1427        __kfree_skb(skb);
1428}
1429
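    /*
     * CPL_RX_ISCSI_CMP: a single message carrying both the PDU header
     * and its completion status.  Any payload skb already queued on the
     * receive_queue is re-linked behind this header skb before the PDU
     * is handed up.
     */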
1430static void
1431do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
1432{
1433        struct cxgbi_sock *csk;
1434        struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
1435        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1436        struct tid_info *t = lldi->tids;
1437        struct sk_buff *data_skb = NULL;
1438        u32 tid = GET_TID(rpl);
1439        u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1440        u32 seq = be32_to_cpu(rpl->seq);
1441        u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
1442
1443        csk = lookup_tid(t, tid);
1444        if (unlikely(!csk)) {
1445                pr_err("can't find connection for tid %u.\n", tid);
1446                goto rel_skb;
1447        }
1448
1449        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1450                  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
1451                  "pdu_len_ddp %u, status %u.\n",
1452                  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
1453                  ntohs(rpl->len), pdu_len_ddp, rpl->status);
1454
1455        spin_lock_bh(&csk->lock);
1456
1457        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1458                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1459                          "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1460                          csk, csk->state, csk->flags, csk->tid);
1461
1462                if (csk->state != CTP_ABORTING)
1463                        goto abort_conn;
1464                else
1465                        goto discard;
1466        }
1467
1468        cxgbi_skcb_tcp_seq(skb) = seq;
1469        cxgbi_skcb_flags(skb) = 0;
1470        cxgbi_skcb_rx_pdulen(skb) = 0;
1471
1472        skb_reset_transport_header(skb);
1473        __skb_pull(skb, sizeof(*rpl));
1474        __pskb_trim(skb, be16_to_cpu(rpl->len));
1475
1476        csk->rcv_nxt = seq + pdu_len_ddp;
1477
1478        if (csk->skb_ulp_lhdr) {
1479                data_skb = skb_peek(&csk->receive_queue);
1480                if (!data_skb ||
1481                    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
1482                        pr_err("Error! freelist data not found 0x%p, tid %u\n",
1483                               data_skb, tid);
1484
1485                        goto abort_conn;
1486                }
1487                __skb_unlink(data_skb, &csk->receive_queue);
1488
1489                cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
1490
1491                __skb_queue_tail(&csk->receive_queue, skb);
1492                __skb_queue_tail(&csk->receive_queue, data_skb);
1493        } else {
1494                 __skb_queue_tail(&csk->receive_queue, skb);
1495        }
1496
1497        csk->skb_ulp_lhdr = NULL;
1498
1499        cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1500        cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
1501        cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
1502        cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
1503
1504        cxgb4i_process_ddpvld(csk, skb, ddpvld);
1505
1506        log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
1507                  csk, skb, cxgbi_skcb_flags(skb));
1508
1509        cxgbi_conn_pdu_ready(csk);
1510        spin_unlock_bh(&csk->lock);
1511
1512        return;
1513
1514abort_conn:
1515        send_abort_req(csk);
1516discard:
1517        spin_unlock_bh(&csk->lock);
1518rel_skb:
1519        __kfree_skb(skb);
1520}
1521
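    /*
     * CPL_FW4_ACK: firmware acknowledgment of transmitted work
     * requests.  Returns tx credits and, when seq_vld is set, an
     * updated snd_una so completed skbs can be freed from the write
     * queue.
     */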
1522static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1523{
1524        struct cxgbi_sock *csk;
1525        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1526        unsigned int tid = GET_TID(rpl);
1527        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1528        struct tid_info *t = lldi->tids;
1529
1530        csk = lookup_tid(t, tid);
1531        if (unlikely(!csk)) {
1532                pr_err("can't find connection for tid %u.\n", tid);
1533        } else {
1534                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1535                        "csk 0x%p,%u,0x%lx,%u.\n",
1536                        csk, csk->state, csk->flags, csk->tid);
1537                cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1538                                        rpl->seq_vld);
1539        }
1540        __kfree_skb(skb);
1541}
1542
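    /*
     * CPL_SET_TCB_RPL: reply to a SET_TCB_FIELD request.  Records any
     * error in csk->err and completes csk->cmpl, on which the DDP setup
     * helpers below block.
     */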
1543static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1544{
1545        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1546        unsigned int tid = GET_TID(rpl);
1547        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1548        struct tid_info *t = lldi->tids;
1549        struct cxgbi_sock *csk;
1550
1551        csk = lookup_tid(t, tid);
1552        if (!csk) {
1553                pr_err("can't find connection for tid %u.\n", tid);
1554                return;
1555        }
1556
1557        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1558                "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
1559                csk, csk->state, csk->flags, csk->tid, rpl->status);
1560
1561        if (rpl->status != CPL_ERR_NONE) {
1562                pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1563                        csk, tid, rpl->status);
1564                csk->err = -EINVAL;
1565        }
1566
1567        complete(&csk->cmpl);
1568
1569        __kfree_skb(skb);
1570}
1571
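    /*
     * Pre-allocate the close/abort control messages at connection setup
     * so that tearing the connection down can never fail on memory
     * allocation.
     */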
1572static int alloc_cpls(struct cxgbi_sock *csk)
1573{
1574        csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1575                                        0, GFP_KERNEL);
1576        if (!csk->cpl_close)
1577                return -ENOMEM;
1578
1579        csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1580                                        0, GFP_KERNEL);
1581        if (!csk->cpl_abort_req)
1582                goto free_cpls;
1583
1584        csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1585                                        0, GFP_KERNEL);
1586        if (!csk->cpl_abort_rpl)
1587                goto free_cpls;
1588        return 0;
1589
1590free_cpls:
1591        cxgbi_sock_free_cpl_skbs(csk);
1592        return -ENOMEM;
1593}
1594
1595static inline void l2t_put(struct cxgbi_sock *csk)
1596{
1597        if (csk->l2t) {
1598                cxgb4_l2t_release(csk->l2t);
1599                csk->l2t = NULL;
1600                cxgbi_sock_put(csk);
1601        }
1602}
1603
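    /*
     * Release everything the offloaded connection holds: control cpl
     * skbs and queued writes, the L2T (and, for IPv6, CLIP) references,
     * and the atid or tid along with its socket reference.
     */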
1604static void release_offload_resources(struct cxgbi_sock *csk)
1605{
1606        struct cxgb4_lld_info *lldi;
1607#if IS_ENABLED(CONFIG_IPV6)
1608        struct net_device *ndev = csk->cdev->ports[csk->port_id];
1609#endif
1610
1611        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1612                "csk 0x%p,%u,0x%lx,%u.\n",
1613                csk, csk->state, csk->flags, csk->tid);
1614
1615        cxgbi_sock_free_cpl_skbs(csk);
1616        cxgbi_sock_purge_write_queue(csk);
1617        if (csk->wr_cred != csk->wr_max_cred) {
1618                cxgbi_sock_purge_wr_queue(csk);
1619                cxgbi_sock_reset_wr_list(csk);
1620        }
1621
1622        l2t_put(csk);
1623#if IS_ENABLED(CONFIG_IPV6)
1624        if (csk->csk_family == AF_INET6)
1625                cxgb4_clip_release(ndev,
1626                                   (const u32 *)&csk->saddr6.sin6_addr, 1);
1627#endif
1628
1629        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1630                free_atid(csk);
1631        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1632                lldi = cxgbi_cdev_priv(csk->cdev);
1633                cxgb4_remove_tid(lldi->tids, 0, csk->tid,
1634                                 csk->csk_family);
1635                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1636                cxgbi_sock_put(csk);
1637        }
1638        csk->dst = NULL;
1639}
1640
1641#ifdef CONFIG_CHELSIO_T4_DCB
1642static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
1643{
1644        return ndev->dcbnl_ops->getstate(ndev);
1645}
1646
1647static int select_priority(int pri_mask)
1648{
1649        if (!pri_mask)
1650                return 0;
1651        return ffs(pri_mask) - 1;
1652}
1653
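    /*
     * Look up the DCB app-table priority for iSCSI (TCP port 3260),
     * trying the IEEE selectors first and falling back to the CEE
     * port-number selector.
     */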
1654static u8 get_iscsi_dcb_priority(struct net_device *ndev)
1655{
1656        int rv;
1657        u8 caps;
1658
1659        struct dcb_app iscsi_dcb_app = {
1660                .protocol = 3260
1661        };
1662
1663        rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
1664        if (rv)
1665                return 0;
1666
1667        if (caps & DCB_CAP_DCBX_VER_IEEE) {
1668                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
1669                rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
1670                if (!rv) {
1671                        iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
1672                        rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
1673                }
1674        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
1675                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
1676                rv = dcb_getapp(ndev, &iscsi_dcb_app);
1677        }
1678
1679        log_debug(1 << CXGBI_DBG_ISCSI,
1680                  "iSCSI priority is set to %u\n", select_priority(rv));
1681        return select_priority(rv);
1682}
1683#endif
1684
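    /*
     * Active open: resolve the neighbour, allocate an atid and an L2T
     * entry (plus a CLIP entry for IPv6), size the send/receive windows
     * from the link speed, then send the chip-specific act_open
     * request.  The reply arrives as CPL_ACT_OPEN_RPL or
     * CPL_ACT_ESTABLISH.
     */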
1685static int init_act_open(struct cxgbi_sock *csk)
1686{
1687        struct cxgbi_device *cdev = csk->cdev;
1688        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1689        struct net_device *ndev = cdev->ports[csk->port_id];
1690        struct sk_buff *skb = NULL;
1691        struct neighbour *n = NULL;
1692        void *daddr;
1693        unsigned int step;
1694        unsigned int rxq_idx;
1695        unsigned int size, size6;
1696        unsigned int linkspeed;
1697        unsigned int rcv_winf, snd_winf;
1698#ifdef CONFIG_CHELSIO_T4_DCB
1699        u8 priority = 0;
1700#endif
1701        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1702                "csk 0x%p,%u,0x%lx,%u.\n",
1703                csk, csk->state, csk->flags, csk->tid);
1704
1705        if (csk->csk_family == AF_INET)
1706                daddr = &csk->daddr.sin_addr.s_addr;
1707#if IS_ENABLED(CONFIG_IPV6)
1708        else if (csk->csk_family == AF_INET6)
1709                daddr = &csk->daddr6.sin6_addr;
1710#endif
1711        else {
1712                pr_err("address family 0x%x not supported\n", csk->csk_family);
1713                goto rel_resource;
1714        }
1715
1716        n = dst_neigh_lookup(csk->dst, daddr);
1717
1718        if (!n) {
1719                pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1720                goto rel_resource;
1721        }
1722
1723        if (!(n->nud_state & NUD_VALID))
1724                neigh_event_send(n, NULL);
1725
1726        csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
1727        if (csk->atid < 0) {
1728                pr_err("%s, NO atid available.\n", ndev->name);
1729                goto rel_resource_without_clip;
1730        }
1731        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1732        cxgbi_sock_get(csk);
1733
1734#ifdef CONFIG_CHELSIO_T4_DCB
1735        if (get_iscsi_dcb_state(ndev))
1736                priority = get_iscsi_dcb_priority(ndev);
1737
1738        csk->dcb_priority = priority;
1739        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
1740#else
1741        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1742#endif
1743        if (!csk->l2t) {
1744                pr_err("%s, cannot alloc l2t.\n", ndev->name);
1745                goto rel_resource_without_clip;
1746        }
1747        cxgbi_sock_get(csk);
1748
1749#if IS_ENABLED(CONFIG_IPV6)
1750        if (csk->csk_family == AF_INET6)
1751                cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
1752#endif
1753
1754        if (is_t4(lldi->adapter_type)) {
1755                size = sizeof(struct cpl_act_open_req);
1756                size6 = sizeof(struct cpl_act_open_req6);
1757        } else if (is_t5(lldi->adapter_type)) {
1758                size = sizeof(struct cpl_t5_act_open_req);
1759                size6 = sizeof(struct cpl_t5_act_open_req6);
1760        } else {
1761                size = sizeof(struct cpl_t6_act_open_req);
1762                size6 = sizeof(struct cpl_t6_act_open_req6);
1763        }
1764
1765        if (csk->csk_family == AF_INET)
1766                skb = alloc_wr(size, 0, GFP_NOIO);
1767#if IS_ENABLED(CONFIG_IPV6)
1768        else
1769                skb = alloc_wr(size6, 0, GFP_NOIO);
1770#endif
1771
1772        if (!skb)
1773                goto rel_resource;
1774        skb->sk = (struct sock *)csk;
1775        t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1776
1777        if (!csk->mtu)
1778                csk->mtu = dst_mtu(csk->dst);
1779        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1780        csk->tx_chan = cxgb4_port_chan(ndev);
1781        csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx;
1782        step = lldi->ntxq / lldi->nchan;
1783        csk->txq_idx = cxgb4_port_idx(ndev) * step;
1784        step = lldi->nrxq / lldi->nchan;
1785        rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
1786        cdev->rxq_idx_cntr++;
1787        csk->rss_qid = lldi->rxq_ids[rxq_idx];
1788        linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
1789        csk->snd_win = cxgb4i_snd_win;
1790        csk->rcv_win = cxgb4i_rcv_win;
1791        if (cxgb4i_rcv_win <= 0) {
1792                csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
1793                rcv_winf = linkspeed / SPEED_10000;
1794                if (rcv_winf)
1795                        csk->rcv_win *= rcv_winf;
1796        }
1797        if (cxgb4i_snd_win <= 0) {
1798                csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
1799                snd_winf = linkspeed / SPEED_10000;
1800                if (snd_winf)
1801                        csk->snd_win *= snd_winf;
1802        }
1803        csk->wr_cred = lldi->wr_cred -
1804                       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1805        csk->wr_max_cred = csk->wr_cred;
1806        csk->wr_una_cred = 0;
1807        cxgbi_sock_reset_wr_list(csk);
1808        csk->err = 0;
1809
1810        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1811                       (&csk->saddr), (&csk->daddr), csk, csk->state,
1812                       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1813                       csk->mtu, csk->mss_idx, csk->smac_idx);
1814
1815        /* must wait for either an act_open_rpl or an act_open_establish */
1816        if (!try_module_get(cdev->owner)) {
1817                pr_err("%s, try_module_get failed.\n", ndev->name);
1818                goto rel_resource;
1819        }
1820
1821        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1822        if (csk->csk_family == AF_INET)
1823                send_act_open_req(csk, skb, csk->l2t);
1824#if IS_ENABLED(CONFIG_IPV6)
1825        else
1826                send_act_open_req6(csk, skb, csk->l2t);
1827#endif
1828        neigh_release(n);
1829
1830        return 0;
1831
1832rel_resource:
1833#if IS_ENABLED(CONFIG_IPV6)
1834        if (csk->csk_family == AF_INET6)
1835                cxgb4_clip_release(ndev,
1836                                   (const u32 *)&csk->saddr6.sin6_addr, 1);
1837#endif
1838rel_resource_without_clip:
1839        if (n)
1840                neigh_release(n);
1841        if (skb)
1842                __kfree_skb(skb);
1843        return -EINVAL;
1844}
1845
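    /* Dispatch table indexed by CPL opcode; see t4_uld_rx_handler(). */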
1846static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1847        [CPL_ACT_ESTABLISH] = do_act_establish,
1848        [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
1849        [CPL_PEER_CLOSE] = do_peer_close,
1850        [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
1851        [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
1852        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1853        [CPL_FW4_ACK] = do_fw4_ack,
1854        [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
1855        [CPL_ISCSI_DATA] = do_rx_iscsi_data,
1856        [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1857        [CPL_RX_DATA_DDP] = do_rx_data_ddp,
1858        [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
1859        [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
1860        [CPL_RX_DATA] = do_rx_data,
1861};
1862
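    /*
     * Wire up the connection-management callbacks and create the map
     * used to allocate local port numbers for offloaded connections.
     */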
1863static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1864{
1865        int rc;
1866
1867        if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1868                cxgb4i_max_connect = CXGB4I_MAX_CONN;
1869
1870        rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1871                                        cxgb4i_max_connect);
1872        if (rc < 0)
1873                return rc;
1874
1875        cdev->csk_release_offload_resources = release_offload_resources;
1876        cdev->csk_push_tx_frames = push_tx_frames;
1877        cdev->csk_send_abort_req = send_abort_req;
1878        cdev->csk_send_close_req = send_close_req;
1879        cdev->csk_send_rx_credits = send_rx_credits;
1880        cdev->csk_alloc_cpls = alloc_cpls;
1881        cdev->csk_init_act_open = init_act_open;
1882
1883        pr_info("cdev 0x%p, offload up, added.\n", cdev);
1884        return 0;
1885}
1886
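    /*
     * Build the ULP_TX_MEM_WRITE work-request header used to write
     * page-pods into adapter memory.  Lengths and addresses are in
     * 32-byte units (hence the >> 5); T5 and later chips take the
     * payload as immediate data.
     */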
1887static inline void
1888ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
1889                   struct ulp_mem_io *req,
1890                   unsigned int wr_len, unsigned int dlen,
1891                   unsigned int pm_addr,
1892                   int tid)
1893{
1894        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1895        struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
1896
1897        INIT_ULPTX_WR(req, wr_len, 0, tid);
1898        req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
1899                FW_WR_ATOMIC_V(0));
1900        req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1901                ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
1902                T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
1903        req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
1904        req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
1905        req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
1906
1907        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
1908        idata->len = htonl(dlen);
1909}
1910
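    /*
     * Allocate an skb sized for a ULP_TX_MEM_WRITE covering npods
     * page-pods starting at index idx, and fill in its header.
     */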
1911static struct sk_buff *
1912ddp_ppod_init_idata(struct cxgbi_device *cdev,
1913                    struct cxgbi_ppm *ppm,
1914                    unsigned int idx, unsigned int npods,
1915                    unsigned int tid)
1916{
1917        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
1918        unsigned int dlen = npods << PPOD_SIZE_SHIFT;
1919        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
1920                                sizeof(struct ulptx_idata) + dlen, 16);
1921        struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
1922
1923        if (!skb) {
1924                pr_err("%s: %s idx %u, npods %u, OOM.\n",
1925                       __func__, ppm->ndev->name, idx, npods);
1926                return NULL;
1927        }
1928
1929        ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
1930                           pm_addr, tid);
1931
1932        return skb;
1933}
1934
1935static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1936                                struct cxgbi_task_tag_info *ttinfo,
1937                                unsigned int idx, unsigned int npods,
1938                                struct scatterlist **sg_pp,
1939                                unsigned int *sg_off)
1940{
1941        struct cxgbi_device *cdev = csk->cdev;
1942        struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
1943                                                  csk->tid);
1944        struct ulp_mem_io *req;
1945        struct ulptx_idata *idata;
1946        struct cxgbi_pagepod *ppod;
1947        int i;
1948
1949        if (!skb)
1950                return -ENOMEM;
1951
1952        req = (struct ulp_mem_io *)skb->head;
1953        idata = (struct ulptx_idata *)(req + 1);
1954        ppod = (struct cxgbi_pagepod *)(idata + 1);
1955
1956        for (i = 0; i < npods; i++, ppod++)
1957                cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
1958
1959        cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
1960        cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
1961        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
1962
1963        spin_lock_bh(&csk->lock);
1964        cxgbi_sock_skb_entail(csk, skb);
1965        spin_unlock_bh(&csk->lock);
1966
1967        return 0;
1968}
1969
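    /*
     * Program the page-pods covering a task's scatterlist, writing at
     * most ULPMEM_IDATA_MAX_NPPODS pods per work request.
     */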
1970static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1971                       struct cxgbi_task_tag_info *ttinfo)
1972{
1973        unsigned int pidx = ttinfo->idx;
1974        unsigned int npods = ttinfo->npods;
1975        unsigned int i, cnt;
1976        int err = 0;
1977        struct scatterlist *sg = ttinfo->sgl;
1978        unsigned int offset = 0;
1979
1980        ttinfo->cid = csk->port_id;
1981
1982        for (i = 0; i < npods; i += cnt, pidx += cnt) {
1983                cnt = npods - i;
1984
1985                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1986                        cnt = ULPMEM_IDATA_MAX_NPPODS;
1987                err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
1988                                           &sg, &offset);
1989                if (err < 0)
1990                        break;
1991        }
1992
1993        return err;
1994}
1995
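    /*
     * Set the DDP page-size index in the connection's TCB via
     * SET_TCB_FIELD and wait for the reply.
     */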
1996static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1997                                int pg_idx)
1998{
1999        struct sk_buff *skb;
2000        struct cpl_set_tcb_field *req;
2001
2002        if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
2003                return 0;
2004
2005        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
2006        if (!skb)
2007                return -ENOMEM;
2008
2009        /*  set up ulp page size */
2010        req = (struct cpl_set_tcb_field *)skb->head;
2011        INIT_TP_WR(req, csk->tid);
2012        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
2013        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2014        req->word_cookie = htons(0);
2015        req->mask = cpu_to_be64(0x3 << 8);
2016        req->val = cpu_to_be64(pg_idx << 8);
2017        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2018
2019        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2020                "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
2021
2022        reinit_completion(&csk->cmpl);
2023        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2024        wait_for_completion(&csk->cmpl);
2025
2026        return csk->err;
2027}
2028
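    /*
     * Enable header/data digest offload in the connection's TCB via
     * SET_TCB_FIELD and wait for the reply; hcrc_len/dcrc_len record
     * the negotiated digest sizes.
     */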
2029static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
2030                                 int hcrc, int dcrc)
2031{
2032        struct sk_buff *skb;
2033        struct cpl_set_tcb_field *req;
2034
2035        if (!hcrc && !dcrc)
2036                return 0;
2037
2038        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
2039        if (!skb)
2040                return -ENOMEM;
2041
2042        csk->hcrc_len = (hcrc ? 4 : 0);
2043        csk->dcrc_len = (dcrc ? 4 : 0);
2044        /*  set up ulp submode */
2045        req = (struct cpl_set_tcb_field *)skb->head;
2046        INIT_TP_WR(req, tid);
2047        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
2048        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
2049        req->word_cookie = htons(0);
2050        req->mask = cpu_to_be64(0x3 << 4);
2051        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
2052                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
2053        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
2054
2055        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
2056                "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
2057
2058        reinit_completion(&csk->cmpl);
2059        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
2060        wait_for_completion(&csk->cmpl);
2061
2062        return csk->err;
2063}
2064
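    /* Fetch the page-pod manager stashed in the lld_info private data. */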
2065static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
2066{
2067        return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
2068                                       (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
2069}
2070
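    /*
     * Carve the adapter's iSCSI memory region into page-pods: set up
     * the page-pod manager and tag format, and cap the tx/rx PDU
     * payload sizes to what the adapter can carry in a single PDU.
     */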
2071static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
2072{
2073        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
2074        struct net_device *ndev = cdev->ports[0];
2075        struct cxgbi_tag_format tformat;
2076        unsigned int ppmax;
2077        int i, err;
2078
2079        if (!lldi->vr->iscsi.size) {
2080                pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
2081                return -EACCES;
2082        }
2083
2084        cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
2085        ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
2086
2087        memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
2088        for (i = 0; i < 4; i++)
2089                tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
2090                                         & 0xF;
2091        cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
2092
2093        pr_info("iscsi_edram.start 0x%x iscsi_edram.size 0x%x\n",
2094                lldi->vr->ppod_edram.start, lldi->vr->ppod_edram.size);
2095
2096        err = cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat,
2097                                  lldi->vr->iscsi.size, lldi->iscsi_llimit,
2098                                  lldi->vr->iscsi.start, 2,
2099                                  lldi->vr->ppod_edram.start,
2100                                  lldi->vr->ppod_edram.size);
2101
2102        if (err < 0)
2103                return err;
2104
2105        cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
2106        cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
2107        cdev->csk_ddp_set_map = ddp_set_map;
2108        cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2109                                  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2110        cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2111                                  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2112        cdev->cdev2ppm = cdev2ppm;
2113
2114        return 0;
2115}
2116
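    /*
     * ULD add callback, invoked by cxgb4 once per adapter: register a
     * cxgbi_device, snapshot the lld_info into its private area, set up
     * DDP and offload support, and create one iSCSI host per port.
     */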
2117static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
2118{
2119        struct cxgbi_device *cdev;
2120        struct port_info *pi;
2121        int i, rc;
2122
2123        cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
2124        if (!cdev) {
2125                pr_info("t4 device 0x%p, register failed.\n", lldi);
2126                return NULL;
2127        }
2128        pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
2129                cdev, lldi->adapter_type, lldi->nports,
2130                lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
2131                lldi->nrxq, lldi->wr_cred);
2132        for (i = 0; i < lldi->nrxq; i++)
2133                log_debug(1 << CXGBI_DBG_DEV,
2134                        "t4 0x%p, rxq id #%d: %u.\n",
2135                        cdev, i, lldi->rxq_ids[i]);
2136
2137        memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
2138        cdev->flags = CXGBI_FLAG_DEV_T4;
2139        cdev->pdev = lldi->pdev;
2140        cdev->ports = lldi->ports;
2141        cdev->nports = lldi->nports;
2142        cdev->mtus = lldi->mtus;
2143        cdev->nmtus = NMTUS;
2144        cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
2145                                 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
2146        cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
2147        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
2148        cdev->itp = &cxgb4i_iscsi_transport;
2149        cdev->owner = THIS_MODULE;
2150
2151        cdev->pfvf = FW_PFVF_CMD_PFN_V(lldi->pf);
2152        pr_info("cdev 0x%p,%s, pfvf %u.\n",
2153                cdev, lldi->ports[0]->name, cdev->pfvf);
2154
2155        rc = cxgb4i_ddp_init(cdev);
2156        if (rc) {
2157                pr_info("t4 0x%p ddp init failed %d.\n", cdev, rc);
2158                goto err_out;
2159        }
2160        rc = cxgb4i_ofld_init(cdev);
2161        if (rc) {
2162                pr_info("t4 0x%p ofld init failed %d.\n", cdev, rc);
2163                goto err_out;
2164        }
2165
2166        rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
2167                                &cxgb4i_host_template, cxgb4i_stt);
2168        if (rc)
2169                goto err_out;
2170
2171        for (i = 0; i < cdev->nports; i++) {
2172                pi = netdev_priv(lldi->ports[i]);
2173                cdev->hbas[i]->port_id = pi->port_id;
2174        }
2175        return cdev;
2176
2177err_out:
2178        cxgbi_device_unregister(cdev);
2179        return ERR_PTR(-ENOMEM);
2180}
2181
2182#define RX_PULL_LEN     128
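    /*
     * ULD rx entry point.  A response arrives either inline in the
     * response-queue entry (pgl == NULL) or in freelist pages; it is
     * copied or assembled into an skb and dispatched by CPL opcode.
     */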
2183static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
2184                                const struct pkt_gl *pgl)
2185{
2186        const struct cpl_act_establish *rpl;
2187        struct sk_buff *skb;
2188        unsigned int opc;
2189        struct cxgbi_device *cdev = handle;
2190
2191        if (pgl == NULL) {
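                /* inline rsp: 64-byte iq entry minus RSS hdr and rsp_ctrl */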
2192                unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
2193
2194                skb = alloc_wr(len, 0, GFP_ATOMIC);
2195                if (!skb)
2196                        goto nomem;
2197                skb_copy_to_linear_data(skb, &rsp[1], len);
2198        } else {
2199                if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
2200                        pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
2201                                pgl->va, be64_to_cpu(*rsp),
2202                                be64_to_cpu(*(u64 *)pgl->va),
2203                                pgl->tot_len);
2204                        return 0;
2205                }
2206                skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
2207                if (unlikely(!skb))
2208                        goto nomem;
2209        }
2210
2211        rpl = (struct cpl_act_establish *)skb->data;
2212        opc = rpl->ot.opcode;
2213        log_debug(1 << CXGBI_DBG_TOE,
2214                "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
2215                 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
2216        if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
2217                pr_err("No handler for opcode 0x%x.\n", opc);
2218                __kfree_skb(skb);
2219        } else
2220                cxgb4i_cplhandlers[opc](cdev, skb);
2221
2222        return 0;
2223nomem:
2224        log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
2225        return 1;
2226}
2227
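    /*
     * ULD state-change callback.  Only DETACH does real work
     * (unregistering the cxgbi device); the other transitions are
     * logged only.
     */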
2228static int t4_uld_state_change(void *handle, enum cxgb4_state state)
2229{
2230        struct cxgbi_device *cdev = handle;
2231
2232        switch (state) {
2233        case CXGB4_STATE_UP:
2234                pr_info("cdev 0x%p, UP.\n", cdev);
2235                break;
2236        case CXGB4_STATE_START_RECOVERY:
2237                pr_info("cdev 0x%p, RECOVERY.\n", cdev);
2238                /* close all connections */
2239                break;
2240        case CXGB4_STATE_DOWN:
2241                pr_info("cdev 0x%p, DOWN.\n", cdev);
2242                break;
2243        case CXGB4_STATE_DETACH:
2244                pr_info("cdev 0x%p, DETACH.\n", cdev);
2245                cxgbi_device_unregister(cdev);
2246                break;
2247        default:
2248                pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
2249                break;
2250        }
2251        return 0;
2252}
2253
2254#ifdef CONFIG_CHELSIO_T4_DCB
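    /*
     * DCB app-priority change notifier: when the priority for the iSCSI
     * app (TCP port 3260) changes on a port backed by this driver, fail
     * every connection still using a stale priority so that it gets
     * re-established with the new one.
     */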
2255static int
2256cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
2257                        void *data)
2258{
2259        int i, port = 0xFF;
2260        struct net_device *ndev;
2261        struct cxgbi_device *cdev = NULL;
2262        struct dcb_app_type *iscsi_app = data;
2263        struct cxgbi_ports_map *pmap;
2264        u8 priority;
2265
2266        if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
2267                if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
2268                    (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
2269                        return NOTIFY_DONE;
2270
2271                priority = iscsi_app->app.priority;
2272        } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
2273                if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
2274                        return NOTIFY_DONE;
2275
2276                if (!iscsi_app->app.priority)
2277                        return NOTIFY_DONE;
2278
2279                priority = ffs(iscsi_app->app.priority) - 1;
2280        } else {
2281                return NOTIFY_DONE;
2282        }
2283
2284        if (iscsi_app->app.protocol != 3260)
2285                return NOTIFY_DONE;
2286
2287        log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
2288                  iscsi_app->ifindex, priority);
2289
2290        ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
2291        if (!ndev)
2292                return NOTIFY_DONE;
2293
2294        cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
2295
2296        dev_put(ndev);
2297        if (!cdev)
2298                return NOTIFY_DONE;
2299
2300        pmap = &cdev->pmap;
2301
2302        for (i = 0; i < pmap->used; i++) {
2303                if (pmap->port_csk[i]) {
2304                        struct cxgbi_sock *csk = pmap->port_csk[i];
2305
2306                        if (csk->dcb_priority != priority) {
2307                                iscsi_conn_failure(csk->user_data,
2308                                                   ISCSI_ERR_CONN_FAILED);
2309                                pr_info("Restarting iSCSI connection %p with priority %u->%u.\n",
2310                                        csk, csk->dcb_priority,
2311                                        priority);
2312                        }
2313                }
2314        }
2315        return NOTIFY_OK;
2316}

    /* Notifier consumed by register/unregister_dcbevent_notifier() below. */
    static struct notifier_block cxgb4_dcb_change = {
            .notifier_call = cxgb4_dcb_change_notify,
    };
2317#endif
2318
2319static int __init cxgb4i_init_module(void)
2320{
2321        int rc;
2322
2323        printk(KERN_INFO "%s", version);
2324
2325        rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
2326        if (rc < 0)
2327                return rc;
2328        cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
2329
2330#ifdef CONFIG_CHELSIO_T4_DCB
2331        pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
2332        register_dcbevent_notifier(&cxgb4_dcb_change);
2333#endif
2334        return 0;
2335}
2336
2337static void __exit cxgb4i_exit_module(void)
2338{
2339#ifdef CONFIG_CHELSIO_T4_DCB
2340        unregister_dcbevent_notifier(&cxgb4_dcb_change);
2341#endif
2342        cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
2343        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
2344        cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
2345}
2346
2347module_init(cxgb4i_init_module);
2348module_exit(cxgb4i_exit_module);
2349