linux/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010-2015 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:  Karen Xie (kxie@chelsio.com)
 *              Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"
#include "clip_tbl.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb4i"
#define DRV_MODULE_DESC         "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION      "0.9.5-ko"
#define DRV_MODULE_RELDATE      "Apr. 2015"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

#define CXGB4I_DEFAULT_10G_RCV_WIN (256 * 1024)
static int cxgb4i_rcv_win = -1;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

#define CXGB4I_DEFAULT_10G_SND_WIN (128 * 1024)
static int cxgb4i_snd_win = -1;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
                "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);
static inline int send_tx_flowc_wr(struct cxgbi_sock *);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .lro = false,
        .add = t4_uld_add,
        .rx_handler = t4_uld_rx_handler,
        .state_change = t4_uld_state_change,
};
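
/*
 * For reference (a sketch of the registration path, which is not shown in
 * this excerpt): the ULD info above is handed to the cxgb4 LLD at module
 * init via cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info); cxgb4
 * then calls .add once per adapter and .rx_handler for each inbound CPL.
 */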

static struct scsi_host_template cxgb4i_host_template = {
        .module         = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .proc_name      = DRV_MODULE_NAME,
        .can_queue      = CXGB4I_SCSI_HOST_QDEPTH,
        .queuecommand   = iscsi_queuecommand,
        .change_queue_depth = scsi_change_queue_depth,
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
        .eh_timed_out   = iscsi_eh_cmd_timed_out,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc   = iscsi_target_alloc,
        .use_clustering = DISABLE_CLUSTERING,
        .this_id        = -1,
        .track_queue_depth = 1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
        .owner          = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                                CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                                CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible        = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
        .create_session = cxgbi_create_session,
        .destroy_session        = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn    = cxgbi_create_conn,
        .bind_conn              = cxgbi_bind_conn,
        .destroy_conn   = iscsi_tcp_conn_teardown,
        .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_conn_stop,
        .get_conn_param = iscsi_conn_get_param,
        .set_param      = cxgbi_set_conn_param,
        .get_stats      = cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu       = iscsi_conn_send_pdu,
        /* task */
        .init_task      = iscsi_tcp_task_init,
        .xmit_task      = iscsi_tcp_task_xmit,
        .cleanup_task   = cxgbi_cleanup_task,
        /* pdu */
        .alloc_pdu      = cxgbi_conn_alloc_pdu,
        .init_pdu       = cxgbi_conn_init_pdu,
        .xmit_pdu       = cxgbi_conn_xmit_pdu,
        .parse_pdu_itt  = cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param   = cxgbi_get_ep_param,
        .ep_connect     = cxgbi_ep_connect,
        .ep_poll        = cxgbi_ep_poll,
        .ep_disconnect  = cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message-passing interface
 * between the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */
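
/*
 * Illustrative active-open message flow, as implemented by the handlers
 * below (host to hardware on the left, hardware to host on the right):
 *
 *   send_act_open_req()  --- CPL_ACT_OPEN_REQ --->
 *                        <-- CPL_ACT_ESTABLISH --- do_act_establish()
 *                        <-- CPL_ACT_OPEN_RPL ---- do_act_open_rpl() (failure)
 *   send_close_req()     --- CPL_CLOSE_CON_REQ -->
 *                        <-- CPL_CLOSE_CON_RPL --- do_close_con_rpl()
 */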

#define RCV_BUFSIZ_MASK         0x3FFU
#define MAX_IMM_TX_PKT_LEN      256

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline bool is_ofld_imm(const struct sk_buff *skb)
{
        int len = skb->len;

        if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
                len += sizeof(struct fw_ofld_tx_data_wr);

        return len <= MAX_IMM_TX_PKT_LEN;
}
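
/*
 * Worked example (illustrative; assumes the 16-byte struct
 * fw_ofld_tx_data_wr defined in t4fw_api.h): a 240-byte PDU skb with
 * SKCBF_TX_NEED_HDR set needs 240 + 16 = 256 bytes, which just fits
 * MAX_IMM_TX_PKT_LEN and is sent as immediate data; one byte more and
 * it would be sent with a gather list instead.
 */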

static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(csk->mss_idx) |
                L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(csk->rcv_win >> 10);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
                RSS_QUEUE_V(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req *req =
                                (struct cpl_act_open_req *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t));
                opt2 |= RX_FC_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req *req =
                                (struct cpl_t5_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be64(FILTER_TUPLE_V(
                                cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t)));
                req->rsvd = cpu_to_be32(isn);
                opt2 |= T5_ISS_VALID;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else {
                struct cpl_t6_act_open_req *req =
                                (struct cpl_t6_act_open_req *)skb->head;
                u32 isn = (prandom_u32() & ~7UL) - 1;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                            qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be64(FILTER_TUPLE_V(
                                cxgb4_select_ntuple(
                                        csk->cdev->ports[csk->port_id],
                                        csk->l2t)));
                req->rsvd = cpu_to_be32(isn);

                opt2 |= T5_ISS_VALID;
                opt2 |= RX_FC_DISABLE_F;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);
                req->rsvd2 = cpu_to_be32(0);
                req->opt3 = cpu_to_be32(0);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                          "csk t6 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                          csk, &req->local_ip, ntohs(req->local_port),
                          &req->peer_ip, ntohs(req->peer_port),
                          csk->atid, csk->rss_qid);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

        pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       CHELSIO_CHIP_VERSION(lldi->adapter_type), csk,
                       csk->state, csk->flags, csk->atid, csk->rss_qid);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
                               struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(csk->mss_idx) |
                L2T_IDX_V(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(csk->rcv_win >> 10);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F |
                RSS_QUEUE_V(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req6 *req =
                            (struct cpl_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;

                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                    8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                    8);

                req->opt0 = cpu_to_be64(opt0);

                opt2 |= RX_FC_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be32(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t));
        } else if (is_t5(lldi->adapter_type)) {
                struct cpl_t5_act_open_req6 *req =
                                (struct cpl_t5_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;
                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                        8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                        8);
                req->opt0 = cpu_to_be64(opt0);

                opt2 |= T5_OPT_2_VALID_F;
                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));
        } else {
                struct cpl_t6_act_open_req6 *req =
                                (struct cpl_t6_act_open_req6 *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                            qid_atid));
                req->local_port = csk->saddr6.sin6_port;
                req->peer_port = csk->daddr6.sin6_port;
                req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
                req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
                                                                        8);
                req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
                req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
                                                                        8);
                req->opt0 = cpu_to_be64(opt0);

                opt2 |= RX_FC_DISABLE_F;
                opt2 |= T5_OPT_2_VALID_F;

                req->opt2 = cpu_to_be32(opt2);

                req->params = cpu_to_be64(FILTER_TUPLE_V(cxgb4_select_ntuple(
                                          csk->cdev->ports[csk->port_id],
                                          csk->l2t)));

                req->rsvd2 = cpu_to_be32(0);
                req->opt3 = cpu_to_be32(0);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);

        pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
                CHELSIO_CHIP_VERSION(lldi->adapter_type), csk, csk->state,
                csk->flags, csk->atid,
                &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
                csk->rss_qid);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}
#endif

static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u.\n",
                csk, csk->state, csk->flags, csk->tid);
        csk->cpl_close = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
        req->rsvd = 0;

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
        struct cpl_abort_req *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
                csk, csk->state, csk->flags, csk->tid);
        req = (struct cpl_abort_req *)skb->data;
        req->cmd = CPL_ABORT_NO_RST;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
        struct cpl_abort_req *req;
        struct sk_buff *skb = csk->cpl_abort_req;

        if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                return;

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                send_tx_flowc_wr(csk);
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }

        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        req->cmd = CPL_ABORT_SEND_RST;
        t4_set_arp_err_handler(skb, csk, abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                      csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits)
                                       | RX_FORCE_ACK_F);
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}
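
/*
 * Note: credit returns are batched by the caller in libcxgbi; with the
 * default cxgb4i_rx_credit_thres of 10KB, an RX_DATA_ACK is typically
 * generated only once at least that much consumed receive data is
 * pending (a sketch of the policy, not the exact trigger condition).
 */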

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}
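
/*
 * Worked example: sgl_len(5) -> n = 4 after the decrement, so
 * (3 * 4) / 2 + (4 & 1) + 2 = 6 + 0 + 2 = 8 flits (64 bytes): 2 flits
 * cover the SGL command word plus the first address/length, each
 * additional pair of entries takes 3 flits, and an odd leftover entry
 * rounds up.
 */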

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
        unsigned int flits, cnt;

        if (is_ofld_imm(skb))
                return DIV_ROUND_UP(skb->len, 8);
        flits = skb_transport_offset(skb) / 8;
        cnt = skb_shinfo(skb)->nr_frags;
        if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
}
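
/*
 * Example (illustrative): an skb whose header region ends 64 bytes into
 * the buffer (skb_transport_offset() == 64), with 3 page fragments and
 * no trailing linear payload, needs 64 / 8 = 8 header flits plus
 * sgl_len(3) = 5 SGL flits, i.e. 13 flits total.
 */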

#define FLOWC_WR_NPARAMS_MIN    9
static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
{
        int nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the FlowC request.
         * Pass back the nparams and actual FlowC length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;

        return flowclen16;
}
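
/*
 * Worked example, assuming the 8-byte fw_flowc_wr header and 8-byte
 * mnemval entries from t4fw_api.h: nparams = 9 gives
 * flowclen = 8 + 9 * 8 = 80 bytes, flowclen16 = DIV_ROUND_UP(80, 16) = 5
 * credits, and the padded FlowC length passed back is 5 * 16 = 80 bytes.
 */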

static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;
        struct fw_flowc_wr *flowc;
        int nparams, flowclen16, flowclen;

        flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
        skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
        flowc->op_to_nparams =
                htonl(FW_WR_OP_V(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 =
                htonl(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = htonl(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = htonl(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = htonl(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = htonl(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = htonl(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = htonl(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = htonl(csk->advmss);
        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        flowc->mnemval[8].val = htonl(16384);

        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
                csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
                csk->snd_nxt, csk->rcv_nxt, csk->snd_win,
                csk->advmss);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);

        return flowclen16;
}

static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
                                   int dlen, int len, u32 credits, int compl)
{
        struct fw_ofld_tx_data_wr *req;
        unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
        unsigned int wr_ulp_mode = 0, val;
        bool imm = is_ofld_imm(skb);

        req = __skb_push(skb, sizeof(*req));

        if (imm) {
                req->op_to_immdlen = htonl(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL_F |
                                        FW_WR_IMMDLEN_V(dlen));
                req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) |
                                                FW_WR_LEN16_V(credits));
        } else {
                req->op_to_immdlen =
                        cpu_to_be32(FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL_F |
                                        FW_WR_IMMDLEN_V(0));
                req->flowid_len16 =
                        cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
                                        FW_WR_LEN16_V(credits));
        }
        if (submode)
                wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP2_MODE_ISCSI) |
                                FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
        val = skb_peek(&csk->write_queue) ? 0 : 1;
        req->tunnel_to_proxy = htonl(wr_ulp_mode |
                                     FW_OFLD_TX_DATA_WR_SHOVE_V(val));
        req->plen = htonl(len);
        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}
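
/*
 * FW_WR_LEN16_V() expresses the work request size in 16-byte credits,
 * matching the arithmetic in push_tx_frames() below: e.g. this 16-byte
 * WR header plus 48 bytes of immediate payload costs
 * DIV_ROUND_UP(16 + 48, 16) = 4 credits.
 */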

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
        kfree_skb(skb);
}

static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
                         1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int dlen = skb->len;
                int len = skb->len;
                unsigned int credits_needed;
                int flowclen16 = 0;

                skb_reset_transport_header(skb);
                if (is_ofld_imm(skb))
                        credits_needed = DIV_ROUND_UP(dlen, 16);
                else
                        credits_needed = DIV_ROUND_UP(
                                                8 * calc_tx_flits_ofld(skb),
                                                16);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR)))
                        credits_needed += DIV_ROUND_UP(
                                        sizeof(struct fw_ofld_tx_data_wr),
                                        16);

                /*
                 * Assumes the initial credits is large enough to support
                 * fw_flowc_wr plus largest possible first payload
                 */
                if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                        flowclen16 = send_tx_flowc_wr(csk);
                        csk->wr_cred -= flowclen16;
                        csk->wr_una_cred += flowclen16;
                        cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
                }

                if (csk->wr_cred < credits_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                "csk 0x%p, skb %u/%u, wr %d < %u.\n",
                                csk, skb->len, skb->data_len,
                                credits_needed, csk->wr_cred);
                        break;
                }
                __skb_unlink(skb, &csk->write_queue);
                set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
                /* Stash the WR credits consumed by this skb in skb->csum;
                 * libcxgbi's WR accounting reads it back when completions
                 * arrive or the queue is purged.
                 */
                skb->csum = credits_needed + flowclen16;
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, credits_needed,
                        csk->wr_cred, csk->wr_una_cred);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                        make_tx_data_wr(csk, skb, dlen, len, credits_needed,
                                        req_completion);
                        csk->snd_nxt += len;
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                } else if (cxgbi_skcb_test_flag(skb, SKCBF_TX_FLAG_COMPL) &&
                           (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
                        struct cpl_close_con_req *req =
                                (struct cpl_close_con_req *)skb->data;
                        req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
                }
                total_size += skb->truesize;
                t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
                        csk, csk->state, csk->flags, csk->tid, skb, len);

                cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
        }
        return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb4_free_atid(lldi->tids, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
                cxgbi_sock_put(csk);
        }
}

static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
        unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
                goto rel_skb;
        }

        if (csk->atid != atid) {
                pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
                        atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
                goto rel_skb;
        }

        pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       atid, tid, csk, csk->state, csk->flags, rcv_isn);

        module_put(cdev->owner);

        cxgbi_sock_get(csk);
        csk->tid = tid;
        cxgb4_insert_tid(lldi->tids, csk, tid, csk->csk_family);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

        free_atid(csk);

        spin_lock_bh(&csk->lock);
        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        /*
         * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
         * pass through opt0.
         */
        if (csk->rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= csk->rcv_win - (RCV_BUFSIZ_MASK << 10);

        csk->advmss = lldi->mtus[TCPOPT_MSS_G(tcp_opt)] - 40;
        if (TCPOPT_TSTAMP_G(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
                        csk, TCPOPT_MSS_G(tcp_opt), csk->advmss);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 0);
                cxgbi_conn_tx_open(csk);
        }
        spin_unlock_bh(&csk->lock);

rel_skb:
        __kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

static void csk_act_open_retry_timer(struct timer_list *t)
{
        struct sk_buff *skb = NULL;
        struct cxgbi_sock *csk = from_timer(csk, t, retry_timer);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
                                   struct l2t_entry *);
        int t4 = is_t4(lldi->adapter_type), size, size6;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (t4) {
                size = sizeof(struct cpl_act_open_req);
                size6 = sizeof(struct cpl_act_open_req6);
        } else {
                size = sizeof(struct cpl_t5_act_open_req);
                size6 = sizeof(struct cpl_t5_act_open_req6);
        }

        if (csk->csk_family == AF_INET) {
                send_act_open_func = send_act_open_req;
                skb = alloc_wr(size, 0, GFP_ATOMIC);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                send_act_open_func = send_act_open_req6;
                skb = alloc_wr(size6, 0, GFP_ATOMIC);
#endif
        }

        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
                                       cxgbi_sock_act_open_req_arp_failure);
                send_act_open_func(csk, skb, csk->l2t);
        }

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}

static inline bool is_neg_adv(unsigned int status)
{
        return status == CPL_ERR_RTX_NEG_ADVICE ||
                status == CPL_ERR_KEEPALV_NEG_ADVICE ||
                status == CPL_ERR_PERSIST_NEG_ADVICE;
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
                TID_TID_G(AOPEN_ATID_G(be32_to_cpu(rpl->atid_status)));
        unsigned int status = AOPEN_STATUS_G(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
                goto rel_skb;
        }

        pr_info_ipaddr("tid %u/%u, status %u.\n"
                       "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
                       atid, tid, status, csk, csk->state, csk->flags);

        if (is_neg_adv(status))
                goto rel_skb;

        module_put(cdev->owner);

        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
                cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl),
                                 csk->csk_family);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != csk_act_open_retry_timer) {
                csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                        act_open_rpl_status_to_errno(status));

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
        __kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                                                int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN: /* fall through */
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ?
                        -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        int rst_status = CPL_ABORT_NO_RST;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
                       (&csk->saddr), (&csk->daddr),
                       csk, csk->state, csk->flags, csk->tid, req->status);

        if (is_neg_adv(req->status))
                goto rel_skb;

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);

        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                send_tx_flowc_wr(csk);
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
        }

        cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
        cxgbi_sock_set_state(csk, CTP_ABORTING);

        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk)
                goto rel_skb;

        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
                       (&csk->saddr), (&csk->daddr), csk,
                       csk->state, csk->flags, csk->tid, rpl->status);
1103
1104        if (rpl->status == CPL_ERR_ABORT_FAILED)
1105                goto rel_skb;
1106
1107        cxgbi_sock_rcv_abort_rpl(csk);
1108rel_skb:
1109        __kfree_skb(skb);
1110}
1111
1112static void do_rx_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1113{
1114        struct cxgbi_sock *csk;
1115        struct cpl_rx_data *cpl = (struct cpl_rx_data *)skb->data;
1116        unsigned int tid = GET_TID(cpl);
1117        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1118        struct tid_info *t = lldi->tids;
1119
1120        csk = lookup_tid(t, tid);
1121        if (!csk) {
1122                pr_err("can't find connection for tid %u.\n", tid);
1123        } else {
1124                /* not expecting this, reset the connection. */
1125                pr_err("csk 0x%p, tid %u, rcv cpl_rx_data.\n", csk, tid);
1126                spin_lock_bh(&csk->lock);
1127                send_abort_req(csk);
1128                spin_unlock_bh(&csk->lock);
1129        }
1130        __kfree_skb(skb);
1131}
1132
1133static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
1134{
1135        struct cxgbi_sock *csk;
1136        struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
1137        unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
1138        unsigned int tid = GET_TID(cpl);
1139        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1140        struct tid_info *t = lldi->tids;
1141
1142        csk = lookup_tid(t, tid);
1143        if (unlikely(!csk)) {
1144                pr_err("can't find conn. for tid %u.\n", tid);
1145                goto rel_skb;
1146        }
1147
1148        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1149                "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1150                csk, csk->state, csk->flags, csk->tid, skb, skb->len,
1151                pdu_len_ddp);
1152
1153        spin_lock_bh(&csk->lock);
1154
1155        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1156                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1157                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1158                        csk, csk->state, csk->flags, csk->tid);
1159                if (csk->state != CTP_ABORTING)
1160                        goto abort_conn;
1161                else
1162                        goto discard;
1163        }
1164
1165        cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
1166        cxgbi_skcb_flags(skb) = 0;
1167
1168        skb_reset_transport_header(skb);
1169        __skb_pull(skb, sizeof(*cpl));
1170        __pskb_trim(skb, ntohs(cpl->len));
1171
1172        if (!csk->skb_ulp_lhdr) {
1173                unsigned char *bhs;
1174                unsigned int hlen, dlen, plen;
1175
1176                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1177                        "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
1178                        csk, csk->state, csk->flags, csk->tid, skb);
1179                csk->skb_ulp_lhdr = skb;
1180                cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1181
1182                if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
1183                        pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
1184                                csk->tid, cxgbi_skcb_tcp_seq(skb),
1185                                csk->rcv_nxt);
1186                        goto abort_conn;
1187                }
1188
1189                bhs = skb->data;
1190                hlen = ntohs(cpl->len);
1191                dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;
1192
1193                plen = ISCSI_PDU_LEN_G(pdu_len_ddp);
1194                if (is_t4(lldi->adapter_type))
1195                        plen -= 40;
1196
1197                if ((hlen + dlen) != plen) {
1198                        pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
1199                                "mismatch %u != %u + %u, seq 0x%x.\n",
1200                                csk->tid, plen, hlen, dlen,
1201                                cxgbi_skcb_tcp_seq(skb));
1202                        goto abort_conn;
1203                }
1204
1205                cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
1206                if (dlen)
1207                        cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
1208                csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);
1209
1210                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1211                        "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
1212                        csk, skb, *bhs, hlen, dlen,
1213                        ntohl(*((unsigned int *)(bhs + 16))),
1214                        ntohl(*((unsigned int *)(bhs + 24))));
1215
1216        } else {
1217                struct sk_buff *lskb = csk->skb_ulp_lhdr;
1218
1219                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
1220                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1221                        "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1222                        csk, csk->state, csk->flags, skb, lskb);
1223        }
1224
1225        __skb_queue_tail(&csk->receive_queue, skb);
1226        spin_unlock_bh(&csk->lock);
1227        return;
1228
1229abort_conn:
1230        send_abort_req(csk);
1231discard:
1232        spin_unlock_bh(&csk->lock);
1233rel_skb:
1234        __kfree_skb(skb);
1235}
1236
1237static void do_rx_iscsi_data(struct cxgbi_device *cdev, struct sk_buff *skb)
1238{
1239        struct cxgbi_sock *csk;
1240        struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
1241        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1242        struct tid_info *t = lldi->tids;
1243        struct sk_buff *lskb;
1244        u32 tid = GET_TID(cpl);
1245        u16 pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
1246
1247        csk = lookup_tid(t, tid);
1248        if (unlikely(!csk)) {
1249                pr_err("can't find conn. for tid %u.\n", tid);
1250                goto rel_skb;
1251        }
1252
1253        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1254                  "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
1255                  csk, csk->state, csk->flags, csk->tid, skb,
1256                  skb->len, pdu_len_ddp);
1257
1258        spin_lock_bh(&csk->lock);
1259
1260        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1261                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1262                          "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1263                          csk, csk->state, csk->flags, csk->tid);
1264
1265                if (csk->state != CTP_ABORTING)
1266                        goto abort_conn;
1267                else
1268                        goto discard;
1269        }
1270
1271        cxgbi_skcb_tcp_seq(skb) = be32_to_cpu(cpl->seq);
1272        cxgbi_skcb_flags(skb) = 0;
1273
1274        skb_reset_transport_header(skb);
1275        __skb_pull(skb, sizeof(*cpl));
1276        __pskb_trim(skb, ntohs(cpl->len));
1277
1278        if (!csk->skb_ulp_lhdr)
1279                csk->skb_ulp_lhdr = skb;
1280
1281        lskb = csk->skb_ulp_lhdr;
1282        cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
1283
1284        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1285                  "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
1286                  csk, csk->state, csk->flags, skb, lskb);
1287
1288        __skb_queue_tail(&csk->receive_queue, skb);
1289        spin_unlock_bh(&csk->lock);
1290        return;
1291
1292abort_conn:
1293        send_abort_req(csk);
1294discard:
1295        spin_unlock_bh(&csk->lock);
1296rel_skb:
1297        __kfree_skb(skb);
1298}
1299
1300static void
1301cxgb4i_process_ddpvld(struct cxgbi_sock *csk,
1302                      struct sk_buff *skb, u32 ddpvld)
1303{
1304        if (ddpvld & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
1305                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
1306                        csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1307                cxgbi_skcb_set_flag(skb, SKCBF_RX_HCRC_ERR);
1308        }
1309
1310        if (ddpvld & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
1311                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
1312                        csk, skb, ddpvld, cxgbi_skcb_flags(skb));
1313                cxgbi_skcb_set_flag(skb, SKCBF_RX_DCRC_ERR);
1314        }
1315
1316        if (ddpvld & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
1317                log_debug(1 << CXGBI_DBG_PDU_RX,
1318                          "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
1319                          csk, skb, ddpvld);
1320                cxgbi_skcb_set_flag(skb, SKCBF_RX_PAD_ERR);
1321        }
1322
1323        if ((ddpvld & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
1324            !cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
1325                log_debug(1 << CXGBI_DBG_PDU_RX,
1326                          "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
1327                          csk, skb, ddpvld);
1328                cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA_DDPD);
1329        }
1330}
1331
1332static void do_rx_data_ddp(struct cxgbi_device *cdev,
1333                                  struct sk_buff *skb)
1334{
1335        struct cxgbi_sock *csk;
1336        struct sk_buff *lskb;
1337        struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
1338        unsigned int tid = GET_TID(rpl);
1339        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1340        struct tid_info *t = lldi->tids;
1341        u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1342
1343        csk = lookup_tid(t, tid);
1344        if (unlikely(!csk)) {
1345                pr_err("can't find connection for tid %u.\n", tid);
1346                goto rel_skb;
1347        }
1348
1349        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1350                "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
1351                csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr);
1352
1353        spin_lock_bh(&csk->lock);
1354
1355        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1356                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1357                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1358                        csk, csk->state, csk->flags, csk->tid);
1359                if (csk->state != CTP_ABORTING)
1360                        goto abort_conn;
1361                else
1362                        goto discard;
1363        }
1364
1365        if (!csk->skb_ulp_lhdr) {
1366                pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
1367                goto abort_conn;
1368        }
1369
1370        lskb = csk->skb_ulp_lhdr;
1371        csk->skb_ulp_lhdr = NULL;
1372
1373        cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);
1374
1375        if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
1376                pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
1377                        csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));
1378
1379        cxgb4i_process_ddpvld(csk, lskb, ddpvld);
1380
1381        log_debug(1 << CXGBI_DBG_PDU_RX,
1382                "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
1383                csk, lskb, cxgbi_skcb_flags(lskb));
1384
1385        cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
1386        cxgbi_conn_pdu_ready(csk);
1387        spin_unlock_bh(&csk->lock);
1388        goto rel_skb;
1389
1390abort_conn:
1391        send_abort_req(csk);
1392discard:
1393        spin_unlock_bh(&csk->lock);
1394rel_skb:
1395        __kfree_skb(skb);
1396}
1397
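/*
 * CPL_RX_ISCSI_CMP: completion message (used by the later, T6-class
 * adapters) that combines a PDU header/status with the DDP result.
 * If a data PDU is still pending on the receive queue, re-queue it
 * behind this skb so the header/status is processed first.
 */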
1398static void
1399do_rx_iscsi_cmp(struct cxgbi_device *cdev, struct sk_buff *skb)
1400{
1401        struct cxgbi_sock *csk;
1402        struct cpl_rx_iscsi_cmp *rpl = (struct cpl_rx_iscsi_cmp *)skb->data;
1403        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1404        struct tid_info *t = lldi->tids;
1405        struct sk_buff *data_skb = NULL;
1406        u32 tid = GET_TID(rpl);
1407        u32 ddpvld = be32_to_cpu(rpl->ddpvld);
1408        u32 seq = be32_to_cpu(rpl->seq);
1409        u16 pdu_len_ddp = be16_to_cpu(rpl->pdu_len_ddp);
1410
1411        csk = lookup_tid(t, tid);
1412        if (unlikely(!csk)) {
1413                pr_err("can't find connection for tid %u.\n", tid);
1414                goto rel_skb;
1415        }
1416
1417        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
1418                  "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p, len %u, "
1419                  "pdu_len_ddp %u, status %u.\n",
1420                  csk, csk->state, csk->flags, skb, ddpvld, csk->skb_ulp_lhdr,
1421                  be16_to_cpu(rpl->len), pdu_len_ddp, rpl->status);
1422
1423        spin_lock_bh(&csk->lock);
1424
1425        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
1426                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1427                          "csk 0x%p,%u,0x%lx,%u, bad state.\n",
1428                          csk, csk->state, csk->flags, csk->tid);
1429
1430                if (csk->state != CTP_ABORTING)
1431                        goto abort_conn;
1432                else
1433                        goto discard;
1434        }
1435
1436        cxgbi_skcb_tcp_seq(skb) = seq;
1437        cxgbi_skcb_flags(skb) = 0;
1438        cxgbi_skcb_rx_pdulen(skb) = 0;
1439
1440        skb_reset_transport_header(skb);
1441        __skb_pull(skb, sizeof(*rpl));
1442        __pskb_trim(skb, be16_to_cpu(rpl->len));
1443
1444        csk->rcv_nxt = seq + pdu_len_ddp;
1445
1446        if (csk->skb_ulp_lhdr) {
1447                data_skb = skb_peek(&csk->receive_queue);
1448                if (!data_skb ||
1449                    !cxgbi_skcb_test_flag(data_skb, SKCBF_RX_DATA)) {
1450                        pr_err("Error! freelist data not found 0x%p, tid %u\n",
1451                               data_skb, tid);
1452
1453                        goto abort_conn;
1454                }
1455                __skb_unlink(data_skb, &csk->receive_queue);
1456
1457                cxgbi_skcb_set_flag(skb, SKCBF_RX_DATA);
1458
1459                __skb_queue_tail(&csk->receive_queue, skb);
1460                __skb_queue_tail(&csk->receive_queue, data_skb);
1461        } else {
1462                 __skb_queue_tail(&csk->receive_queue, skb);
1463        }
1464
1465        csk->skb_ulp_lhdr = NULL;
1466
1467        cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);
1468        cxgbi_skcb_set_flag(skb, SKCBF_RX_STATUS);
1469        cxgbi_skcb_set_flag(skb, SKCBF_RX_ISCSI_COMPL);
1470        cxgbi_skcb_rx_ddigest(skb) = be32_to_cpu(rpl->ulp_crc);
1471
1472        cxgb4i_process_ddpvld(csk, skb, ddpvld);
1473
1474        log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, skb 0x%p, f 0x%lx.\n",
1475                  csk, skb, cxgbi_skcb_flags(skb));
1476
1477        cxgbi_conn_pdu_ready(csk);
1478        spin_unlock_bh(&csk->lock);
1479
1480        return;
1481
1482abort_conn:
1483        send_abort_req(csk);
1484discard:
1485        spin_unlock_bh(&csk->lock);
1486rel_skb:
1487        __kfree_skb(skb);
1488}
1489
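/*
 * CPL_FW4_ACK: the firmware has consumed TX work-request credits.
 * Pass the returned credits (and the peer's updated snd_una, valid
 * when seq_vld is set) to libcxgbi so it can complete transmitted
 * skbs and unthrottle the TX path.
 */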
1490static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
1491{
1492        struct cxgbi_sock *csk;
1493        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
1494        unsigned int tid = GET_TID(rpl);
1495        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1496        struct tid_info *t = lldi->tids;
1497
1498        csk = lookup_tid(t, tid);
1499        if (unlikely(!csk))
1500                pr_err("can't find connection for tid %u.\n", tid);
1501        else {
1502                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1503                        "csk 0x%p,%u,0x%lx,%u.\n",
1504                        csk, csk->state, csk->flags, csk->tid);
1505                cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
1506                                        rpl->seq_vld);
1507        }
1508        __kfree_skb(skb);
1509}
1510
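/*
 * CPL_SET_TCB_RPL: completion of a CPL_SET_TCB_FIELD request (see
 * ddp_setup_conn_pgidx()/ddp_setup_conn_digest() below).  The status
 * is only logged; no recovery is attempted here.
 */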
1511static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
1512{
1513        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
1514        unsigned int tid = GET_TID(rpl);
1515        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1516        struct tid_info *t = lldi->tids;
1517        struct cxgbi_sock *csk;
1518
        csk = lookup_tid(t, tid);
        if (!csk) {
                /* csk is dereferenced just below; don't continue without it. */
                pr_err("can't find connection for tid %u.\n", tid);
                __kfree_skb(skb);
                return;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, rpl->status);
1526
1527        if (rpl->status != CPL_ERR_NONE)
1528                pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
1529                        csk, tid, rpl->status);
1530
1531        __kfree_skb(skb);
1532}
1533
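/*
 * Pre-allocate the skbs for the connection tear-down CPLs (close,
 * abort request, abort reply) so that tearing a connection down never
 * depends on an allocation succeeding under memory pressure.
 */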
1534static int alloc_cpls(struct cxgbi_sock *csk)
1535{
1536        csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
1537                                        0, GFP_KERNEL);
1538        if (!csk->cpl_close)
1539                return -ENOMEM;
1540
1541        csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
1542                                        0, GFP_KERNEL);
1543        if (!csk->cpl_abort_req)
1544                goto free_cpls;
1545
1546        csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
1547                                        0, GFP_KERNEL);
1548        if (!csk->cpl_abort_rpl)
1549                goto free_cpls;
1550        return 0;
1551
1552free_cpls:
1553        cxgbi_sock_free_cpl_skbs(csk);
1554        return -ENOMEM;
1555}
1556
1557static inline void l2t_put(struct cxgbi_sock *csk)
1558{
1559        if (csk->l2t) {
1560                cxgb4_l2t_release(csk->l2t);
1561                csk->l2t = NULL;
1562                cxgbi_sock_put(csk);
1563        }
1564}
1565
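/*
 * Undo everything init_act_open() acquired: the pre-allocated CPL
 * skbs, any queued write requests, the L2T entry, the IPv6 CLIP
 * entry, and finally the atid or tid together with the csk reference
 * each one holds.
 */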
1566static void release_offload_resources(struct cxgbi_sock *csk)
1567{
1568        struct cxgb4_lld_info *lldi;
1569#if IS_ENABLED(CONFIG_IPV6)
1570        struct net_device *ndev = csk->cdev->ports[csk->port_id];
1571#endif
1572
1573        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1574                "csk 0x%p,%u,0x%lx,%u.\n",
1575                csk, csk->state, csk->flags, csk->tid);
1576
1577        cxgbi_sock_free_cpl_skbs(csk);
1578        cxgbi_sock_purge_write_queue(csk);
1579        if (csk->wr_cred != csk->wr_max_cred) {
1580                cxgbi_sock_purge_wr_queue(csk);
1581                cxgbi_sock_reset_wr_list(csk);
1582        }
1583
1584        l2t_put(csk);
1585#if IS_ENABLED(CONFIG_IPV6)
1586        if (csk->csk_family == AF_INET6)
1587                cxgb4_clip_release(ndev,
1588                                   (const u32 *)&csk->saddr6.sin6_addr, 1);
1589#endif
1590
1591        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
1592                free_atid(csk);
1593        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
1594                lldi = cxgbi_cdev_priv(csk->cdev);
1595                cxgb4_remove_tid(lldi->tids, 0, csk->tid,
1596                                 csk->csk_family);
1597                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
1598                cxgbi_sock_put(csk);
1599        }
1600        csk->dst = NULL;
1601}
1602
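/*
 * Start an active open: resolve the neighbour, allocate an atid and
 * an L2T entry, size the open request for the chip generation
 * (T4/T5/T6), pick per-port TX/RX queues, scale the TCP send and
 * receive windows with the link speed, and finally send
 * CPL_ACT_OPEN_REQ (or the IPv6 variant).
 */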
1603static int init_act_open(struct cxgbi_sock *csk)
1604{
1605        struct cxgbi_device *cdev = csk->cdev;
1606        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1607        struct net_device *ndev = cdev->ports[csk->port_id];
1608        struct sk_buff *skb = NULL;
1609        struct neighbour *n = NULL;
1610        void *daddr;
1611        unsigned int step;
1612        unsigned int rxq_idx;
1613        unsigned int size, size6;
1614        unsigned int linkspeed;
        unsigned int rcv_winf, snd_winf;
        int ret;
1616
1617        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1618                "csk 0x%p,%u,0x%lx,%u.\n",
1619                csk, csk->state, csk->flags, csk->tid);
1620
1621        if (csk->csk_family == AF_INET)
1622                daddr = &csk->daddr.sin_addr.s_addr;
1623#if IS_ENABLED(CONFIG_IPV6)
1624        else if (csk->csk_family == AF_INET6)
1625                daddr = &csk->daddr6.sin6_addr;
1626#endif
1627        else {
1628                pr_err("address family 0x%x not supported\n", csk->csk_family);
1629                goto rel_resource;
1630        }
1631
1632        n = dst_neigh_lookup(csk->dst, daddr);
1633
1634        if (!n) {
1635                pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
1636                goto rel_resource;
1637        }
1638
1639        if (!(n->nud_state & NUD_VALID))
1640                neigh_event_send(n, NULL);
1641
        /*
         * cxgb4_alloc_atid() returns a negative value on failure; keep the
         * result in a signed local so the error check holds even if
         * csk->atid is an unsigned type.
         */
        ret = cxgb4_alloc_atid(lldi->tids, csk);
        if (ret < 0) {
                pr_err("%s, no atid available.\n", ndev->name);
                goto rel_resource_without_clip;
        }
        csk->atid = ret;
1647        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
1648        cxgbi_sock_get(csk);
1649
1650        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
1651        if (!csk->l2t) {
1652                pr_err("%s, cannot alloc l2t.\n", ndev->name);
1653                goto rel_resource_without_clip;
1654        }
1655        cxgbi_sock_get(csk);
1656
1657#if IS_ENABLED(CONFIG_IPV6)
1658        if (csk->csk_family == AF_INET6)
1659                cxgb4_clip_get(ndev, (const u32 *)&csk->saddr6.sin6_addr, 1);
1660#endif
1661
1662        if (is_t4(lldi->adapter_type)) {
1663                size = sizeof(struct cpl_act_open_req);
1664                size6 = sizeof(struct cpl_act_open_req6);
1665        } else if (is_t5(lldi->adapter_type)) {
1666                size = sizeof(struct cpl_t5_act_open_req);
1667                size6 = sizeof(struct cpl_t5_act_open_req6);
1668        } else {
1669                size = sizeof(struct cpl_t6_act_open_req);
1670                size6 = sizeof(struct cpl_t6_act_open_req6);
1671        }
1672
1673        if (csk->csk_family == AF_INET)
1674                skb = alloc_wr(size, 0, GFP_NOIO);
1675#if IS_ENABLED(CONFIG_IPV6)
1676        else
1677                skb = alloc_wr(size6, 0, GFP_NOIO);
1678#endif
1679
1680        if (!skb)
1681                goto rel_resource;
1682        skb->sk = (struct sock *)csk;
1683        t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);
1684
1685        if (!csk->mtu)
1686                csk->mtu = dst_mtu(csk->dst);
1687        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
1688        csk->tx_chan = cxgb4_port_chan(ndev);
1689        csk->smac_idx = cxgb4_tp_smt_idx(lldi->adapter_type,
1690                                         cxgb4_port_viid(ndev));
1691        step = lldi->ntxq / lldi->nchan;
1692        csk->txq_idx = cxgb4_port_idx(ndev) * step;
1693        step = lldi->nrxq / lldi->nchan;
1694        rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
1695        cdev->rxq_idx_cntr++;
1696        csk->rss_qid = lldi->rxq_ids[rxq_idx];
1697        linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
1698        csk->snd_win = cxgb4i_snd_win;
1699        csk->rcv_win = cxgb4i_rcv_win;
1700        if (cxgb4i_rcv_win <= 0) {
1701                csk->rcv_win = CXGB4I_DEFAULT_10G_RCV_WIN;
1702                rcv_winf = linkspeed / SPEED_10000;
1703                if (rcv_winf)
1704                        csk->rcv_win *= rcv_winf;
1705        }
1706        if (cxgb4i_snd_win <= 0) {
1707                csk->snd_win = CXGB4I_DEFAULT_10G_SND_WIN;
1708                snd_winf = linkspeed / SPEED_10000;
1709                if (snd_winf)
1710                        csk->snd_win *= snd_winf;
1711        }
1712        csk->wr_cred = lldi->wr_cred -
1713                       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
1714        csk->wr_max_cred = csk->wr_cred;
1715        csk->wr_una_cred = 0;
1716        cxgbi_sock_reset_wr_list(csk);
1717        csk->err = 0;
1718
1719        pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
1720                       (&csk->saddr), (&csk->daddr), csk, csk->state,
1721                       csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
1722                       csk->mtu, csk->mss_idx, csk->smac_idx);
1723
1724        /* must wait for either a act_open_rpl or act_open_establish */
1725        if (!try_module_get(cdev->owner)) {
1726                pr_err("%s, try_module_get failed.\n", ndev->name);
1727                goto rel_resource;
1728        }
1729
1730        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
1731        if (csk->csk_family == AF_INET)
1732                send_act_open_req(csk, skb, csk->l2t);
1733#if IS_ENABLED(CONFIG_IPV6)
1734        else
1735                send_act_open_req6(csk, skb, csk->l2t);
1736#endif
1737        neigh_release(n);
1738
1739        return 0;
1740
1741rel_resource:
1742#if IS_ENABLED(CONFIG_IPV6)
1743        if (csk->csk_family == AF_INET6)
1744                cxgb4_clip_release(ndev,
1745                                   (const u32 *)&csk->saddr6.sin6_addr, 1);
1746#endif
1747rel_resource_without_clip:
1748        if (n)
1749                neigh_release(n);
1750        if (skb)
1751                __kfree_skb(skb);
1752        return -EINVAL;
1753}
1754
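/*
 * CPL dispatch table, indexed by opcode, used by t4_uld_rx_handler().
 * Note that CPL_RX_DATA_DDP and CPL_RX_ISCSI_DDP share a handler.
 */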
1755static cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
1756        [CPL_ACT_ESTABLISH] = do_act_establish,
1757        [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
1758        [CPL_PEER_CLOSE] = do_peer_close,
1759        [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
1760        [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
1761        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
1762        [CPL_FW4_ACK] = do_fw4_ack,
1763        [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
1764        [CPL_ISCSI_DATA] = do_rx_iscsi_data,
1765        [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
1766        [CPL_RX_DATA_DDP] = do_rx_data_ddp,
1767        [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
1768        [CPL_RX_ISCSI_CMP] = do_rx_iscsi_cmp,
1769        [CPL_RX_DATA] = do_rx_data,
1770};
1771
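/*
 * Register the connection-offload entry points with libcxgbi and
 * create the source-port map (cxgb4i_max_connect ports starting at
 * cxgb4i_sport_base) from which local port numbers are assigned.
 */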
1772static int cxgb4i_ofld_init(struct cxgbi_device *cdev)
1773{
1774        int rc;
1775
1776        if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
1777                cxgb4i_max_connect = CXGB4I_MAX_CONN;
1778
1779        rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
1780                                        cxgb4i_max_connect);
1781        if (rc < 0)
1782                return rc;
1783
1784        cdev->csk_release_offload_resources = release_offload_resources;
1785        cdev->csk_push_tx_frames = push_tx_frames;
1786        cdev->csk_send_abort_req = send_abort_req;
1787        cdev->csk_send_close_req = send_close_req;
1788        cdev->csk_send_rx_credits = send_rx_credits;
1789        cdev->csk_alloc_cpls = alloc_cpls;
1790        cdev->csk_init_act_open = init_act_open;
1791
1792        pr_info("cdev 0x%p, offload up, added.\n", cdev);
1793        return 0;
1794}
1795
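/*
 * Fill in the header of a ULP_TX memory-write work request.  Both the
 * data length and the target adapter-memory address are programmed in
 * 32-byte units, and the pagepod payload follows inline as a
 * ULP_TX_SC_IMM immediate.
 */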
1796static inline void
1797ulp_mem_io_set_hdr(struct cxgbi_device *cdev,
1798                   struct ulp_mem_io *req,
1799                   unsigned int wr_len, unsigned int dlen,
1800                   unsigned int pm_addr,
1801                   int tid)
1802{
1803        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1804        struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);
1805
1806        INIT_ULPTX_WR(req, wr_len, 0, tid);
1807        req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
1808                FW_WR_ATOMIC_V(0));
1809        req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
1810                ULP_MEMIO_ORDER_V(is_t4(lldi->adapter_type)) |
1811                T5_ULP_MEMIO_IMM_V(!is_t4(lldi->adapter_type)));
1812        req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
1813        req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
1814        req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
1815
1816        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
1817        idata->len = htonl(dlen);
1818}
1819
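/*
 * Allocate a work-request skb large enough to carry "npods" pagepods
 * as immediate data and point its header at the pagepod region of
 * adapter memory (ppm->llimit plus the pagepod index scaled by the
 * pagepod size).
 */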
1820static struct sk_buff *
1821ddp_ppod_init_idata(struct cxgbi_device *cdev,
1822                    struct cxgbi_ppm *ppm,
1823                    unsigned int idx, unsigned int npods,
1824                    unsigned int tid)
1825{
1826        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
1827        unsigned int dlen = npods << PPOD_SIZE_SHIFT;
1828        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
1829                                sizeof(struct ulptx_idata) + dlen, 16);
1830        struct sk_buff *skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
1831
1832        if (!skb) {
1833                pr_err("%s: %s idx %u, npods %u, OOM.\n",
1834                       __func__, ppm->ndev->name, idx, npods);
1835                return NULL;
1836        }
1837
1838        ulp_mem_io_set_hdr(cdev, (struct ulp_mem_io *)skb->head, wr_len, dlen,
1839                           pm_addr, tid);
1840
1841        return skb;
1842}
1843
1844static int ddp_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1845                                struct cxgbi_task_tag_info *ttinfo,
1846                                unsigned int idx, unsigned int npods,
1847                                struct scatterlist **sg_pp,
1848                                unsigned int *sg_off)
1849{
1850        struct cxgbi_device *cdev = csk->cdev;
1851        struct sk_buff *skb = ddp_ppod_init_idata(cdev, ppm, idx, npods,
1852                                                  csk->tid);
1853        struct ulp_mem_io *req;
1854        struct ulptx_idata *idata;
1855        struct cxgbi_pagepod *ppod;
1856        int i;
1857
1858        if (!skb)
1859                return -ENOMEM;
1860
1861        req = (struct ulp_mem_io *)skb->head;
1862        idata = (struct ulptx_idata *)(req + 1);
1863        ppod = (struct cxgbi_pagepod *)(idata + 1);
1864
1865        for (i = 0; i < npods; i++, ppod++)
1866                cxgbi_ddp_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
1867
1868        cxgbi_skcb_set_flag(skb, SKCBF_TX_MEM_WRITE);
1869        cxgbi_skcb_set_flag(skb, SKCBF_TX_FLAG_COMPL);
1870        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
1871
1872        spin_lock_bh(&csk->lock);
1873        cxgbi_sock_skb_entail(csk, skb);
1874        spin_unlock_bh(&csk->lock);
1875
1876        return 0;
1877}
1878
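/*
 * Program all pagepods for a task, at most ULPMEM_IDATA_MAX_NPPODS
 * per work request; sg_pp/sg_off carry the scatterlist position
 * across the successive ddp_ppod_write_idata() calls.
 */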
1879static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
1880                       struct cxgbi_task_tag_info *ttinfo)
1881{
1882        unsigned int pidx = ttinfo->idx;
1883        unsigned int npods = ttinfo->npods;
1884        unsigned int i, cnt;
1885        int err = 0;
1886        struct scatterlist *sg = ttinfo->sgl;
1887        unsigned int offset = 0;
1888
1889        ttinfo->cid = csk->port_id;
1890
1891        for (i = 0; i < npods; i += cnt, pidx += cnt) {
1892                cnt = npods - i;
1893
1894                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
1895                        cnt = ULPMEM_IDATA_MAX_NPPODS;
1896                err = ddp_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
1897                                           &sg, &offset);
1898                if (err < 0)
1899                        break;
1900        }
1901
1902        return err;
1903}
1904
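/*
 * Set the connection's DDP page-size index via CPL_SET_TCB_FIELD
 * (the two-bit field at bit 8 of TCB word 0, as programmed here).
 * pg_idx 0 is the default, so no update needs to be sent for it.
 */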
1905static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1906                                int pg_idx, bool reply)
1907{
1908        struct sk_buff *skb;
1909        struct cpl_set_tcb_field *req;
1910
1911        if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
1912                return 0;
1913
1914        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1915        if (!skb)
1916                return -ENOMEM;
1917
1918        /* set up ulp page size */
1919        req = (struct cpl_set_tcb_field *)skb->head;
1920        INIT_TP_WR(req, csk->tid);
1921        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1922        req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
1923        req->word_cookie = htons(0);
1924        req->mask = cpu_to_be64(0x3 << 8);
1925        req->val = cpu_to_be64(pg_idx << 8);
1926        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1927
1928        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1929                "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
1930
1931        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1932        return 0;
1933}
1934
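/*
 * Enable header and/or data digest offload for the connection by
 * setting the two ULP CRC enable bits (at bit 4 of TCB word 0) via
 * CPL_SET_TCB_FIELD, and record the resulting 4-byte per-PDU digest
 * lengths in the csk.
 */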
1935static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1936                                 int hcrc, int dcrc, int reply)
1937{
1938        struct sk_buff *skb;
1939        struct cpl_set_tcb_field *req;
1940
1941        if (!hcrc && !dcrc)
1942                return 0;
1943
1944        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1945        if (!skb)
1946                return -ENOMEM;
1947
1948        csk->hcrc_len = (hcrc ? 4 : 0);
1949        csk->dcrc_len = (dcrc ? 4 : 0);
1950        /* set up ulp submode */
1951        req = (struct cpl_set_tcb_field *)skb->head;
1952        INIT_TP_WR(req, tid);
1953        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1954        req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
1955        req->word_cookie = htons(0);
1956        req->mask = cpu_to_be64(0x3 << 4);
1957        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1958                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
1959        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1960
1961        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1962                "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
1963
1964        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1965        return 0;
1966}
1967
1968static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
1969{
1970        return (struct cxgbi_ppm *)(*((struct cxgb4_lld_info *)
1971                                       (cxgbi_cdev_priv(cdev)))->iscsi_ppm);
1972}
1973
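/*
 * Set up DDP: carve the adapter's iSCSI memory region into pagepods,
 * unpack the four supported page-size orders (one per byte of
 * iscsi_pgsz_order), and register the tag format and the DDP
 * callbacks above with libcxgbi.
 */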
1974static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
1975{
1976        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1977        struct net_device *ndev = cdev->ports[0];
1978        struct cxgbi_tag_format tformat;
1979        unsigned int ppmax;
1980        int i;
1981
1982        if (!lldi->vr->iscsi.size) {
1983                pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
1984                return -EACCES;
1985        }
1986
1987        cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ;
1988        ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
1989
1990        memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
1991        for (i = 0; i < 4; i++)
1992                tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
1993                                         & 0xF;
1994        cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
1995
1996        cxgbi_ddp_ppm_setup(lldi->iscsi_ppm, cdev, &tformat, ppmax,
1997                            lldi->iscsi_llimit, lldi->vr->iscsi.start, 2);
1998
1999        cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
2000        cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
2001        cdev->csk_ddp_set_map = ddp_set_map;
2002        cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2003                                  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2004        cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
2005                                  lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN);
2006        cdev->cdev2ppm = cdev2ppm;
2007
2008        return 0;
2009}
2010
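/*
 * ULD "add" callback, invoked by cxgb4 once per adapter.  Registers a
 * cxgbi_device, keeps a private copy of the lld info, initializes DDP
 * and connection offload, and creates one iSCSI host per port.
 */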
2011static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
2012{
2013        struct cxgbi_device *cdev;
2014        struct port_info *pi;
2015        int i, rc;
2016
2017        cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
2018        if (!cdev) {
2019                pr_info("t4 device 0x%p, register failed.\n", lldi);
2020                return NULL;
2021        }
2022        pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
2023                cdev, lldi->adapter_type, lldi->nports,
2024                lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
2025                lldi->nrxq, lldi->wr_cred);
2026        for (i = 0; i < lldi->nrxq; i++)
2027                log_debug(1 << CXGBI_DBG_DEV,
2028                        "t4 0x%p, rxq id #%d: %u.\n",
2029                        cdev, i, lldi->rxq_ids[i]);
2030
2031        memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
2032        cdev->flags = CXGBI_FLAG_DEV_T4;
2033        cdev->pdev = lldi->pdev;
2034        cdev->ports = lldi->ports;
2035        cdev->nports = lldi->nports;
2036        cdev->mtus = lldi->mtus;
2037        cdev->nmtus = NMTUS;
2038        cdev->rx_credit_thres = (CHELSIO_CHIP_VERSION(lldi->adapter_type) <=
2039                                 CHELSIO_T5) ? cxgb4i_rx_credit_thres : 0;
2040        cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
2041        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
2042        cdev->itp = &cxgb4i_iscsi_transport;
2043        cdev->owner = THIS_MODULE;
2044
2045        cdev->pfvf = FW_VIID_PFN_G(cxgb4_port_viid(lldi->ports[0]))
2046                        << FW_VIID_PFN_S;
2047        pr_info("cdev 0x%p,%s, pfvf %u.\n",
2048                cdev, lldi->ports[0]->name, cdev->pfvf);
2049
2050        rc = cxgb4i_ddp_init(cdev);
2051        if (rc) {
2052                pr_info("t4 0x%p ddp init failed.\n", cdev);
2053                goto err_out;
2054        }
2055        rc = cxgb4i_ofld_init(cdev);
2056        if (rc) {
2057                pr_info("t4 0x%p ofld init failed.\n", cdev);
2058                goto err_out;
2059        }
2060
2061        rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
2062                                &cxgb4i_host_template, cxgb4i_stt);
2063        if (rc)
2064                goto err_out;
2065
2066        for (i = 0; i < cdev->nports; i++) {
2067                pi = netdev_priv(lldi->ports[i]);
2068                cdev->hbas[i]->port_id = pi->port_id;
2069        }
2070        return cdev;
2071
2072err_out:
2073        cxgbi_device_unregister(cdev);
2074        return ERR_PTR(rc);
2075}
2076
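/*
 * ULD RX callback.  A response without a gather list is a small CPL
 * delivered inline in the response descriptor and is copied into a
 * fresh skb; otherwise the gather list is converted to an skb.  The
 * CPL opcode sits at the same offset in every message, so
 * cpl_act_establish serves as a generic overlay for the dispatch.
 */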
2077#define RX_PULL_LEN     128
2078static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
2079                                const struct pkt_gl *pgl)
2080{
2081        const struct cpl_act_establish *rpl;
2082        struct sk_buff *skb;
2083        unsigned int opc;
2084        struct cxgbi_device *cdev = handle;
2085
2086        if (pgl == NULL) {
2087                unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
2088
2089                skb = alloc_wr(len, 0, GFP_ATOMIC);
2090                if (!skb)
2091                        goto nomem;
2092                skb_copy_to_linear_data(skb, &rsp[1], len);
2093        } else {
2094                if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
2095                pr_info("RSS/FL desc mismatch 0x%p, RSS %#llx, FL %#llx, len %u.\n",
2096                                pgl->va, be64_to_cpu(*rsp),
2097                                be64_to_cpu(*(u64 *)pgl->va),
2098                                pgl->tot_len);
2099                        return 0;
2100                }
2101                skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
2102                if (unlikely(!skb))
2103                        goto nomem;
2104        }
2105
2106        rpl = (struct cpl_act_establish *)skb->data;
2107        opc = rpl->ot.opcode;
2108        log_debug(1 << CXGBI_DBG_TOE,
2109                "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
2110                 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
2111        if (opc >= ARRAY_SIZE(cxgb4i_cplhandlers) || !cxgb4i_cplhandlers[opc]) {
2112                pr_err("No handler for opcode 0x%x.\n", opc);
2113                __kfree_skb(skb);
2114        } else
2115                cxgb4i_cplhandlers[opc](cdev, skb);
2116
2117        return 0;
2118nomem:
2119        log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
2120        return 1;
2121}
2122
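/*
 * ULD state-change callback.  Only DETACH does real work today
 * (unregistering the device); the recovery case is still a TODO, as
 * the comment below notes.
 */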
2123static int t4_uld_state_change(void *handle, enum cxgb4_state state)
2124{
2125        struct cxgbi_device *cdev = handle;
2126
2127        switch (state) {
2128        case CXGB4_STATE_UP:
2129                pr_info("cdev 0x%p, UP.\n", cdev);
2130                break;
2131        case CXGB4_STATE_START_RECOVERY:
2132                pr_info("cdev 0x%p, RECOVERY.\n", cdev);
2133                /* close all connections */
2134                break;
2135        case CXGB4_STATE_DOWN:
2136                pr_info("cdev 0x%p, DOWN.\n", cdev);
2137                break;
2138        case CXGB4_STATE_DETACH:
2139                pr_info("cdev 0x%p, DETACH.\n", cdev);
2140                cxgbi_device_unregister(cdev);
2141                break;
2142        default:
2143                pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
2144                break;
2145        }
2146        return 0;
2147}
2148
2149static int __init cxgb4i_init_module(void)
2150{
2151        int rc;
2152
2153        printk(KERN_INFO "%s", version);
2154
2155        rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
2156        if (rc < 0)
2157                return rc;
2158        cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
2159
2160        return 0;
2161}
2162
2163static void __exit cxgb4i_exit_module(void)
2164{
2165        cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
2166        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
2167        cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
2168}
2169
2170module_init(cxgb4i_init_module);
2171module_exit(cxgb4i_exit_module);
2172