linux/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:  Karen Xie (kxie@chelsio.com)
 *              Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>

#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb4i"
#define DRV_MODULE_DESC         "Chelsio T4 iSCSI Driver"
#define DRV_MODULE_VERSION      "0.9.1"
#define DRV_MODULE_RELDATE      "Aug. 2010"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
                "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
        .name = DRV_MODULE_NAME,
        .add = t4_uld_add,
        .rx_handler = t4_uld_rx_handler,
        .state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
        .module         = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .proc_name      = DRV_MODULE_NAME,
        .can_queue      = CXGB4I_SCSI_HOST_QDEPTH,
        .queuecommand   = iscsi_queuecommand,
        .change_queue_depth = iscsi_change_queue_depth,
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc   = iscsi_target_alloc,
        .use_clustering = DISABLE_CLUSTERING,
        .this_id        = -1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
        .owner          = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                                CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                                CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible        = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
        .create_session = cxgbi_create_session,
        .destroy_session        = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn    = cxgbi_create_conn,
        .bind_conn              = cxgbi_bind_conn,
        .destroy_conn   = iscsi_tcp_conn_teardown,
        .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_conn_stop,
        .get_conn_param = iscsi_conn_get_param,
        .set_param      = cxgbi_set_conn_param,
        .get_stats      = cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu       = iscsi_conn_send_pdu,
        /* task */
        .init_task      = iscsi_tcp_task_init,
        .xmit_task      = iscsi_tcp_task_xmit,
        .cleanup_task   = cxgbi_cleanup_task,
        /* pdu */
        .alloc_pdu      = cxgbi_conn_alloc_pdu,
        .init_pdu       = cxgbi_conn_init_pdu,
        .xmit_pdu       = cxgbi_conn_xmit_pdu,
        .parse_pdu_itt  = cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param   = cxgbi_get_ep_param,
        .ep_connect     = cxgbi_ep_connect,
        .ep_poll        = cxgbi_ep_poll,
        .ep_disconnect  = cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface between
 * the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK         0x3FFU
#define MAX_IMM_TX_PKT_LEN      128

static inline void set_queue(struct sk_buff *skb, unsigned int queue,
                                const struct cxgbi_sock *csk)
{
        skb->queue_mapping = queue;
}

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return skb->len <= (MAX_IMM_TX_PKT_LEN -
                        sizeof(struct fw_ofld_tx_data_wr));
}

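/*
 * send_act_open_req - send a CPL_ACT_OPEN_REQ to initiate an active TCP
 * open. The opt0/opt2 words carry the connection parameters (window
 * scale, MSS index, L2T index, tx channel, iSCSI ULP mode, receive
 * buffer size, RSS queue). The WR is sent through the L2T entry so it
 * waits for address resolution if needed.
 */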
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
{
        struct cpl_act_open_req *req;
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE(1) |
                WND_SCALE(wscale) |
                MSS_IDX(csk->mss_idx) |
                L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN(csk->tx_chan) |
                SMAC_SEL(csk->smac_idx) |
                ULP_MODE(ULP_MODE_ISCSI) |
                RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
        opt2 = RX_CHANNEL(0) |
                RSS_QUEUE_VALID |
                (1 << 20) | (1 << 22) |
                RSS_QUEUE(csk->rss_qid);

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
        req = (struct cpl_act_open_req *)skb->head;

        INIT_TP_WR(req, 0);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
        req->local_port = csk->saddr.sin_port;
        req->peer_port = csk->daddr.sin_port;
        req->local_ip = csk->saddr.sin_addr.s_addr;
        req->peer_ip = csk->daddr.sin_addr.s_addr;
        req->opt0 = cpu_to_be64(opt0);
        req->params = 0;
        req->opt2 = cpu_to_be32(opt2);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                csk, &req->local_ip, ntohs(req->local_port),
                &req->peer_ip, ntohs(req->peer_port),
                csk->atid, csk->rss_qid);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u.\n",
                csk, csk->state, csk->flags, csk->tid);
        csk->cpl_close = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
        req->rsvd = 0;

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
        struct cpl_abort_req *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
                csk, csk->state, csk->flags, csk->tid);
        req = (struct cpl_abort_req *)skb->data;
        req->cmd = CPL_ABORT_NO_RST;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

static void send_abort_req(struct cxgbi_sock *csk)
{
        struct cpl_abort_req *req;
        struct sk_buff *skb = csk->cpl_abort_req;

        if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                return;
        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        req->cmd = CPL_ABORT_SEND_RST;
        t4_set_arp_err_handler(skb, csk, abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                      csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
        unsigned int flits, cnt;

        if (is_ofld_imm(skb))
                return DIV_ROUND_UP(skb->len, 8);
        flits = skb_transport_offset(skb) / 8;
        cnt = skb_shinfo(skb)->nr_frags;
        if (skb->tail != skb->transport_header)
                cnt++;
        return flits + sgl_len(cnt);
}

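/*
 * send_tx_flowc_wr - send a FW_FLOWC_WR to set up per-flow parameters.
 * The firmware is given the flow parameters (PF/VF, channel, port,
 * ingress queue, initial send/receive sequence numbers, send buffer and
 * MSS) before the first FW_OFLD_TX_DATA_WR on the connection. Note the
 * atomic skb allocation here is assumed to succeed; there is no NULL
 * check.
 */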
static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;
        struct fw_flowc_wr *flowc;
        int flowclen, i;

        flowclen = 80;
        skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
        flowc->op_to_nparams =
                htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
        flowc->flowid_len16 =
                htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
                                FW_WR_FLOWID(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = htonl(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = htonl(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = htonl(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = htonl(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = htonl(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = htonl(csk->advmss);
        flowc->mnemval[8].mnemonic = 0;
        flowc->mnemval[8].val = 0;
        for (i = 0; i < 9; i++) {
                flowc->mnemval[i].r4[0] = 0;
                flowc->mnemval[i].r4[1] = 0;
                flowc->mnemval[i].r4[2] = 0;
        }
        set_queue(skb, CPL_PRIORITY_DATA, csk);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
                csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
                csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
                csk->advmss);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

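/*
 * make_tx_data_wr - prepend a FW_OFLD_TX_DATA_WR header to a tx skb.
 * For immediate WRs the payload is carried inside the WR itself;
 * otherwise the payload is referenced by an SGL. The ULP submode bits
 * request header/data digest insertion by the hardware, and SHOVE is
 * set when the write queue is empty so the data is pushed right away.
 */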
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
                                   int dlen, int len, u32 credits, int compl)
{
        struct fw_ofld_tx_data_wr *req;
        unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
        unsigned int wr_ulp_mode = 0;

        req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

        if (is_ofld_imm(skb)) {
                req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL(1) |
                                        FW_WR_IMMDLEN(dlen));
                req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
                                                FW_WR_LEN16(credits));
        } else {
                req->op_to_immdlen =
                        cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL(1) |
                                        FW_WR_IMMDLEN(0));
                req->flowid_len16 =
                        cpu_to_be32(FW_WR_FLOWID(csk->tid) |
                                        FW_WR_LEN16(credits));
        }
        if (submode)
                wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
                                FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
        req->tunnel_to_proxy = htonl(wr_ulp_mode |
                 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
        req->plen = htonl(len);
        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
        kfree_skb(skb);
}

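/*
 * push_tx_frames - send as much queued tx data as tx credits allow.
 * Walks csk->write_queue, converts each skb's length into 16-byte WR
 * credits, and stops when credits run out. The first WR on a connection
 * is preceded by a FLOWC WR (5 credits). Sent skbs are moved to the
 * pending-ack list; credits are returned via CPL_FW4_ACK.
 */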
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
                         1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int dlen = skb->len;
                int len = skb->len;
                unsigned int credits_needed;

                skb_reset_transport_header(skb);
                if (is_ofld_imm(skb))
                        credits_needed = DIV_ROUND_UP(dlen +
                                        sizeof(struct fw_ofld_tx_data_wr), 16);
                else
                        credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
                                        + sizeof(struct fw_ofld_tx_data_wr),
                                        16);

                if (csk->wr_cred < credits_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                "csk 0x%p, skb %u/%u, wr %d < %u.\n",
                                csk, skb->len, skb->data_len,
                                credits_needed, csk->wr_cred);
                        break;
                }
                __skb_unlink(skb, &csk->write_queue);
                set_queue(skb, CPL_PRIORITY_DATA, csk);
                skb->csum = credits_needed;
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, credits_needed,
                        csk->wr_cred, csk->wr_una_cred);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                                send_tx_flowc_wr(csk);
                                skb->csum += 5;
                                csk->wr_cred -= 5;
                                csk->wr_una_cred += 5;
                        }
                        len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                        make_tx_data_wr(csk, skb, dlen, len, credits_needed,
                                        req_completion);
                        csk->snd_nxt += len;
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                }
                total_size += skb->truesize;
                t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
                        csk, csk->state, csk->flags, csk->tid, skb, len);

                cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
        }
        return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb4_free_atid(lldi->tids, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
                cxgbi_sock_put(csk);
        }
}

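/*
 * do_act_establish - process CPL_ACT_ESTABLISH: the active open
 * completed. The connection moves from the atid to the hardware-assigned
 * tid, the initial receive sequence number is recorded, and the
 * advertised MSS is derived from the negotiated TCP options. Any data
 * already queued for transmit is pushed out.
 */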
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
        unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
                goto rel_skb;
        }

        if (csk->atid != atid) {
                pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
                        atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
                csk, csk->state, csk->flags, tid, atid, rcv_isn);

        cxgbi_sock_get(csk);
        csk->tid = tid;
        cxgb4_insert_tid(lldi->tids, csk, tid);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

        free_atid(csk);

        spin_lock_bh(&csk->lock);
        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        /*
         * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
         * pass through opt0.
         */
        if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

        csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
        if (GET_TCPOPT_TSTAMP(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
                        csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 0);
                cxgbi_conn_tx_open(csk);
        }
        spin_unlock_bh(&csk->lock);

rel_skb:
        __kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

static void csk_act_open_retry_timer(unsigned long data)
{
        struct sk_buff *skb;
        struct cxgbi_sock *csk = (struct cxgbi_sock *)data;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_ATOMIC);
        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
                                        cxgbi_sock_act_open_req_arp_failure);
                send_act_open_req(csk, skb, csk->l2t);
        }
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}

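/*
 * do_act_open_rpl - process CPL_ACT_OPEN_RPL: the active open failed.
 * CPL_ERR_CONN_EXIST (a connection with the same tuple already exists)
 * is retried after half a second via the retry timer; negative advice
 * is ignored; other statuses fail the open with a matching errno.
 */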
static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
                GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
        unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
                goto rel_skb;
        }

        pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
                atid, tid, status, csk, csk->state, csk->flags);

        if (status == CPL_ERR_RTX_NEG_ADVICE)
                goto rel_skb;

        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
                cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != csk_act_open_retry_timer) {
                csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                        act_open_rpl_status_to_errno(status));

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
        __kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                                                int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN: /* fall through */
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ?
                        -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

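/*
 * do_abort_req_rss - process a peer abort (CPL_ABORT_REQ_RSS).
 * The first notification only marks the connection as aborting; the
 * second one sends the CPL_ABORT_RPL. If no local abort is pending,
 * the errno is derived from the abort reason and the connection is
 * closed. Negative advice is ignored.
 */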
static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        int rst_status = CPL_ABORT_NO_RST;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, req->status);

        if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
            req->status == CPL_ERR_PERSIST_NEG_ADVICE)
                goto rel_skb;

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
                cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
                cxgbi_sock_set_state(csk, CTP_ABORTING);
                goto done;
        }

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }
done:
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk)
                goto rel_skb;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
                rpl->status, csk, csk ? csk->state : 0,
                csk ? csk->flags : 0UL);

        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;

        cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
        __kfree_skb(skb);
}

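/*
 * do_rx_iscsi_hdr - process CPL_ISCSI_HDR.
 * The hardware delivers each received PDU as a header CPL and, when the
 * payload is not fully DDP'ed, a data CPL; both land here.
 * csk->skb_ulp_lhdr tracks the header skb of the PDU currently being
 * assembled until the matching CPL_RX_DATA_DDP status message completes
 * it. The header path also validates the TCP sequence number and the
 * PDU length against pdu_len_ddp.
 */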
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
        unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, skb, skb->len,
                pdu_len_ddp);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
        cxgbi_skcb_flags(skb) = 0;

        skb_reset_transport_header(skb);
        __skb_pull(skb, sizeof(*cpl));
        __pskb_trim(skb, ntohs(cpl->len));

        if (!csk->skb_ulp_lhdr) {
                unsigned char *bhs;
                unsigned int hlen, dlen;

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
                        csk, csk->state, csk->flags, csk->tid, skb);
                csk->skb_ulp_lhdr = skb;
                cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

                if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
                        pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
                                csk->tid, cxgbi_skcb_tcp_seq(skb),
                                csk->rcv_nxt);
                        goto abort_conn;
                }

                bhs = skb->data;
                hlen = ntohs(cpl->len);
                dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

                if ((hlen + dlen) != ISCSI_PDU_LEN(pdu_len_ddp) - 40) {
                        pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
                                "mismatch %u != %u + %u, seq 0x%x.\n",
                                csk->tid, ISCSI_PDU_LEN(pdu_len_ddp) - 40,
                                hlen, dlen, cxgbi_skcb_tcp_seq(skb));
                        goto abort_conn;
                }

                cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
                if (dlen)
                        cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
                csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
                        csk, skb, *bhs, hlen, dlen,
                        ntohl(*((unsigned int *)(bhs + 16))),
                        ntohl(*((unsigned int *)(bhs + 24))));

        } else {
                struct sk_buff *lskb = csk->skb_ulp_lhdr;

                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
                        csk, csk->state, csk->flags, skb, lskb);
        }

        __skb_queue_tail(&csk->receive_queue, skb);
        spin_unlock_bh(&csk->lock);
        return;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}

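/*
 * do_rx_data_ddp - process CPL_RX_DATA_DDP, the completion message for
 * the PDU whose header arrived in do_rx_iscsi_hdr. The ddpvld status
 * bits report header/data digest errors, padding errors and whether the
 * payload was placed directly (DDP'ed). The header skb is flagged
 * accordingly and the PDU is handed to libcxgbi via
 * cxgbi_conn_pdu_ready().
 */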
static void do_rx_data_ddp(struct cxgbi_device *cdev,
                                  struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct sk_buff *lskb;
        struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        unsigned int status = ntohl(rpl->ddpvld);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
                csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        if (!csk->skb_ulp_lhdr) {
                pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
                goto abort_conn;
        }

        lskb = csk->skb_ulp_lhdr;
        csk->skb_ulp_lhdr = NULL;

        cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

        if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
                pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
                        csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

        if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
        }
        if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
        }
        if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
                        csk, lskb, status);
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
        }
        if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
                !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
                        csk, lskb, status);
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
        }
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
                csk, lskb, cxgbi_skcb_flags(lskb));

        cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
        cxgbi_conn_pdu_ready(csk);
        spin_unlock_bh(&csk->lock);
        goto rel_skb;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}

static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk))
                pr_err("can't find connection for tid %u.\n", tid);
        else {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u.\n",
                        csk, csk->state, csk->flags, csk->tid);
                cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
                                        rpl->seq_vld);
        }
        __kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        struct cxgbi_sock *csk;

        csk = lookup_tid(t, tid);
        if (!csk) {
                /* bail out before the log below dereferences a NULL csk */
                pr_err("can't find conn. for tid %u.\n", tid);
                __kfree_skb(skb);
                return;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, rpl->status);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
                        csk, tid, rpl->status);

        __kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
        csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
                                        0, GFP_KERNEL);
        if (!csk->cpl_close)
                return -ENOMEM;

        csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
                                        0, GFP_KERNEL);
        if (!csk->cpl_abort_req)
                goto free_cpls;

        csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
                                        0, GFP_KERNEL);
        if (!csk->cpl_abort_rpl)
                goto free_cpls;
        return 0;

free_cpls:
        cxgbi_sock_free_cpl_skbs(csk);
        return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
        if (csk->l2t) {
                cxgb4_l2t_release(csk->l2t);
                csk->l2t = NULL;
                cxgbi_sock_put(csk);
        }
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_free_cpl_skbs(csk);
        if (csk->wr_cred != csk->wr_max_cred) {
                cxgbi_sock_purge_wr_queue(csk);
                cxgbi_sock_reset_wr_list(csk);
        }

        l2t_put(csk);
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
                free_atid(csk);
        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
                lldi = cxgbi_cdev_priv(csk->cdev);
                cxgb4_remove_tid(lldi->tids, 0, csk->tid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
                cxgbi_sock_put(csk);
        }
        csk->dst = NULL;
        csk->cdev = NULL;
}

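/*
 * init_act_open - prepare and send the first active-open request.
 * Allocates an atid and an L2T entry from the neighbour of csk->dst,
 * picks the tx queue, RSS queue and MSS index for the port, resets the
 * WR credit accounting, and sends CPL_ACT_OPEN_REQ. Holds two extra
 * references on csk: one for the atid, one for the L2T entry.
 */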
static int init_act_open(struct cxgbi_sock *csk)
{
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
        struct neighbour *n;
        unsigned int step;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
        if (csk->atid < 0) {
                pr_err("%s, NO atid available.\n", ndev->name);
                return -EINVAL;
        }
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);

        n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
        if (!n) {
                pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
                goto rel_resource;
        }
        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource;
        }
        cxgbi_sock_get(csk);

        skb = alloc_wr(sizeof(struct cpl_act_open_req), 0, GFP_KERNEL);
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;
        t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

        if (!csk->mtu)
                csk->mtu = dst_mtu(csk->dst);
        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
        csk->tx_chan = cxgb4_port_chan(ndev);
        /* SMT two entries per row */
        csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
        step = lldi->ntxq / lldi->nchan;
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
        csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
        csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
        csk->wr_una_cred = 0;
        cxgbi_sock_reset_wr_list(csk);
        csk->err = 0;
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
                csk, pi->port_id, ndev->name, csk->tx_chan,
                csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
                csk->smac_idx);

        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        send_act_open_req(csk, skb, csk->l2t);
        neigh_release(n);
        return 0;

rel_resource:
        if (n)
                neigh_release(n);
        if (skb)
                __kfree_skb(skb);
        return -EINVAL;
}

cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = do_act_establish,
        [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
        [CPL_PEER_CLOSE] = do_peer_close,
        [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
        [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
        [CPL_FW4_ACK] = do_fw4_ack,
        [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
        [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
        [CPL_RX_DATA_DDP] = do_rx_data_ddp,
};

int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
        int rc;

        if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
                cxgb4i_max_connect = CXGB4I_MAX_CONN;

        rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
                                        cxgb4i_max_connect);
        if (rc < 0)
                return rc;

        cdev->csk_release_offload_resources = release_offload_resources;
        cdev->csk_push_tx_frames = push_tx_frames;
        cdev->csk_send_abort_req = send_abort_req;
        cdev->csk_send_close_req = send_close_req;
        cdev->csk_send_rx_credits = send_rx_credits;
        cdev->csk_alloc_cpls = alloc_cpls;
        cdev->csk_init_act_open = init_act_open;

        pr_info("cdev 0x%p, offload up, added.\n", cdev);
        return 0;
}

/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct ulp_mem_io *req,
                                unsigned int wr_len, unsigned int dlen,
                                unsigned int pm_addr)
{
        struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

        INIT_ULPTX_WR(req, wr_len, 0, 0);
        req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1 << 23));
        req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
        req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
        req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

        idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
        idata->len = htonl(dlen);
}

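/*
 * ddp_ppod_write_idata - program a run of pagepods in adapter memory.
 * Builds a ULP_TX_MEM_WRITE work request carrying up to
 * ULPMEM_IDATA_MAX_NPPODS pagepods as immediate data and writes them at
 * the pagepod offset corresponding to idx. With a NULL hdr and gl the
 * pagepods are cleared instead.
 */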
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
                                struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
                                unsigned int npods,
                                struct cxgbi_gather_list *gl,
                                unsigned int gl_pidx)
{
        struct cxgbi_ddp_info *ddp = cdev->ddp;
        struct sk_buff *skb;
        struct ulp_mem_io *req;
        struct ulptx_idata *idata;
        struct cxgbi_pagepod *ppod;
        unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
        unsigned int dlen = PPOD_SIZE * npods;
        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
                                sizeof(struct ulptx_idata) + dlen, 16);
        unsigned int i;

        skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
        if (!skb) {
                pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
                        cdev, idx, npods);
                return -ENOMEM;
        }
        req = (struct ulp_mem_io *)skb->head;
        set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

        ulp_mem_io_set_hdr(req, wr_len, dlen, pm_addr);
        idata = (struct ulptx_idata *)(req + 1);
        ppod = (struct cxgbi_pagepod *)(idata + 1);

        for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
                if (!hdr && !gl)
                        cxgbi_ddp_ppod_clear(ppod);
                else
                        cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
        }

        cxgb4_ofld_send(cdev->ports[port_id], skb);
        return 0;
}

static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
                        unsigned int idx, unsigned int npods,
                        struct cxgbi_gather_list *gl)
{
        unsigned int i, cnt;
        int err = 0;

        for (i = 0; i < npods; i += cnt, idx += cnt) {
                cnt = npods - i;
                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                        cnt = ULPMEM_IDATA_MAX_NPPODS;
                err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
                                        idx, cnt, gl, 4 * i);
                if (err < 0)
                        break;
        }
        return err;
}

static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
                          unsigned int idx, unsigned int npods)
{
        unsigned int i, cnt;
        int err;

        for (i = 0; i < npods; i += cnt, idx += cnt) {
                cnt = npods - i;
                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                        cnt = ULPMEM_IDATA_MAX_NPPODS;
                err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
                                        idx, cnt, NULL, 0);
                if (err < 0)
                        break;
        }
}

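/*
 * ddp_setup_conn_pgidx/ddp_setup_conn_digest - program per-connection
 * TCB fields with CPL_SET_TCB_FIELD: the DDP page-size index (bits 9:8)
 * and the iSCSI header/data digest enables in the ULP submode field
 * (bits 5:4). Requesting a reply (via the reply_ctrl field) yields a
 * CPL_SET_TCB_RPL handled by do_set_tcb_rpl().
 */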
1330static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
1331                                int pg_idx, bool reply)
1332{
1333        struct sk_buff *skb;
1334        struct cpl_set_tcb_field *req;
1335
1336        if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
1337                return 0;
1338
1339        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1340        if (!skb)
1341                return -ENOMEM;
1342
1343        /*  set up ulp page size */
1344        req = (struct cpl_set_tcb_field *)skb->head;
1345        INIT_TP_WR(req, csk->tid);
1346        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
1347        req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
1348        req->word_cookie = htons(0);
1349        req->mask = cpu_to_be64(0x3 << 8);
1350        req->val = cpu_to_be64(pg_idx << 8);
1351        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1352
1353        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1354                "csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
1355
1356        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1357        return 0;
1358}
1359
1360static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
1361                                 int hcrc, int dcrc, int reply)
1362{
1363        struct sk_buff *skb;
1364        struct cpl_set_tcb_field *req;
1365
1366        if (!hcrc && !dcrc)
1367                return 0;
1368
1369        skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
1370        if (!skb)
1371                return -ENOMEM;
1372
1373        csk->hcrc_len = (hcrc ? 4 : 0);
1374        csk->dcrc_len = (dcrc ? 4 : 0);
1375        /*  set up ulp submode */
1376        req = (struct cpl_set_tcb_field *)skb->head;
1377        INIT_TP_WR(req, tid);
1378        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1379        req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
1380        req->word_cookie = htons(0);
1381        req->mask = cpu_to_be64(0x3 << 4);
1382        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
1383                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
1384        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);
1385
1386        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
1387                "csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
1388
1389        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
1390        return 0;
1391}
1392
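    /*
     * Per-adapter DDP setup: reserve the iSCSI payload memory region
     * advertised by the LLD, derive the tag format, program the tag
     * mask and page-size factors into the hardware, and wire up the
     * per-connection DDP callbacks.
     */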
1393static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
1394{
1395        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
1396        struct cxgbi_ddp_info *ddp = cdev->ddp;
1397        unsigned int tagmask, pgsz_factor[4];
1398        int err;
1399
1400        if (ddp) {
1401                kref_get(&ddp->refcnt);
1402                pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
1403                        cdev, cdev->ddp);
1404                return -EALREADY;
1405        }
1406
1407        err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
1408                        lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
1409                        lldi->iscsi_iolen, lldi->iscsi_iolen);
1410        if (err < 0)
1411                return err;
1412
1413        ddp = cdev->ddp;
1414
1415        tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
1416        cxgbi_ddp_page_size_factor(pgsz_factor);
1417        cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);
1418
1419        cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
1420        cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
1421        cdev->csk_ddp_set = ddp_set_map;
1422        cdev->csk_ddp_clear = ddp_clear_map;
1423
1424        pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
1425                cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
1426                cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
1427        pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x "
1428                "pkt %u/%u, %u/%u.\n",
1429                cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
1430                ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
1431                ddp->max_rxsz, lldi->iscsi_iolen);
1432        pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
1433                cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
1434                ddp->max_rxsz);
1435        return 0;
1436}
1437
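    /*
     * ULD "add" callback, invoked once per adapter by cxgb4.  Registers
     * a cxgbi_device, copies the lld info into it, initializes DDP and
     * the offload queues, and creates one iSCSI host per port.
     */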
1438static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
1439{
1440        struct cxgbi_device *cdev;
1441        struct port_info *pi;
1442        int i, rc;
1443
1444        cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
1445        if (!cdev) {
1446                pr_info("t4 device 0x%p, register failed.\n", lldi);
1447                return NULL;
1448        }
1449        pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
1450                cdev, lldi->adapter_type, lldi->nports,
1451                lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
1452                lldi->nrxq, lldi->wr_cred);
1453        for (i = 0; i < lldi->nrxq; i++)
1454                log_debug(1 << CXGBI_DBG_DEV,
1455                        "t4 0x%p, rxq id #%d: %u.\n",
1456                        cdev, i, lldi->rxq_ids[i]);
1457
1458        memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
1459        cdev->flags = CXGBI_FLAG_DEV_T4;
1460        cdev->pdev = lldi->pdev;
1461        cdev->ports = lldi->ports;
1462        cdev->nports = lldi->nports;
1463        cdev->mtus = lldi->mtus;
1464        cdev->nmtus = NMTUS;
1465        cdev->snd_win = cxgb4i_snd_win;
1466        cdev->rcv_win = cxgb4i_rcv_win;
1467        cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
1468        cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
1469        cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
1470        cdev->itp = &cxgb4i_iscsi_transport;
1471
1472        cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
1473        pr_info("cdev 0x%p,%s, pfvf %u.\n",
1474                cdev, lldi->ports[0]->name, cdev->pfvf);
1475
1476        rc = cxgb4i_ddp_init(cdev);
1477        if (rc) {
1478                pr_info("t4 0x%p ddp init failed.\n", cdev);
1479                goto err_out;
1480        }
1481        rc = cxgb4i_ofld_init(cdev);
1482        if (rc) {
1483                pr_info("t4 0x%p ofld init failed.\n", cdev);
1484                goto err_out;
1485        }
1486
1487        rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
1488                                &cxgb4i_host_template, cxgb4i_stt);
1489        if (rc)
1490                goto err_out;
1491
1492        for (i = 0; i < cdev->nports; i++) {
1493                pi = netdev_priv(lldi->ports[i]);
1494                cdev->hbas[i]->port_id = pi->port_id;
1495        }
1496        return cdev;
1497
1498err_out:
1499        cxgbi_device_unregister(cdev);
1500        return ERR_PTR(rc);
1501}
1502
1503#define RX_PULL_LEN     128
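    /*
     * ULD rx callback: reassemble the CPL message into an skb and
     * dispatch it by opcode through cxgb4i_cplhandlers[].  Returns 0 on
     * success and non-zero when the skb could not be allocated.
     */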
1504static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
1505                                const struct pkt_gl *pgl)
1506{
1507        const struct cpl_act_establish *rpl;
1508        struct sk_buff *skb;
1509        unsigned int opc;
1510        struct cxgbi_device *cdev = handle;
1511
1512        if (pgl == NULL) {
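    		/*
    		 * No gather list: the CPL is embedded in the 64-byte
    		 * response descriptor itself, after the 8-byte RSS
    		 * header (&rsp[1]) and before the trailing
    		 * struct rsp_ctrl.
    		 */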
1513                unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
1514
1515                skb = alloc_wr(len, 0, GFP_ATOMIC);
1516                if (!skb)
1517                        goto nomem;
1518                skb_copy_to_linear_data(skb, &rsp[1], len);
1519        } else {
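    		/*
    		 * Sanity check: the opcode byte leading the RSS
    		 * response must match the opcode byte at the head of
    		 * the free-list buffer; drop the message if they
    		 * differ.
    		 */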
1520                if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
1521                        pr_info("RSS/FL opcode mismatch: va 0x%p, RSS %#llx, FL %#llx, len %u.\n",
1522                                pgl->va, be64_to_cpu(*rsp),
1523                                be64_to_cpu(*(u64 *)pgl->va),
1524                                pgl->tot_len);
1525                        return 0;
1526                }
1527                skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
1528                if (unlikely(!skb))
1529                        goto nomem;
1530        }
1531
1532        rpl = (struct cpl_act_establish *)skb->data;
1533        opc = rpl->ot.opcode;
1534        log_debug(1 << CXGBI_DBG_TOE,
1535                "cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
1536                 cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
1537        if (cxgb4i_cplhandlers[opc])
1538                cxgb4i_cplhandlers[opc](cdev, skb);
1539        else {
1540                pr_err("No handler for opcode 0x%x.\n", opc);
1541                __kfree_skb(skb);
1542        }
1543        return 0;
1544nomem:
1545        log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
1546        return 1;
1547}
1548
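    /*
     * ULD state-change callback.  For now the transitions are only
     * logged; the UP and RECOVERY cases are placeholders for
     * re-initialization and connection teardown respectively.
     */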
1549static int t4_uld_state_change(void *handle, enum cxgb4_state state)
1550{
1551        struct cxgbi_device *cdev = handle;
1552
1553        switch (state) {
1554        case CXGB4_STATE_UP:
1555                pr_info("cdev 0x%p, UP.\n", cdev);
1556                /* re-initialize */
1557                break;
1558        case CXGB4_STATE_START_RECOVERY:
1559                pr_info("cdev 0x%p, RECOVERY.\n", cdev);
1560                /* close all connections */
1561                break;
1562        case CXGB4_STATE_DOWN:
1563                pr_info("cdev 0x%p, DOWN.\n", cdev);
1564                break;
1565        case CXGB4_STATE_DETACH:
1566                pr_info("cdev 0x%p, DETACH.\n", cdev);
1567                break;
1568        default:
1569                pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
1570                break;
1571        }
1572        return 0;
1573}
1574
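    /*
     * Module init: register the iSCSI transport template first, then
     * hook into cxgb4 as a ULD so t4_uld_add() fires for each adapter.
     */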
1575static int __init cxgb4i_init_module(void)
1576{
1577        int rc;
1578
1579        printk(KERN_INFO "%s", version);
1580
1581        rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1582        if (rc < 0)
1583                return rc;
1584        rc = cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
            if (rc < 0) {
                    /* back out the transport registration on failure */
                    cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
                    return rc;
            }
1585        return 0;
1586}
1587
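    /*
     * Module exit: unregister from cxgb4 first so no new CPLs arrive,
     * then tear down the remaining T4 devices and the transport.
     */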
1588static void __exit cxgb4i_exit_module(void)
1589{
1590        cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
1591        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
1592        cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
1593}
1594
1595module_init(cxgb4i_init_module);
1596module_exit(cxgb4i_exit_module);
1597