linux/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
/*
 * cxgb4i.c: Chelsio T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by:  Karen Xie (kxie@chelsio.com)
 *              Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <scsi/scsi_host.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <linux/netdevice.h>

#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
#include "l2t.h"
#include "cxgb4i.h"

static unsigned int dbg_level;

#include "../libcxgbi.h"

#define DRV_MODULE_NAME         "cxgb4i"
#define DRV_MODULE_DESC         "Chelsio T4/T5 iSCSI Driver"
#define DRV_MODULE_VERSION      "0.9.4"

static char version[] =
        DRV_MODULE_DESC " " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "Debug flag (default=0)");

static int cxgb4i_rcv_win = 256 * 1024;
module_param(cxgb4i_rcv_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_rcv_win, "TCP receive window in bytes");

static int cxgb4i_snd_win = 128 * 1024;
module_param(cxgb4i_snd_win, int, 0644);
MODULE_PARM_DESC(cxgb4i_snd_win, "TCP send window in bytes");

static int cxgb4i_rx_credit_thres = 10 * 1024;
module_param(cxgb4i_rx_credit_thres, int, 0644);
MODULE_PARM_DESC(cxgb4i_rx_credit_thres,
                "RX credits return threshold in bytes (default=10KB)");

static unsigned int cxgb4i_max_connect = (8 * 1024);
module_param(cxgb4i_max_connect, uint, 0644);
MODULE_PARM_DESC(cxgb4i_max_connect, "Maximum number of connections");

static unsigned short cxgb4i_sport_base = 20000;
module_param(cxgb4i_sport_base, ushort, 0644);
MODULE_PARM_DESC(cxgb4i_sport_base, "Starting port number (default 20000)");

typedef void (*cxgb4i_cplhandler_func)(struct cxgbi_device *, struct sk_buff *);

static void *t4_uld_add(const struct cxgb4_lld_info *);
static int t4_uld_rx_handler(void *, const __be64 *, const struct pkt_gl *);
static int t4_uld_state_change(void *, enum cxgb4_state state);

static const struct cxgb4_uld_info cxgb4i_uld_info = {
        .name = DRV_MODULE_NAME,
        .add = t4_uld_add,
        .rx_handler = t4_uld_rx_handler,
        .state_change = t4_uld_state_change,
};

static struct scsi_host_template cxgb4i_host_template = {
        .module         = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .proc_name      = DRV_MODULE_NAME,
        .can_queue      = CXGB4I_SCSI_HOST_QDEPTH,
        .queuecommand   = iscsi_queuecommand,
        .change_queue_depth = iscsi_change_queue_depth,
        .sg_tablesize   = SG_ALL,
        .max_sectors    = 0xFFFF,
        .cmd_per_lun    = ISCSI_DEF_CMD_PER_LUN,
        .eh_abort_handler = iscsi_eh_abort,
        .eh_device_reset_handler = iscsi_eh_device_reset,
        .eh_target_reset_handler = iscsi_eh_recover_target,
        .target_alloc   = iscsi_target_alloc,
        .use_clustering = DISABLE_CLUSTERING,
        .this_id        = -1,
};

static struct iscsi_transport cxgb4i_iscsi_transport = {
        .owner          = THIS_MODULE,
        .name           = DRV_MODULE_NAME,
        .caps           = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
                                CAP_DATADGST | CAP_DIGEST_OFFLOAD |
                                CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
        .attr_is_visible        = cxgbi_attr_is_visible,
        .get_host_param = cxgbi_get_host_param,
        .set_host_param = cxgbi_set_host_param,
        /* session management */
        .create_session = cxgbi_create_session,
        .destroy_session        = cxgbi_destroy_session,
        .get_session_param = iscsi_session_get_param,
        /* connection management */
        .create_conn    = cxgbi_create_conn,
        .bind_conn              = cxgbi_bind_conn,
        .destroy_conn   = iscsi_tcp_conn_teardown,
        .start_conn             = iscsi_conn_start,
        .stop_conn              = iscsi_conn_stop,
        .get_conn_param = iscsi_conn_get_param,
        .set_param      = cxgbi_set_conn_param,
        .get_stats      = cxgbi_get_conn_stats,
        /* pdu xmit req from user space */
        .send_pdu       = iscsi_conn_send_pdu,
        /* task */
        .init_task      = iscsi_tcp_task_init,
        .xmit_task      = iscsi_tcp_task_xmit,
        .cleanup_task   = cxgbi_cleanup_task,
        /* pdu */
        .alloc_pdu      = cxgbi_conn_alloc_pdu,
        .init_pdu       = cxgbi_conn_init_pdu,
        .xmit_pdu       = cxgbi_conn_xmit_pdu,
        .parse_pdu_itt  = cxgbi_parse_pdu_itt,
        /* TCP connect/disconnect */
        .get_ep_param   = cxgbi_get_ep_param,
        .ep_connect     = cxgbi_ep_connect,
        .ep_poll        = cxgbi_ep_poll,
        .ep_disconnect  = cxgbi_ep_disconnect,
        /* Error recovery timeout call */
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct scsi_transport_template *cxgb4i_stt;

/*
 * CPL (Chelsio Protocol Language) defines a message passing interface
 * between the host driver and the Chelsio ASIC.
 * The section below implements the CPLs related to iSCSI TCP connection
 * open/close/abort and data send/receive.
 */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define RCV_BUFSIZ_MASK         0x3FFU
#define MAX_IMM_TX_PKT_LEN      128

static inline void set_queue(struct sk_buff *skb, unsigned int queue,
                                const struct cxgbi_sock *csk)
{
        skb->queue_mapping = queue;
}

static int push_tx_frames(struct cxgbi_sock *, int);

/*
 * is_ofld_imm - check whether a packet can be sent as immediate data
 * @skb: the packet
 *
 * Returns true if a packet can be sent as an offload WR with immediate
 * data.  We currently use the same limit as for Ethernet packets.
 */
static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return skb->len <= (MAX_IMM_TX_PKT_LEN -
                        sizeof(struct fw_ofld_tx_data_wr));
}

#define VLAN_NONE 0xfff
#define FILTER_SEL_VLAN_NONE 0xffff
#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
#define FILTER_SEL_WIDTH_VIN_P_FC \
        (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits */
#define FILTER_SEL_WIDTH_TAG_P_FC \
        (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)

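/*
 * Build the TP "ntuple" that goes into the params field of the active
 * open request. The packing depends on the filter mode the adapter was
 * configured with; the FILTER_SEL_* widths above describe where the
 * VLAN or VIID fields land after the 4-bit port/FCoE selector.
 */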
static unsigned int select_ntuple(struct cxgbi_device *cdev,
                                struct l2t_entry *l2t)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        unsigned int ntuple = 0;
        u32 viid;

        switch (lldi->filt_mode) {

        /* default filter mode */
        case HW_TPL_FR_MT_PR_IV_P_FC:
                if (l2t->vlan == VLAN_NONE)
                        ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
                else {
                        ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
                        ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                }
                ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
                          FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                break;
        case HW_TPL_FR_MT_PR_OV_P_FC: {
                viid = cxgb4_port_viid(l2t->neigh->dev);

                ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
                ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
                ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
                ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
                          FILTER_SEL_WIDTH_VLD_TAG_P_FC;
                break;
        }
        default:
                break;
        }
        return ntuple;
}

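/*
 * Send an active open request for a new offloaded connection. opt0/opt2
 * carry the TCP and ULP settings (window scale, MSS index, keepalive,
 * iSCSI ULP mode, receive buffer size); qid_atid packs the atid in the
 * low bits with the RSS queue id above it, so the reply is steered back
 * to this connection's ingress queue. T4 and T5 use different request
 * formats, hence the two branches below.
 */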
static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
        unsigned int qid_atid = ((unsigned int)csk->atid) |
                                 (((unsigned int)csk->rss_qid) << 14);

        opt0 = KEEP_ALIVE(1) |
                WND_SCALE(wscale) |
                MSS_IDX(csk->mss_idx) |
                L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
                TX_CHAN(csk->tx_chan) |
                SMAC_SEL(csk->smac_idx) |
                ULP_MODE(ULP_MODE_ISCSI) |
                RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
        opt2 = RX_CHANNEL(0) |
                RSS_QUEUE_VALID |
                (1 << 20) |
                RSS_QUEUE(csk->rss_qid);

        if (is_t4(lldi->adapter_type)) {
                struct cpl_act_open_req *req =
                                (struct cpl_act_open_req *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
                opt2 |= 1 << 22;
                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t4 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        } else {
                struct cpl_t5_act_open_req *req =
                                (struct cpl_t5_act_open_req *)skb->head;

                INIT_TP_WR(req, 0);
                OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                        qid_atid));
                req->local_port = csk->saddr.sin_port;
                req->peer_port = csk->daddr.sin_port;
                req->local_ip = csk->saddr.sin_addr.s_addr;
                req->peer_ip = csk->daddr.sin_addr.s_addr;
                req->opt0 = cpu_to_be64(opt0);
                req->params = cpu_to_be32(select_ntuple(csk->cdev, csk->l2t));
                opt2 |= 1 << 31;
                req->opt2 = cpu_to_be32(opt2);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk t5 0x%p, %pI4:%u-%pI4:%u, atid %d, qid %u.\n",
                        csk, &req->local_ip, ntohs(req->local_port),
                        &req->peer_ip, ntohs(req->peer_port),
                        csk->atid, csk->rss_qid);
        }

        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_close_req(struct cxgbi_sock *csk)
{
        struct sk_buff *skb = csk->cpl_close;
        struct cpl_close_con_req *req = (struct cpl_close_con_req *)skb->head;
        unsigned int tid = csk->tid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u.\n",
                csk, csk->state, csk->flags, csk->tid);
        csk->cpl_close = NULL;
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
        INIT_TP_WR(req, tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, tid));
        req->rsvd = 0;

        cxgbi_sock_skb_entail(csk, skb);
        if (csk->state >= CTP_ESTABLISHED)
                push_tx_frames(csk, 1);
}

static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbi_sock *csk = (struct cxgbi_sock *)handle;
        struct cpl_abort_req *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, abort.\n",
                csk, csk->state, csk->flags, csk->tid);
        req = (struct cpl_abort_req *)skb->data;
        req->cmd = CPL_ABORT_NO_RST;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

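/*
 * Abort the connection: purge any unsent data, mark the socket as
 * waiting for the ABORT_RPL, and ask the hardware to RST the peer. If
 * ARP resolution fails, abort_arp_failure() downgrades the request to
 * CPL_ABORT_NO_RST so no reset is sent on the wire.
 */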
static void send_abort_req(struct cxgbi_sock *csk)
{
        struct cpl_abort_req *req;
        struct sk_buff *skb = csk->cpl_abort_req;

        if (unlikely(csk->state == CTP_ABORTING) || !skb || !csk->cdev)
                return;
        cxgbi_sock_set_state(csk, CTP_ABORTING);
        cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING);
        cxgbi_sock_purge_write_queue(csk);

        csk->cpl_abort_req = NULL;
        req = (struct cpl_abort_req *)skb->head;
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        req->cmd = CPL_ABORT_SEND_RST;
        t4_set_arp_err_handler(skb, csk, abort_arp_failure);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid));
        req->rsvd0 = htonl(csk->snd_nxt);
        req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, snd_nxt %u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, csk->snd_nxt,
                req->rsvd1);

        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
}

static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status)
{
        struct sk_buff *skb = csk->cpl_abort_rpl;
        struct cpl_abort_rpl *rpl = (struct cpl_abort_rpl *)skb->head;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u, status %d.\n",
                csk, csk->state, csk->flags, csk->tid, rst_status);

        csk->cpl_abort_rpl = NULL;
        set_queue(skb, CPL_PRIORITY_DATA, csk);
        INIT_TP_WR(rpl, csk->tid);
        OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid));
        rpl->cmd = rst_status;
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message. Returns the number of
 * credits sent.
 */
static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits)
{
        struct sk_buff *skb;
        struct cpl_rx_data_ack *req;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx,%u, credit %u.\n",
                csk, csk->state, csk->flags, csk->tid, credits);

        skb = alloc_wr(sizeof(*req), 0, GFP_ATOMIC);
        if (!skb) {
                pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits);
                return 0;
        }
        req = (struct cpl_rx_data_ack *)skb->head;

        set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                      csk->tid));
        req->credit_dack = cpu_to_be32(RX_CREDITS(credits) | RX_FORCE_ACK(1));
        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
        return credits;
}

/*
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
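 *
 * The ulptx_sgl header together with the first entry takes 2 flits;
 * each further pair of entries (two 8-byte addresses plus two 4-byte
 * lengths) takes 3 flits, and an odd leftover entry rounds up to 2
 * more, which is what the (3*n)/2 + (n&1) + 2 formula below computes.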
 */
static inline unsigned int sgl_len(unsigned int n)
{
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * calc_tx_flits_ofld - calculate # of flits for an offload packet
 * @skb: the packet
 *
 * Returns the number of flits needed for the given offload packet.
 * These packets are already fully constructed and no additional headers
 * will be added.
 */
static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
{
        unsigned int flits, cnt;

        if (is_ofld_imm(skb))
                return DIV_ROUND_UP(skb->len, 8);
        flits = skb_transport_offset(skb) / 8;
        cnt = skb_shinfo(skb)->nr_frags;
        if (skb_tail_pointer(skb) != skb_transport_header(skb))
                cnt++;
        return flits + sgl_len(cnt);
}

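/*
 * Send a FLOWC work request to program the flow context in firmware:
 * PF/VF, tx channel, port, ingress queue, initial send/receive sequence
 * numbers, send buffer size and MSS. The WR is a fixed 80 bytes, i.e.
 * five 16-byte credits. Note that this version assumes the GFP_ATOMIC
 * allocation succeeds; skb->head is dereferenced without a NULL check.
 */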
static inline void send_tx_flowc_wr(struct cxgbi_sock *csk)
{
        struct sk_buff *skb;
        struct fw_flowc_wr *flowc;
        int flowclen, i;

        flowclen = 80;
        skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
        flowc = (struct fw_flowc_wr *)skb->head;
        flowc->op_to_nparams =
                htonl(FW_WR_OP(FW_FLOWC_WR) | FW_FLOWC_WR_NPARAMS(8));
        flowc->flowid_len16 =
                htonl(FW_WR_LEN16(DIV_ROUND_UP(72, 16)) |
                                FW_WR_FLOWID(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = htonl(csk->cdev->pfvf);
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = htonl(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = htonl(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = htonl(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = htonl(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = htonl(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = htonl(cxgb4i_snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = htonl(csk->advmss);
        flowc->mnemval[8].mnemonic = 0;
        flowc->mnemval[8].val = 0;
        for (i = 0; i < 9; i++) {
                flowc->mnemval[i].r4[0] = 0;
                flowc->mnemval[i].r4[1] = 0;
                flowc->mnemval[i].r4[2] = 0;
        }
        set_queue(skb, CPL_PRIORITY_DATA, csk);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, tid 0x%x, %u,%u,%u,%u,%u,%u,%u.\n",
                csk, csk->tid, 0, csk->tx_chan, csk->rss_qid,
                csk->snd_nxt, csk->rcv_nxt, cxgb4i_snd_win,
                csk->advmss);

        cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
}

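/*
 * Prepend the FW_OFLD_TX_DATA_WR header to an skb of iSCSI payload.
 * Small PDUs travel as immediate data inside the WR; larger ones are
 * described by an SGL. The ULP submode bits ask the hardware to insert
 * header/data digests, and SHOVE is set when the write queue is empty
 * so the data is pushed out right away.
 */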
static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
                                   int dlen, int len, u32 credits, int compl)
{
        struct fw_ofld_tx_data_wr *req;
        unsigned int submode = cxgbi_skcb_ulp_mode(skb) & 3;
        unsigned int wr_ulp_mode = 0;

        req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req));

        if (is_ofld_imm(skb)) {
                req->op_to_immdlen = htonl(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL(1) |
                                        FW_WR_IMMDLEN(dlen));
                req->flowid_len16 = htonl(FW_WR_FLOWID(csk->tid) |
                                                FW_WR_LEN16(credits));
        } else {
                req->op_to_immdlen =
                        cpu_to_be32(FW_WR_OP(FW_OFLD_TX_DATA_WR) |
                                        FW_WR_COMPL(1) |
                                        FW_WR_IMMDLEN(0));
                req->flowid_len16 =
                        cpu_to_be32(FW_WR_FLOWID(csk->tid) |
                                        FW_WR_LEN16(credits));
        }
        if (submode)
                wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
                                FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
        req->tunnel_to_proxy = htonl(wr_ulp_mode |
                 FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
        req->plen = htonl(len);
        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
                cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
}

static void arp_failure_skb_discard(void *handle, struct sk_buff *skb)
{
        kfree_skb(skb);
}

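/*
 * Push queued payload to the hardware, bounded by the WR credits still
 * available on the connection. skb->csum is reused to remember how many
 * credits each WR consumed so they can be given back on FW4_ACK. The
 * first payload on a connection is preceded by a FLOWC WR, which costs
 * an extra 5 credits.
 */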
static int push_tx_frames(struct cxgbi_sock *csk, int req_completion)
{
        int total_size = 0;
        struct sk_buff *skb;

        if (unlikely(csk->state < CTP_ESTABLISHED ||
                csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK |
                         1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, in closing state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                return 0;
        }

        while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) {
                int dlen = skb->len;
                int len = skb->len;
                unsigned int credits_needed;

                skb_reset_transport_header(skb);
                if (is_ofld_imm(skb))
                        credits_needed = DIV_ROUND_UP(dlen +
                                        sizeof(struct fw_ofld_tx_data_wr), 16);
                else
                        credits_needed = DIV_ROUND_UP(8*calc_tx_flits_ofld(skb)
                                        + sizeof(struct fw_ofld_tx_data_wr),
                                        16);

                if (csk->wr_cred < credits_needed) {
                        log_debug(1 << CXGBI_DBG_PDU_TX,
                                "csk 0x%p, skb %u/%u, wr %d < %u.\n",
                                csk, skb->len, skb->data_len,
                                credits_needed, csk->wr_cred);
                        break;
                }
                __skb_unlink(skb, &csk->write_queue);
                set_queue(skb, CPL_PRIORITY_DATA, csk);
                skb->csum = credits_needed;
                csk->wr_cred -= credits_needed;
                csk->wr_una_cred += credits_needed;
                cxgbi_sock_enqueue_wr(csk, skb);

                log_debug(1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
                        csk, skb->len, skb->data_len, credits_needed,
                        csk->wr_cred, csk->wr_una_cred);

                if (likely(cxgbi_skcb_test_flag(skb, SKCBF_TX_NEED_HDR))) {
                        if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
                                send_tx_flowc_wr(csk);
                                skb->csum += 5;
                                csk->wr_cred -= 5;
                                csk->wr_una_cred += 5;
                        }
                        len += cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
                        make_tx_data_wr(csk, skb, dlen, len, credits_needed,
                                        req_completion);
                        csk->snd_nxt += len;
                        cxgbi_skcb_clear_flag(skb, SKCBF_TX_NEED_HDR);
                }
                total_size += skb->truesize;
                t4_set_arp_err_handler(skb, csk, arp_failure_skb_discard);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_TX,
                        "csk 0x%p,%u,0x%lx,%u, skb 0x%p, %u.\n",
                        csk, csk->state, csk->flags, csk->tid, skb, len);

                cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
        }
        return total_size;
}

static inline void free_atid(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) {
                cxgb4_free_atid(lldi->tids, csk->atid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID);
                cxgbi_sock_put(csk);
        }
}

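/*
 * ACT_ESTABLISH: the active open completed. Switch the connection from
 * its provisional atid to the hardware-assigned tid, seed the receive
 * sequence numbers from the CPL, and push out any PDUs queued while the
 * connection was still being set up.
 */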
static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_establish *req = (struct cpl_act_establish *)skb->data;
        unsigned short tcp_opt = ntohs(req->tcp_opt);
        unsigned int tid = GET_TID(req);
        unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO conn. for atid %u, cdev 0x%p.\n", atid, cdev);
                goto rel_skb;
        }

        if (csk->atid != atid) {
                pr_err("bad conn atid %u, csk 0x%p,%u,0x%lx,tid %u, atid %u.\n",
                        atid, csk, csk->state, csk->flags, csk->tid, csk->atid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
                csk, csk->state, csk->flags, tid, atid, rcv_isn);

        cxgbi_sock_get(csk);
        csk->tid = tid;
        cxgb4_insert_tid(lldi->tids, csk, tid);
        cxgbi_sock_set_flag(csk, CTPF_HAS_TID);

        free_atid(csk);

        spin_lock_bh(&csk->lock);
        if (unlikely(csk->state != CTP_ACTIVE_OPEN))
                pr_info("csk 0x%p,%u,0x%lx,%u, got EST.\n",
                        csk, csk->state, csk->flags, csk->tid);

        if (csk->retry_timer.function) {
                del_timer(&csk->retry_timer);
                csk->retry_timer.function = NULL;
        }

        csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn;
        /*
         * Causes the first RX_DATA_ACK to supply any Rx credits we couldn't
         * pass through opt0.
         */
        if (cxgb4i_rcv_win > (RCV_BUFSIZ_MASK << 10))
                csk->rcv_wup -= cxgb4i_rcv_win - (RCV_BUFSIZ_MASK << 10);

        csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;
        if (GET_TCPOPT_TSTAMP(tcp_opt))
                csk->advmss -= 12;
        if (csk->advmss < 128)
                csk->advmss = 128;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p, mss_idx %u, advmss %u.\n",
                        csk, GET_TCPOPT_MSS(tcp_opt), csk->advmss);

        cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt));

        if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED)))
                send_abort_req(csk);
        else {
                if (skb_queue_len(&csk->write_queue))
                        push_tx_frames(csk, 0);
                cxgbi_conn_tx_open(csk);
        }
        spin_unlock_bh(&csk->lock);

rel_skb:
        __kfree_skb(skb);
}

static int act_open_rpl_status_to_errno(int status)
{
        switch (status) {
        case CPL_ERR_CONN_RESET:
                return -ECONNREFUSED;
        case CPL_ERR_ARP_MISS:
                return -EHOSTUNREACH;
        case CPL_ERR_CONN_TIMEDOUT:
                return -ETIMEDOUT;
        case CPL_ERR_TCAM_FULL:
                return -ENOMEM;
        case CPL_ERR_CONN_EXIST:
                return -EADDRINUSE;
        default:
                return -EIO;
        }
}

static void csk_act_open_retry_timer(unsigned long data)
{
        struct sk_buff *skb;
        struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
        skb = alloc_wr(is_t4(lldi->adapter_type) ?
                                sizeof(struct cpl_act_open_req) :
                                sizeof(struct cpl_t5_act_open_req),
                        0, GFP_ATOMIC);
        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
                                        cxgbi_sock_act_open_req_arp_failure);
                send_act_open_req(csk, skb, csk->l2t);
        }
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
}

static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        unsigned int atid =
                GET_TID_TID(GET_AOPEN_ATID(be32_to_cpu(rpl->atid_status)));
        unsigned int status = GET_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_atid(t, atid);
        if (unlikely(!csk)) {
                pr_err("NO matching conn. atid %u, tid %u.\n", atid, tid);
                goto rel_skb;
        }

        pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
                &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
                &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
                atid, tid, status, csk, csk->state, csk->flags);

        if (status == CPL_ERR_RTX_NEG_ADVICE)
                goto rel_skb;

        if (status && status != CPL_ERR_TCAM_FULL &&
            status != CPL_ERR_CONN_EXIST &&
            status != CPL_ERR_ARP_MISS)
                cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (status == CPL_ERR_CONN_EXIST &&
            csk->retry_timer.function != csk_act_open_retry_timer) {
                csk->retry_timer.function = csk_act_open_retry_timer;
                mod_timer(&csk->retry_timer, jiffies + HZ / 2);
        } else
                cxgbi_sock_fail_act_open(csk,
                                        act_open_rpl_status_to_errno(status));

        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_peer_close *req = (struct cpl_peer_close *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_close_con_rpl *rpl = (struct cpl_close_con_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
rel_skb:
        __kfree_skb(skb);
}

static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason,
                                                                int *need_rst)
{
        switch (abort_reason) {
        case CPL_ERR_BAD_SYN: /* fall through */
        case CPL_ERR_CONN_RESET:
                return csk->state > CTP_ESTABLISHED ?
                        -EPIPE : -ECONNRESET;
        case CPL_ERR_XMIT_TIMEDOUT:
        case CPL_ERR_PERSIST_TIMEDOUT:
        case CPL_ERR_FINWAIT2_TIMEDOUT:
        case CPL_ERR_KEEPALIVE_TIMEDOUT:
                return -ETIMEDOUT;
        default:
                return -EIO;
        }
}

static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_req_rss *req = (struct cpl_abort_req_rss *)skb->data;
        unsigned int tid = GET_TID(req);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        int rst_status = CPL_ABORT_NO_RST;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, req->status);

        if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
            req->status == CPL_ERR_PERSIST_NEG_ADVICE)
                goto rel_skb;

        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) {
                cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
                cxgbi_sock_set_state(csk, CTP_ABORTING);
                goto done;
        }

        cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
        send_abort_rpl(csk, rst_status);

        if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
                csk->err = abort_status_to_errno(csk, req->status, &rst_status);
                cxgbi_sock_closed(csk);
        }
done:
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
rel_skb:
        __kfree_skb(skb);
}

static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_abort_rpl_rss *rpl = (struct cpl_abort_rpl_rss *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (!csk)
                goto rel_skb;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
                rpl->status, csk, csk ? csk->state : 0,
                csk ? csk->flags : 0UL);

        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;

        cxgbi_sock_rcv_abort_rpl(csk);
rel_skb:
        __kfree_skb(skb);
}

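/*
 * ISCSI_HDR: an iSCSI PDU header (and, for non-DDP data, payload)
 * arrived. The first skb of each PDU is held in csk->skb_ulp_lhdr and
 * checked against the expected sequence number and the hardware-reported
 * PDU length; delivery waits for the matching RX_DATA_DDP status.
 */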
static void do_rx_iscsi_hdr(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)skb->data;
        unsigned short pdu_len_ddp = be16_to_cpu(cpl->pdu_len_ddp);
        unsigned int tid = GET_TID(cpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find conn. for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p,%u, 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, skb, skb->len,
                pdu_len_ddp);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        cxgbi_skcb_tcp_seq(skb) = ntohl(cpl->seq);
        cxgbi_skcb_flags(skb) = 0;

        skb_reset_transport_header(skb);
        __skb_pull(skb, sizeof(*cpl));
        __pskb_trim(skb, ntohs(cpl->len));

        if (!csk->skb_ulp_lhdr) {
                unsigned char *bhs;
                unsigned int hlen, dlen, plen;

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, tid %u, skb 0x%p header.\n",
                        csk, csk->state, csk->flags, csk->tid, skb);
                csk->skb_ulp_lhdr = skb;
                cxgbi_skcb_set_flag(skb, SKCBF_RX_HDR);

                if (cxgbi_skcb_tcp_seq(skb) != csk->rcv_nxt) {
                        pr_info("tid %u, CPL_ISCSI_HDR, bad seq, 0x%x/0x%x.\n",
                                csk->tid, cxgbi_skcb_tcp_seq(skb),
                                csk->rcv_nxt);
                        goto abort_conn;
                }

                bhs = skb->data;
                hlen = ntohs(cpl->len);
                dlen = ntohl(*(unsigned int *)(bhs + 4)) & 0xFFFFFF;

                plen = ISCSI_PDU_LEN(pdu_len_ddp);
                if (is_t4(lldi->adapter_type))
                        plen -= 40;

                if ((hlen + dlen) != plen) {
                        pr_info("tid 0x%x, CPL_ISCSI_HDR, pdu len "
                                "mismatch %u != %u + %u, seq 0x%x.\n",
                                csk->tid, plen, hlen, dlen,
                                cxgbi_skcb_tcp_seq(skb));
                        goto abort_conn;
                }

                cxgbi_skcb_rx_pdulen(skb) = (hlen + dlen + 3) & (~0x3);
                if (dlen)
                        cxgbi_skcb_rx_pdulen(skb) += csk->dcrc_len;
                csk->rcv_nxt += cxgbi_skcb_rx_pdulen(skb);

                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, skb 0x%p, 0x%x,%u+%u,0x%x,0x%x.\n",
                        csk, skb, *bhs, hlen, dlen,
                        ntohl(*((unsigned int *)(bhs + 16))),
                        ntohl(*((unsigned int *)(bhs + 24))));

        } else {
                struct sk_buff *lskb = csk->skb_ulp_lhdr;

                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA);
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p,%u,0x%lx, skb 0x%p data, 0x%p.\n",
                        csk, csk->state, csk->flags, skb, lskb);
        }

        __skb_queue_tail(&csk->receive_queue, skb);
        spin_unlock_bh(&csk->lock);
        return;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}

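/*
 * RX_DATA_DDP: completion status for the PDU whose header skb is
 * pending in csk->skb_ulp_lhdr. The ddpvld word flags header/data
 * digest and pad errors and tells us whether the payload was placed
 * directly (DDP'ed); once the status is recorded, the PDU is handed up
 * via cxgbi_conn_pdu_ready().
 */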
static void do_rx_data_ddp(struct cxgbi_device *cdev,
                                  struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct sk_buff *lskb;
        struct cpl_rx_data_ddp *rpl = (struct cpl_rx_data_ddp *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        unsigned int status = ntohl(rpl->ddpvld);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p,%u,0x%lx, skb 0x%p,0x%x, lhdr 0x%p.\n",
                csk, csk->state, csk->flags, skb, status, csk->skb_ulp_lhdr);

        spin_lock_bh(&csk->lock);

        if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u, bad state.\n",
                        csk, csk->state, csk->flags, csk->tid);
                if (csk->state != CTP_ABORTING)
                        goto abort_conn;
                else
                        goto discard;
        }

        if (!csk->skb_ulp_lhdr) {
                pr_err("tid 0x%x, rcv RX_DATA_DDP w/o pdu bhs.\n", csk->tid);
                goto abort_conn;
        }

        lskb = csk->skb_ulp_lhdr;
        csk->skb_ulp_lhdr = NULL;

        cxgbi_skcb_rx_ddigest(lskb) = ntohl(rpl->ulp_crc);

        if (ntohs(rpl->len) != cxgbi_skcb_rx_pdulen(lskb))
                pr_info("tid 0x%x, RX_DATA_DDP pdulen %u != %u.\n",
                        csk->tid, ntohs(rpl->len), cxgbi_skcb_rx_pdulen(lskb));

        if (status & (1 << CPL_RX_DDP_STATUS_HCRC_SHIFT)) {
                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, hcrc bad 0x%lx.\n",
                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_HCRC_ERR);
        }
        if (status & (1 << CPL_RX_DDP_STATUS_DCRC_SHIFT)) {
                pr_info("csk 0x%p, lhdr 0x%p, status 0x%x, dcrc bad 0x%lx.\n",
                        csk, lskb, status, cxgbi_skcb_flags(lskb));
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DCRC_ERR);
        }
        if (status & (1 << CPL_RX_DDP_STATUS_PAD_SHIFT)) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, lhdr 0x%p, status 0x%x, pad bad.\n",
                        csk, lskb, status);
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_PAD_ERR);
        }
        if ((status & (1 << CPL_RX_DDP_STATUS_DDP_SHIFT)) &&
                !cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA)) {
                log_debug(1 << CXGBI_DBG_PDU_RX,
                        "csk 0x%p, lhdr 0x%p, 0x%x, data ddp'ed.\n",
                        csk, lskb, status);
                cxgbi_skcb_set_flag(lskb, SKCBF_RX_DATA_DDPD);
        }
        log_debug(1 << CXGBI_DBG_PDU_RX,
                "csk 0x%p, lskb 0x%p, f 0x%lx.\n",
                csk, lskb, cxgbi_skcb_flags(lskb));

        cxgbi_skcb_set_flag(lskb, SKCBF_RX_STATUS);
        cxgbi_conn_pdu_ready(csk);
        spin_unlock_bh(&csk->lock);
        goto rel_skb;

abort_conn:
        send_abort_req(csk);
discard:
        spin_unlock_bh(&csk->lock);
rel_skb:
        __kfree_skb(skb);
}

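/*
 * FW4_ACK: firmware acknowledged one or more tx work requests. Hand the
 * returned credits and the new snd_una to libcxgbi so completed skbs on
 * the pending-WR queue can be freed and more data pushed.
 */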
static void do_fw4_ack(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cxgbi_sock *csk;
        struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;

        csk = lookup_tid(t, tid);
        if (unlikely(!csk))
                pr_err("can't find connection for tid %u.\n", tid);
        else {
                log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                        "csk 0x%p,%u,0x%lx,%u.\n",
                        csk, csk->state, csk->flags, csk->tid);
                cxgbi_sock_rcv_wr_ack(csk, rpl->credits, ntohl(rpl->snd_una),
                                        rpl->seq_vld);
        }
        __kfree_skb(skb);
}

static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
{
        struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
        unsigned int tid = GET_TID(rpl);
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct tid_info *t = lldi->tids;
        struct cxgbi_sock *csk;

        csk = lookup_tid(t, tid);
        if (!csk) {
                /* bail out before the debug print below dereferences csk */
                pr_err("can't find conn. for tid %u.\n", tid);
                __kfree_skb(skb);
                return;
        }

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,%lx,%u, status 0x%x.\n",
                csk, csk->state, csk->flags, csk->tid, rpl->status);

        if (rpl->status != CPL_ERR_NONE)
                pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
                        csk, tid, rpl->status);

        __kfree_skb(skb);
}

static int alloc_cpls(struct cxgbi_sock *csk)
{
        csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req),
                                        0, GFP_KERNEL);
        if (!csk->cpl_close)
                return -ENOMEM;

        csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req),
                                        0, GFP_KERNEL);
        if (!csk->cpl_abort_req)
                goto free_cpls;

        csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl),
                                        0, GFP_KERNEL);
        if (!csk->cpl_abort_rpl)
                goto free_cpls;
        return 0;

free_cpls:
        cxgbi_sock_free_cpl_skbs(csk);
        return -ENOMEM;
}

static inline void l2t_put(struct cxgbi_sock *csk)
{
        if (csk->l2t) {
                cxgb4_l2t_release(csk->l2t);
                csk->l2t = NULL;
                cxgbi_sock_put(csk);
        }
}

static void release_offload_resources(struct cxgbi_sock *csk)
{
        struct cxgb4_lld_info *lldi;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        cxgbi_sock_free_cpl_skbs(csk);
        if (csk->wr_cred != csk->wr_max_cred) {
                cxgbi_sock_purge_wr_queue(csk);
                cxgbi_sock_reset_wr_list(csk);
        }

        l2t_put(csk);
        if (cxgbi_sock_flag(csk, CTPF_HAS_ATID))
                free_atid(csk);
        else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) {
                lldi = cxgbi_cdev_priv(csk->cdev);
                cxgb4_remove_tid(lldi->tids, 0, csk->tid);
                cxgbi_sock_clear_flag(csk, CTPF_HAS_TID);
                cxgbi_sock_put(csk);
        }
        csk->dst = NULL;
        csk->cdev = NULL;
}

static int init_act_open(struct cxgbi_sock *csk)
{
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct net_device *ndev = cdev->ports[csk->port_id];
        struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
        struct neighbour *n;
        unsigned int step;
        int atid;

        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);

        /* csk->atid is unsigned, so test the allocation result in a
         * signed local; "csk->atid < 0" could never be true.
         */
        atid = cxgb4_alloc_atid(lldi->tids, csk);
        if (atid < 0) {
                pr_err("%s, NO atid available.\n", ndev->name);
                return -EINVAL;
        }
        csk->atid = atid;
        cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
        cxgbi_sock_get(csk);

        n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
        if (!n) {
                pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
                goto rel_resource;
        }
        csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
        if (!csk->l2t) {
                pr_err("%s, cannot alloc l2t.\n", ndev->name);
                goto rel_resource;
        }
        cxgbi_sock_get(csk);

        skb = alloc_wr(is_t4(lldi->adapter_type) ?
                                sizeof(struct cpl_act_open_req) :
                                sizeof(struct cpl_t5_act_open_req),
                        0, GFP_ATOMIC);
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;
        t4_set_arp_err_handler(skb, csk, cxgbi_sock_act_open_req_arp_failure);

        if (!csk->mtu)
                csk->mtu = dst_mtu(csk->dst);
        cxgb4_best_mtu(lldi->mtus, csk->mtu, &csk->mss_idx);
        csk->tx_chan = cxgb4_port_chan(ndev);
        /* SMT two entries per row */
        csk->smac_idx = ((cxgb4_port_viid(ndev) & 0x7F)) << 1;
        step = lldi->ntxq / lldi->nchan;
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
        csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
        csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
        csk->wr_una_cred = 0;
        cxgbi_sock_reset_wr_list(csk);
        csk->err = 0;
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
                csk, pi->port_id, ndev->name, csk->tx_chan,
                csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
                csk->smac_idx);

        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
        send_act_open_req(csk, skb, csk->l2t);
        neigh_release(n);
        return 0;

rel_resource:
        if (n)
                neigh_release(n);
        if (skb)
                __kfree_skb(skb);
        return -EINVAL;
}

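/*
 * CPL_ISCSI_DATA and CPL_RX_ISCSI_DDP are the T5 rx opcodes for iSCSI
 * payload and DDP completion; they reuse the T4 handlers below. The
 * values are defined locally here, presumably because this tree's
 * t4_msg.h does not carry them yet.
 */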
#define CPL_ISCSI_DATA          0xB2
#define CPL_RX_ISCSI_DDP        0x49
cxgb4i_cplhandler_func cxgb4i_cplhandlers[NUM_CPL_CMDS] = {
        [CPL_ACT_ESTABLISH] = do_act_establish,
        [CPL_ACT_OPEN_RPL] = do_act_open_rpl,
        [CPL_PEER_CLOSE] = do_peer_close,
        [CPL_ABORT_REQ_RSS] = do_abort_req_rss,
        [CPL_ABORT_RPL_RSS] = do_abort_rpl_rss,
        [CPL_CLOSE_CON_RPL] = do_close_con_rpl,
        [CPL_FW4_ACK] = do_fw4_ack,
        [CPL_ISCSI_HDR] = do_rx_iscsi_hdr,
        [CPL_ISCSI_DATA] = do_rx_iscsi_hdr,
        [CPL_SET_TCB_RPL] = do_set_tcb_rpl,
        [CPL_RX_DATA_DDP] = do_rx_data_ddp,
        [CPL_RX_ISCSI_DDP] = do_rx_data_ddp,
};

int cxgb4i_ofld_init(struct cxgbi_device *cdev)
{
        int rc;

        if (cxgb4i_max_connect > CXGB4I_MAX_CONN)
                cxgb4i_max_connect = CXGB4I_MAX_CONN;

        rc = cxgbi_device_portmap_create(cdev, cxgb4i_sport_base,
                                        cxgb4i_max_connect);
        if (rc < 0)
                return rc;

        cdev->csk_release_offload_resources = release_offload_resources;
        cdev->csk_push_tx_frames = push_tx_frames;
        cdev->csk_send_abort_req = send_abort_req;
        cdev->csk_send_close_req = send_close_req;
        cdev->csk_send_rx_credits = send_rx_credits;
        cdev->csk_alloc_cpls = alloc_cpls;
        cdev->csk_init_act_open = init_act_open;

        pr_info("cdev 0x%p, offload up, added.\n", cdev);
        return 0;
}

/*
 * functions to program the pagepod in h/w
 */
#define ULPMEM_IDATA_MAX_NPPODS 4 /* 256/PPOD_SIZE */
static inline void ulp_mem_io_set_hdr(struct cxgb4_lld_info *lldi,
                                struct ulp_mem_io *req,
                                unsigned int wr_len, unsigned int dlen,
                                unsigned int pm_addr)
{
        struct ulptx_idata *idata = (struct ulptx_idata *)(req + 1);

        INIT_ULPTX_WR(req, wr_len, 0, 0);
        if (is_t4(lldi->adapter_type))
                req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
                                        (ULP_MEMIO_ORDER(1)));
        else
                req->cmd = htonl(ULPTX_CMD(ULP_TX_MEM_WRITE) |
                                        (V_T5_ULP_MEMIO_IMM(1)));
        req->dlen = htonl(ULP_MEMIO_DATA_LEN(dlen >> 5));
        req->lock_addr = htonl(ULP_MEMIO_ADDR(pm_addr >> 5));
        req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

        idata->cmd_more = htonl(ULPTX_CMD(ULP_TX_SC_IMM));
        idata->len = htonl(dlen);
}

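/*
 * Write a run of pagepods into adapter memory using a ULP_TX memory
 * write that carries the pods as immediate data. Each WR holds at most
 * ULPMEM_IDATA_MAX_NPPODS (4) pods, i.e. 256 bytes, so the
 * ddp_set_map()/ddp_clear_map() callers below loop in chunks of that
 * size.
 */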
static int ddp_ppod_write_idata(struct cxgbi_device *cdev, unsigned int port_id,
				struct cxgbi_pagepod_hdr *hdr, unsigned int idx,
				unsigned int npods,
				struct cxgbi_gather_list *gl,
				unsigned int gl_pidx)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct sk_buff *skb;
	struct ulp_mem_io *req;
	struct ulptx_idata *idata;
	struct cxgbi_pagepod *ppod;
	unsigned int pm_addr = idx * PPOD_SIZE + ddp->llimit;
	unsigned int dlen = PPOD_SIZE * npods;
	unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
				sizeof(struct ulptx_idata) + dlen, 16);
	unsigned int i;

	skb = alloc_wr(wr_len, 0, GFP_ATOMIC);
	if (!skb) {
		pr_err("cdev 0x%p, idx %u, npods %u, OOM.\n",
			cdev, idx, npods);
		return -ENOMEM;
	}
	req = (struct ulp_mem_io *)skb->head;
	set_queue(skb, CPL_PRIORITY_CONTROL, NULL);

	ulp_mem_io_set_hdr(lldi, req, wr_len, dlen, pm_addr);
	idata = (struct ulptx_idata *)(req + 1);
	ppod = (struct cxgbi_pagepod *)(idata + 1);

	for (i = 0; i < npods; i++, ppod++, gl_pidx += PPOD_PAGES_MAX) {
		if (!hdr && !gl)
			cxgbi_ddp_ppod_clear(ppod);
		else
			cxgbi_ddp_ppod_set(ppod, hdr, gl, gl_pidx);
	}

	cxgb4_ofld_send(cdev->ports[port_id], skb);
	return 0;
}

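/*
 * Program the pagepods backing a DDP mapping, chunked so that each
 * work request carries at most ULPMEM_IDATA_MAX_NPPODS pagepods of
 * immediate data.
 */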
static int ddp_set_map(struct cxgbi_sock *csk, struct cxgbi_pagepod_hdr *hdr,
			unsigned int idx, unsigned int npods,
			struct cxgbi_gather_list *gl)
{
	unsigned int i, cnt;
	int err = 0;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(csk->cdev, csk->port_id, hdr,
					idx, cnt, gl, 4 * i);
		if (err < 0)
			break;
	}
	return err;
}

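/*
 * Clear the pagepods of a mapping being released.  A failed write only
 * terminates the loop early; no error is reported since the tag is
 * being retired regardless.
 */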
static void ddp_clear_map(struct cxgbi_hba *chba, unsigned int tag,
			  unsigned int idx, unsigned int npods)
{
	unsigned int i, cnt;
	int err;

	for (i = 0; i < npods; i += cnt, idx += cnt) {
		cnt = npods - i;
		if (cnt > ULPMEM_IDATA_MAX_NPPODS)
			cnt = ULPMEM_IDATA_MAX_NPPODS;
		err = ddp_ppod_write_idata(chba->cdev, chba->port_id, NULL,
					idx, cnt, NULL, 0);
		if (err < 0)
			break;
	}
}

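/*
 * Set the DDP page-size index of a connection via CPL_SET_TCB_FIELD.
 * The mask/val pair below programs a 2-bit field at bit 8 of the
 * selected TCB word; pg_idx 0 is the default, so no update is sent
 * for it.
 */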
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
				int pg_idx, bool reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!pg_idx || pg_idx >= DDP_PGIDX_MAX)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp page size */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

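/*
 * Enable/disable the iSCSI header and data digests of a connection by
 * updating the 2-bit ULP submode field at bit 4 of the TCB word:
 * ULP_CRC_HEADER for the header digest, ULP_CRC_DATA for the data
 * digest.  Nothing is sent if both stay disabled.
 */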
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
				 int hcrc, int dcrc, int reply)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;

	if (!hcrc && !dcrc)
		return 0;

	skb = alloc_wr(sizeof(*req), 0, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	csk->hcrc_len = (hcrc ? 4 : 0);
	csk->dcrc_len = (dcrc ? 4 : 0);
	/* set up ulp submode */
	req = (struct cpl_set_tcb_field *)skb->head;
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
	req->reply_ctrl = htons(NO_REPLY(reply) | QUEUENO(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->port_id);

	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);

	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return 0;
}

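/*
 * cxgb4i_ddp_init - size the pagepod region from the adapter's iSCSI
 * memory window, program the tag mask and page-size table into h/w,
 * and hook up the per-connection DDP callbacks.
 */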
static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
{
	struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	unsigned int tagmask, pgsz_factor[4];
	int err;

	if (ddp) {
		kref_get(&ddp->refcnt);
		pr_warn("cdev 0x%p, ddp 0x%p already set up.\n",
			cdev, cdev->ddp);
		return -EALREADY;
	}

	err = cxgbi_ddp_init(cdev, lldi->vr->iscsi.start,
			lldi->vr->iscsi.start + lldi->vr->iscsi.size - 1,
			lldi->iscsi_iolen, lldi->iscsi_iolen);
	if (err < 0)
		return err;

	ddp = cdev->ddp;

	tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
	cxgbi_ddp_page_size_factor(pgsz_factor);
	cxgb4_iscsi_init(lldi->ports[0], tagmask, pgsz_factor);

	cdev->csk_ddp_setup_digest = ddp_setup_conn_digest;
	cdev->csk_ddp_setup_pgidx = ddp_setup_conn_pgidx;
	cdev->csk_ddp_set = ddp_set_map;
	cdev->csk_ddp_clear = ddp_clear_map;

	pr_info("cxgb4i 0x%p tag: sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev, cdev->tag_format.sw_bits, cdev->tag_format.rsvd_bits,
		cdev->tag_format.rsvd_shift, cdev->tag_format.rsvd_mask);
	pr_info("cxgb4i 0x%p, nppods %u, bits %u, mask 0x%x,0x%x pkt %u/%u, %u/%u.\n",
		cdev, ddp->nppods, ddp->idx_bits, ddp->idx_mask,
		ddp->rsvd_tag_mask, ddp->max_txsz, lldi->iscsi_iolen,
		ddp->max_rxsz, lldi->iscsi_iolen);
	pr_info("cxgb4i 0x%p max payload size: %u/%u, %u/%u.\n",
		cdev, cdev->tx_max_size, ddp->max_txsz, cdev->rx_max_size,
		ddp->max_rxsz);
	return 0;
}

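/*
 * t4_uld_add - called by cxgb4 when an adapter becomes available.
 * Registers a cxgbi_device, keeps a private copy of the lld info,
 * initializes DDP and offload support, and adds one iSCSI host per
 * port.
 */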
static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbi_device *cdev;
	struct port_info *pi;
	int i, rc;

	cdev = cxgbi_device_register(sizeof(*lldi), lldi->nports);
	if (!cdev) {
		pr_info("t4 device 0x%p, register failed.\n", lldi);
		return NULL;
	}
	pr_info("0x%p,0x%x, ports %u,%s, chan %u, q %u,%u, wr %u.\n",
		cdev, lldi->adapter_type, lldi->nports,
		lldi->ports[0]->name, lldi->nchan, lldi->ntxq,
		lldi->nrxq, lldi->wr_cred);
	for (i = 0; i < lldi->nrxq; i++)
		log_debug(1 << CXGBI_DBG_DEV,
			"t4 0x%p, rxq id #%d: %u.\n",
			cdev, i, lldi->rxq_ids[i]);

	memcpy(cxgbi_cdev_priv(cdev), lldi, sizeof(*lldi));
	cdev->flags = CXGBI_FLAG_DEV_T4;
	cdev->pdev = lldi->pdev;
	cdev->ports = lldi->ports;
	cdev->nports = lldi->nports;
	cdev->mtus = lldi->mtus;
	cdev->nmtus = NMTUS;
	cdev->snd_win = cxgb4i_snd_win;
	cdev->rcv_win = cxgb4i_rcv_win;
	cdev->rx_credit_thres = cxgb4i_rx_credit_thres;
	cdev->skb_tx_rsvd = CXGB4I_TX_HEADER_LEN;
	cdev->skb_rx_extra = sizeof(struct cpl_iscsi_hdr);
	cdev->itp = &cxgb4i_iscsi_transport;

	cdev->pfvf = FW_VIID_PFN_GET(cxgb4_port_viid(lldi->ports[0])) << 8;
	pr_info("cdev 0x%p,%s, pfvf %u.\n",
		cdev, lldi->ports[0]->name, cdev->pfvf);

	rc = cxgb4i_ddp_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ddp init failed.\n", cdev);
		goto err_out;
	}
	rc = cxgb4i_ofld_init(cdev);
	if (rc) {
		pr_info("t4 0x%p ofld init failed.\n", cdev);
		goto err_out;
	}

	rc = cxgbi_hbas_add(cdev, CXGB4I_MAX_LUN, CXGBI_MAX_CONN,
				&cxgb4i_host_template, cxgb4i_stt);
	if (rc)
		goto err_out;

	for (i = 0; i < cdev->nports; i++) {
		pi = netdev_priv(lldi->ports[i]);
		cdev->hbas[i]->port_id = pi->port_id;
	}
	return cdev;

err_out:
	cxgbi_device_unregister(cdev);
	/* propagate the real error instead of assuming -ENOMEM */
	return ERR_PTR(rc);
}

#define RX_PULL_LEN	128
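/*
 * t4_uld_rx_handler - ingress CPL message dispatch.  Small messages
 * arrive inline in the response descriptor (pgl == NULL) and are
 * copied into a fresh skb; larger ones arrive as a free-list gather
 * list and are converted with cxgb4_pktgl_to_skb().  The CPL opcode
 * then indexes cxgb4i_cplhandlers[].
 */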
static int t4_uld_rx_handler(void *handle, const __be64 *rsp,
				const struct pkt_gl *pgl)
{
	const struct cpl_act_establish *rpl;
	struct sk_buff *skb;
	unsigned int opc;
	struct cxgbi_device *cdev = handle;

	if (pgl == NULL) {
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_wr(len, 0, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(*(u8 *)rsp != *(u8 *)pgl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				pgl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)pgl->va),
				pgl->tot_len);
			return 0;
		}
		skb = cxgb4_pktgl_to_skb(pgl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_act_establish *)skb->data;
	opc = rpl->ot.opcode;
	log_debug(1 << CXGBI_DBG_TOE,
		"cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		cdev, opc, rpl->ot.opcode_tid, ntohl(rpl->ot.opcode_tid), skb);
	if (cxgb4i_cplhandlers[opc])
		cxgb4i_cplhandlers[opc](cdev, skb);
	else {
		pr_err("No handler for opcode 0x%x.\n", opc);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	log_debug(1 << CXGBI_DBG_TOE, "OOM bailing out.\n");
	return 1;
}

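/*
 * t4_uld_state_change - adapter state notifications from cxgb4.  Only
 * DETACH takes action (unregistering the device); the UP and RECOVERY
 * cases are placeholders, as the inline comments note.
 */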
static int t4_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbi_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		pr_info("cdev 0x%p, UP.\n", cdev);
		/* re-initialize */
		break;
	case CXGB4_STATE_START_RECOVERY:
		pr_info("cdev 0x%p, RECOVERY.\n", cdev);
		/* close all connections */
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev 0x%p, DOWN.\n", cdev);
		break;
	case CXGB4_STATE_DETACH:
		pr_info("cdev 0x%p, DETACH.\n", cdev);
		cxgbi_device_unregister(cdev);
		break;
	default:
		pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
		break;
	}
	return 0;
}

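/*
 * Module init/exit: register the iSCSI transport via libcxgbi, then
 * attach to cxgb4 as the CXGB4_ULD_ISCSI upper-layer driver; teardown
 * happens in the reverse order.
 */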
static int __init cxgb4i_init_module(void)
{
	int rc;

	printk(KERN_INFO "%s", version);

	rc = cxgbi_iscsi_init(&cxgb4i_iscsi_transport, &cxgb4i_stt);
	if (rc < 0)
		return rc;
	cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
	return 0;
}

static void __exit cxgb4i_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
	cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
	cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
}

module_init(cxgb4i_init_module);
module_exit(cxgb4i_exit_module);