linux/drivers/scsi/cxgb3i/cxgb3i_pdu.c
/*
 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2008 Red Hat, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "cxgb3i.h"
#include "cxgb3i_pdu.h"

#ifdef __DEBUG_CXGB3I_RX__
#define cxgb3i_rx_debug         cxgb3i_log_debug
#else
#define cxgb3i_rx_debug(fmt...)
#endif

#ifdef __DEBUG_CXGB3I_TX__
#define cxgb3i_tx_debug         cxgb3i_log_debug
#else
#define cxgb3i_tx_debug(fmt...)
#endif

/* always allocate room for AHS */
#define SKB_TX_PDU_HEADER_LEN   \
        (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
static unsigned int skb_extra_headroom;
static struct page *pad_page;

/*
 * pdu receive, interact with libiscsi_tcp
 */
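/*
 * read_pdu_skb - feed one skb segment to libiscsi_tcp
 * Returns the number of bytes consumed when the segment completes (or the
 * connection is suspended), or a negative errno on error or an unexpected
 * recv status.
 */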
static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
                               unsigned int offset, int offloaded)
{
        int status = 0;
        int bytes_read;

        bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
        switch (status) {
        case ISCSI_TCP_CONN_ERR:
                return -EIO;
        case ISCSI_TCP_SUSPENDED:
                /* no transfer - just have caller flush queue */
                return bytes_read;
        case ISCSI_TCP_SKB_DONE:
                /*
                 * pdus should always fit in the skb and we should get
                 * segment done notification.
                 */
                iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.\n");
                return -EFAULT;
        case ISCSI_TCP_SEGMENT_DONE:
                return bytes_read;
        default:
                iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
                                  "status %d\n", status);
                return -EINVAL;
        }
}

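/*
 * cxgb3i_conn_read_pdu_skb - process one rx skb carrying a complete pdu
 * Header/data digest errors and DDP placement are flagged by the hardware
 * in the skb's ulp mode bits.  The iscsi header is always fed to
 * libiscsi_tcp; the data segment is either marked as already placed
 * (DDP'ed) or read out of the skb at an offset that skips the intervening
 * cpl_iscsi_hdr_norss.
 */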
static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
                                    struct sk_buff *skb)
{
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        bool offloaded = false;
        unsigned int offset;
        int rc;

        cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
                        conn, skb, skb->len, skb_ulp_mode(skb));

        if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
                iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
                return -EIO;
        }

        if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
                iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
                return -EIO;
        }

        if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
                iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
                return -EIO;
        }

        /* iscsi hdr */
        rc = read_pdu_skb(conn, skb, 0, 0);
        if (rc <= 0)
                return rc;

        if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
                return 0;

        offset = rc;
        if (conn->hdrdgst_en)
                offset += ISCSI_DIGEST_SIZE;

        /* iscsi data */
        if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
                cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
                                "itt 0x%x.\n",
                                skb,
                                tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
                                tcp_conn->in.datalen,
                                ntohl(tcp_conn->in.hdr->itt));
                offloaded = true;
        } else {
                cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
                                "itt 0x%x.\n",
                                skb,
                                tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
                                tcp_conn->in.datalen,
                                ntohl(tcp_conn->in.hdr->itt));
                offset += sizeof(struct cpl_iscsi_hdr_norss);
        }

        rc = read_pdu_skb(conn, skb, offset, offloaded);
        if (rc < 0)
                return rc;
        else
                return 0;
}

/*
 * pdu transmit, interact with libiscsi_tcp
 */
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
        u8 submode = 0;

        if (hcrc)
                submode |= 1;
        if (dcrc)
                submode |= 2;
        skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
}

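/*
 * cxgb3i_conn_cleanup_task - release per-task tx resources
 * Frees a tx skb that was built but never handed to the hardware, releases
 * the itt/ddp tag reserved for the task and lets libiscsi_tcp clean up the
 * rest.
 */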
void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
        struct cxgb3i_task_data *tdata = task->dd_data +
                                        sizeof(struct iscsi_tcp_task);

        /* never reached the xmit task callout */
        if (tdata->skb)
                __kfree_skb(tdata->skb);
        memset(tdata, 0, sizeof(struct cxgb3i_task_data));

        /* MNC - Do we need a check in case this is called but
         * cxgb3i_conn_alloc_pdu has never been called on the task */
        cxgb3i_release_itt(task, task->hdr_itt);
        iscsi_tcp_cleanup_task(task);
}

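/*
 * sgl_seek_offset - locate the sg entry covering a byte offset
 * On success *sgp and *off are set to the entry containing byte @offset
 * and the offset within that entry; returns -EFAULT if @offset is beyond
 * the end of the list.
 */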
static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
                                unsigned int offset, unsigned int *off,
                                struct scatterlist **sgp)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, sgcnt, i) {
                if (offset < sg->length) {
                        *off = offset;
                        *sgp = sg;
                        return 0;
                }
                offset -= sg->length;
        }
        return -EFAULT;
}

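/*
 * sgl_read_to_frags - map a byte range of a scatterlist into page frags
 * Builds at most @frag_max fragments covering @dlen bytes starting at
 * @sgoffset within @sg, merging pieces that are contiguous within the same
 * page.  Returns the number of fragments used or a negative errno.
 */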
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
                                unsigned int dlen, skb_frag_t *frags,
                                int frag_max)
{
        unsigned int datalen = dlen;
        unsigned int sglen = sg->length - sgoffset;
        struct page *page = sg_page(sg);
        int i;

        i = 0;
        do {
                unsigned int copy;

                if (!sglen) {
                        sg = sg_next(sg);
                        if (!sg) {
                                cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
                                                 __func__, datalen, dlen);
                                return -EINVAL;
                        }
                        sgoffset = 0;
                        sglen = sg->length;
                        page = sg_page(sg);
                }
                copy = min(datalen, sglen);
                if (i && page == frags[i - 1].page &&
                    sgoffset + sg->offset ==
                        frags[i - 1].page_offset + frags[i - 1].size) {
                        frags[i - 1].size += copy;
                } else {
                        if (i >= frag_max) {
                                cxgb3i_log_error("%s, too many pages %u, "
                                                 "dlen %u.\n", __func__,
                                                 frag_max, dlen);
                                return -EINVAL;
                        }

                        frags[i].page = page;
                        frags[i].page_offset = sg->offset + sgoffset;
                        frags[i].size = copy;
                        i++;
                }
                datalen -= copy;
                sgoffset += copy;
                sglen -= copy;
        } while (datalen);

        return i;
}

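/*
 * cxgb3i_conn_alloc_pdu - allocate the tx skb for a new pdu
 * The skb head always has room for the iscsi header plus the maximum AHS.
 * For write commands extra headroom may be reserved so the payload can be
 * copied inline if it would need too many page fragments (see
 * cxgb3i_conn_init_pdu).  cxgb3i_reserve_itt() fills in task->hdr->itt for
 * everything except data-out pdus, which reuse the command's itt.
 */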
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
        struct iscsi_conn *conn = task->conn;
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
        struct scsi_cmnd *sc = task->sc;
        int headroom = SKB_TX_PDU_HEADER_LEN;

        tcp_task->dd_data = tdata;
        task->hdr = NULL;

        /* write command, need to send data pdus */
        if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
            (opcode == ISCSI_OP_SCSI_CMD &&
            (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
                headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

        tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
        if (!tdata->skb)
                return -ENOMEM;
        skb_reserve(tdata->skb, TX_HEADER_LEN);

        cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
                        task, opcode, tdata->skb);

        task->hdr = (struct iscsi_hdr *)tdata->skb->data;
        task->hdr_max = SKB_TX_PDU_HEADER_LEN;

        /* data_out uses scsi_cmd's itt */
        if (opcode != ISCSI_OP_SCSI_DATA_OUT)
                cxgb3i_reserve_itt(task, &task->hdr->itt);

        return 0;
}

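/*
 * cxgb3i_conn_init_pdu - attach @count bytes of payload to the tx skb
 * Puts the header built by libiscsi, then either maps the command's
 * scatterlist into page fragments, copies the data into the skb head when
 * the fragments would not fit in MAX_SKB_FRAGS, or, for non-scsi pdus,
 * points a single fragment at task->data.  4-byte padding, when needed, is
 * taken from the shared zeroed pad_page.
 */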
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
                              unsigned int count)
{
        struct iscsi_conn *conn = task->conn;
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct cxgb3i_task_data *tdata = tcp_task->dd_data;
        struct sk_buff *skb = tdata->skb;
        unsigned int datalen = count;
        int i, padlen = iscsi_padding(count);
        struct page *pg;

        cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
                        task, task->sc, offset, count, skb);

        skb_put(skb, task->hdr_len);
        tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
        if (!count)
                return 0;

        if (task->sc) {
                struct scsi_data_buffer *sdb = scsi_out(task->sc);
                struct scatterlist *sg = NULL;
                int err;

                tdata->offset = offset;
                tdata->count = count;
                err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
                                        tdata->offset, &tdata->sgoffset, &sg);
                if (err < 0) {
                        cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
                                        sdb->table.nents, tdata->offset,
                                        sdb->length);
                        return err;
                }
                err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
                                        tdata->frags, MAX_PDU_FRAGS);
                if (err < 0) {
                        cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
                                        sdb->table.nents, tdata->offset,
                                        tdata->count);
                        return err;
                }
                tdata->nr_frags = err;

                if (tdata->nr_frags > MAX_SKB_FRAGS ||
                    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
                        char *dst = skb->data + task->hdr_len;
                        skb_frag_t *frag = tdata->frags;

                        /* data fits in the skb's headroom */
                        for (i = 0; i < tdata->nr_frags; i++, frag++) {
                                char *src = kmap_atomic(frag->page,
                                                        KM_SOFTIRQ0);

                                memcpy(dst, src+frag->page_offset, frag->size);
                                dst += frag->size;
                                kunmap_atomic(src, KM_SOFTIRQ0);
                        }
                        if (padlen) {
                                memset(dst, 0, padlen);
                                padlen = 0;
                        }
                        skb_put(skb, count + padlen);
                } else {
                        /* data fit into frag_list */
                        for (i = 0; i < tdata->nr_frags; i++)
                                get_page(tdata->frags[i].page);

                        memcpy(skb_shinfo(skb)->frags, tdata->frags,
                                sizeof(skb_frag_t) * tdata->nr_frags);
                        skb_shinfo(skb)->nr_frags = tdata->nr_frags;
                        skb->len += count;
                        skb->data_len += count;
                        skb->truesize += count;
                }

        } else {
                pg = virt_to_page(task->data);

                get_page(pg);
                skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
                                        count);
                skb->len += count;
                skb->data_len += count;
                skb->truesize += count;
        }

        if (padlen) {
                i = skb_shinfo(skb)->nr_frags;
                get_page(pad_page);
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
                                 padlen);

                skb->data_len += padlen;
                skb->truesize += padlen;
                skb->len += padlen;
        }

        return 0;
}

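/*
 * cxgb3i_conn_xmit_pdu - hand a fully built pdu to the offload connection
 * On success conn->txdata_octets is updated, including any digests the
 * hardware will insert.  -EAGAIN/-ENOBUFS keep the skb around for a later
 * retry; any other error frees the skb and fails the connection.
 */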
int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
{
        struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
        struct cxgb3i_conn *cconn = tcp_conn->dd_data;
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct cxgb3i_task_data *tdata = tcp_task->dd_data;
        struct sk_buff *skb = tdata->skb;
        unsigned int datalen;
        int err;

        if (!skb)
                return 0;

        datalen = skb->data_len;
        tdata->skb = NULL;
        err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
        if (err > 0) {
                int pdulen = err;

                cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
                                task, skb, skb->len, skb->data_len, err);

                if (task->conn->hdrdgst_en)
                        pdulen += ISCSI_DIGEST_SIZE;
                if (datalen && task->conn->datadgst_en)
                        pdulen += ISCSI_DIGEST_SIZE;

                task->conn->txdata_octets += pdulen;
                return 0;
        }

        if (err == -EAGAIN || err == -ENOBUFS) {
                /* reset skb to send when we are called again */
                tdata->skb = skb;
                return err;
        }

        cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
                        task->itt, skb, skb->len, skb->data_len, err);
        kfree_skb(skb);
        iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
        iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
        return err;
}

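/*
 * cxgb3i_pdu_init - allocate the shared zero pad page and decide whether
 * the tx path should reserve extra headroom for inline payload copies.
 */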
int cxgb3i_pdu_init(void)
{
        if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
                skb_extra_headroom = SKB_TX_HEADROOM;
        pad_page = alloc_page(GFP_KERNEL);
        if (!pad_page)
                return -ENOMEM;
        memset(page_address(pad_page), 0, PAGE_SIZE);
        return 0;
}

void cxgb3i_pdu_cleanup(void)
{
        if (pad_page) {
                __free_page(pad_page);
                pad_page = NULL;
        }
}

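/*
 * cxgb3i_conn_pdu_ready - rx callback from the offload connection
 * Drains c3cn->receive_queue, feeds each skb to the pdu parser and returns
 * the consumed bytes to the hardware as rx credits.  A parse error fails
 * the iscsi connection.
 */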
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
        struct sk_buff *skb;
        unsigned int read = 0;
        struct iscsi_conn *conn = c3cn->user_data;
        int err = 0;

        cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

        read_lock(&c3cn->callback_lock);
        if (unlikely(!conn || conn->suspend_rx)) {
                cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
                                conn, conn ? conn->id : 0xFF,
                                conn ? conn->suspend_rx : 0xFF);
                read_unlock(&c3cn->callback_lock);
                return;
        }
        skb = skb_peek(&c3cn->receive_queue);
        while (!err && skb) {
                __skb_unlink(skb, &c3cn->receive_queue);
                read += skb_rx_pdulen(skb);
                cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
                                conn, c3cn, skb, skb_rx_pdulen(skb));
                err = cxgb3i_conn_read_pdu_skb(conn, skb);
                __kfree_skb(skb);
                skb = skb_peek(&c3cn->receive_queue);
        }
        read_unlock(&c3cn->callback_lock);
        if (c3cn) {
                c3cn->copied_seq += read;
                cxgb3i_c3cn_rx_credits(c3cn, read);
        }
        conn->rxdata_octets += read;

        if (err) {
                cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        }
}

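/*
 * cxgb3i_conn_tx_open - tx callback from the offload connection; kick
 * libiscsi so any queued pdus get another chance to be transmitted.
 */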
void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
{
        struct iscsi_conn *conn = c3cn->user_data;

        cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
        if (conn) {
                cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
                iscsi_conn_queue_work(conn);
        }
}

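/*
 * cxgb3i_conn_closing - the offload connection is going away; fail the
 * iscsi connection unless it is still in the established state.
 */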
void cxgb3i_conn_closing(struct s3_conn *c3cn)
{
        struct iscsi_conn *conn;

        read_lock(&c3cn->callback_lock);
        conn = c3cn->user_data;
        if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
        read_unlock(&c3cn->callback_lock);
}