linux/drivers/nvme/host/tcp.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * NVMe over Fabrics TCP host.
   4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
   5 */
   6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
   7#include <linux/module.h>
   8#include <linux/init.h>
   9#include <linux/slab.h>
  10#include <linux/err.h>
  11#include <linux/nvme-tcp.h>
  12#include <net/sock.h>
  13#include <net/tcp.h>
  14#include <linux/blk-mq.h>
  15#include <crypto/hash.h>
  16
  17#include "nvme.h"
  18#include "fabrics.h"
  19
  20struct nvme_tcp_queue;
  21
  22enum nvme_tcp_send_state {
  23        NVME_TCP_SEND_CMD_PDU = 0,
  24        NVME_TCP_SEND_H2C_PDU,
  25        NVME_TCP_SEND_DATA,
  26        NVME_TCP_SEND_DDGST,
  27};
  28
  29struct nvme_tcp_request {
  30        struct nvme_request     req;
  31        void                    *pdu;
  32        struct nvme_tcp_queue   *queue;
  33        u32                     data_len;
  34        u32                     pdu_len;
  35        u32                     pdu_sent;
  36        u16                     ttag;
  37        struct list_head        entry;
  38        __le32                  ddgst;
  39
  40        struct bio              *curr_bio;
  41        struct iov_iter         iter;
  42
  43        /* send state */
  44        size_t                  offset;
  45        size_t                  data_sent;
  46        enum nvme_tcp_send_state state;
  47};
  48
  49enum nvme_tcp_queue_flags {
  50        NVME_TCP_Q_ALLOCATED    = 0,
  51        NVME_TCP_Q_LIVE         = 1,
  52};
  53
  54enum nvme_tcp_recv_state {
  55        NVME_TCP_RECV_PDU = 0,
  56        NVME_TCP_RECV_DATA,
  57        NVME_TCP_RECV_DDGST,
  58};
  59
  60struct nvme_tcp_ctrl;
  61struct nvme_tcp_queue {
  62        struct socket           *sock;
  63        struct work_struct      io_work;
  64        int                     io_cpu;
  65
  66        spinlock_t              lock;
  67        struct list_head        send_list;
  68
  69        /* recv state */
  70        void                    *pdu;
  71        int                     pdu_remaining;
  72        int                     pdu_offset;
  73        size_t                  data_remaining;
  74        size_t                  ddgst_remaining;
  75
  76        /* send state */
  77        struct nvme_tcp_request *request;
  78
  79        int                     queue_size;
  80        size_t                  cmnd_capsule_len;
  81        struct nvme_tcp_ctrl    *ctrl;
  82        unsigned long           flags;
  83        bool                    rd_enabled;
  84
  85        bool                    hdr_digest;
  86        bool                    data_digest;
  87        struct ahash_request    *rcv_hash;
  88        struct ahash_request    *snd_hash;
  89        __le32                  exp_ddgst;
  90        __le32                  recv_ddgst;
  91
  92        struct page_frag_cache  pf_cache;
  93
  94        void (*state_change)(struct sock *);
  95        void (*data_ready)(struct sock *);
  96        void (*write_space)(struct sock *);
  97};
  98
  99struct nvme_tcp_ctrl {
 100        /* read only in the hot path */
 101        struct nvme_tcp_queue   *queues;
 102        struct blk_mq_tag_set   tag_set;
 103
 104        /* other member variables */
 105        struct list_head        list;
 106        struct blk_mq_tag_set   admin_tag_set;
 107        struct sockaddr_storage addr;
 108        struct sockaddr_storage src_addr;
 109        struct nvme_ctrl        ctrl;
 110
 111        struct work_struct      err_work;
 112        struct delayed_work     connect_work;
 113        struct nvme_tcp_request async_req;
 114        u32                     io_queues[HCTX_MAX_TYPES];
 115};
 116
 117static LIST_HEAD(nvme_tcp_ctrl_list);
 118static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
 119static struct workqueue_struct *nvme_tcp_wq;
 120static struct blk_mq_ops nvme_tcp_mq_ops;
 121static struct blk_mq_ops nvme_tcp_admin_mq_ops;
 122
 123static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
 124{
 125        return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
 126}
 127
 128static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
 129{
 130        return queue - queue->ctrl->queues;
 131}
 132
 133static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
 134{
 135        u32 queue_idx = nvme_tcp_queue_id(queue);
 136
 137        if (queue_idx == 0)
 138                return queue->ctrl->admin_tag_set.tags[queue_idx];
 139        return queue->ctrl->tag_set.tags[queue_idx - 1];
 140}
 141
 142static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
 143{
 144        return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 145}
 146
 147static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
 148{
 149        return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
 150}
 151
 152static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_queue *queue)
 153{
 154        return queue->cmnd_capsule_len - sizeof(struct nvme_command);
 155}
 156
 157static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
 158{
 159        return req == &req->queue->ctrl->async_req;
 160}
 161
 162static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
 163{
 164        struct request *rq;
 165        unsigned int bytes;
 166
 167        if (unlikely(nvme_tcp_async_req(req)))
 168                return false; /* async events don't have a request */
 169
 170        rq = blk_mq_rq_from_pdu(req);
 171        bytes = blk_rq_payload_bytes(rq);
 172
 173        return rq_data_dir(rq) == WRITE && bytes &&
 174                bytes <= nvme_tcp_inline_data_size(req->queue);
 175}
 176
 177static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
 178{
 179        return req->iter.bvec->bv_page;
 180}
 181
 182static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
 183{
 184        return req->iter.bvec->bv_offset + req->iter.iov_offset;
 185}
 186
 187static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
 188{
 189        return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
 190                        req->pdu_len - req->pdu_sent);
 191}
 192
 193static inline size_t nvme_tcp_req_offset(struct nvme_tcp_request *req)
 194{
 195        return req->iter.iov_offset;
 196}
 197
 198static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
 199{
 200        return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
 201                        req->pdu_len - req->pdu_sent : 0;
 202}
 203
 204static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
 205                int len)
 206{
 207        return nvme_tcp_pdu_data_left(req) <= len;
 208}
 209
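/*
 * Set up req->iter as a bvec iterator over the request payload: either
 * the single special payload vector (e.g. discard) or the bio currently
 * pointed to by req->curr_bio.  Used for both sending (WRITE) and
 * receiving (READ) data.
 */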
 210static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
 211                unsigned int dir)
 212{
 213        struct request *rq = blk_mq_rq_from_pdu(req);
 214        struct bio_vec *vec;
 215        unsigned int size;
 216        int nsegs;
 217        size_t offset;
 218
 219        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
 220                vec = &rq->special_vec;
 221                nsegs = 1;
 222                size = blk_rq_payload_bytes(rq);
 223                offset = 0;
 224        } else {
 225                struct bio *bio = req->curr_bio;
 226
 227                vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
 228                nsegs = bio_segments(bio);
 229                size = bio->bi_iter.bi_size;
 230                offset = bio->bi_iter.bi_bvec_done;
 231        }
 232
 233        iov_iter_bvec(&req->iter, dir, vec, nsegs, size);
 234        req->iter.iov_offset = offset;
 235}
 236
 237static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
 238                int len)
 239{
 240        req->data_sent += len;
 241        req->pdu_sent += len;
 242        iov_iter_advance(&req->iter, len);
 243        if (!iov_iter_count(&req->iter) &&
 244            req->data_sent < req->data_len) {
 245                req->curr_bio = req->curr_bio->bi_next;
 246                nvme_tcp_init_iter(req, WRITE);
 247        }
 248}
 249
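/*
 * Queue a request for transmission: add it to the per-queue send_list
 * under the queue lock and kick io_work on the CPU this queue is bound
 * to.  All actual socket I/O happens from io_work.
 */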
 250static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req)
 251{
 252        struct nvme_tcp_queue *queue = req->queue;
 253
 254        spin_lock(&queue->lock);
 255        list_add_tail(&req->entry, &queue->send_list);
 256        spin_unlock(&queue->lock);
 257
 258        queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 259}
 260
 261static inline struct nvme_tcp_request *
 262nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
 263{
 264        struct nvme_tcp_request *req;
 265
 266        spin_lock(&queue->lock);
 267        req = list_first_entry_or_null(&queue->send_list,
 268                        struct nvme_tcp_request, entry);
 269        if (req)
 270                list_del(&req->entry);
 271        spin_unlock(&queue->lock);
 272
 273        return req;
 274}
 275
 276static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
 277                __le32 *dgst)
 278{
 279        ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
 280        crypto_ahash_final(hash);
 281}
 282
 283static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
 284                struct page *page, off_t off, size_t len)
 285{
 286        struct scatterlist sg;
 287
 288        sg_init_marker(&sg, 1);
 289        sg_set_page(&sg, page, len, off);
 290        ahash_request_set_crypt(hash, &sg, NULL, len);
 291        crypto_ahash_update(hash);
 292}
 293
 294static inline void nvme_tcp_hdgst(struct ahash_request *hash,
 295                void *pdu, size_t len)
 296{
 297        struct scatterlist sg;
 298
 299        sg_init_one(&sg, pdu, len);
 300        ahash_request_set_crypt(hash, &sg, pdu + len, len);
 301        crypto_ahash_digest(hash);
 302}
 303
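/*
 * Verify the header digest of a received PDU: save the digest that
 * followed the header, recompute it over the header in place
 * (nvme_tcp_hdgst() writes its result right after the header) and
 * compare the two.
 */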
 304static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
 305                void *pdu, size_t pdu_len)
 306{
 307        struct nvme_tcp_hdr *hdr = pdu;
 308        __le32 recv_digest;
 309        __le32 exp_digest;
 310
 311        if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
 312                dev_err(queue->ctrl->ctrl.device,
 313                        "queue %d: header digest flag is cleared\n",
 314                        nvme_tcp_queue_id(queue));
 315                return -EPROTO;
 316        }
 317
 318        recv_digest = *(__le32 *)(pdu + hdr->hlen);
 319        nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
 320        exp_digest = *(__le32 *)(pdu + hdr->hlen);
 321        if (recv_digest != exp_digest) {
 322                dev_err(queue->ctrl->ctrl.device,
 323                        "header digest error: recv %#x expected %#x\n",
 324                        le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
 325                return -EIO;
 326        }
 327
 328        return 0;
 329}
 330
 331static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
 332{
 333        struct nvme_tcp_hdr *hdr = pdu;
 334        u8 digest_len = nvme_tcp_hdgst_len(queue);
 335        u32 len;
 336
 337        len = le32_to_cpu(hdr->plen) - hdr->hlen -
 338                ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
 339
 340        if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
 341                dev_err(queue->ctrl->ctrl.device,
 342                        "queue %d: data digest flag is cleared\n",
  343                        nvme_tcp_queue_id(queue));
 344                return -EPROTO;
 345        }
 346        crypto_ahash_init(queue->rcv_hash);
 347
 348        return 0;
 349}
 350
 351static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
 352                struct request *rq, unsigned int hctx_idx)
 353{
 354        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 355
 356        page_frag_free(req->pdu);
 357}
 358
 359static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
 360                struct request *rq, unsigned int hctx_idx,
 361                unsigned int numa_node)
 362{
 363        struct nvme_tcp_ctrl *ctrl = set->driver_data;
 364        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
 365        int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
 366        struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
 367        u8 hdgst = nvme_tcp_hdgst_len(queue);
 368
 369        req->pdu = page_frag_alloc(&queue->pf_cache,
 370                sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
 371                GFP_KERNEL | __GFP_ZERO);
 372        if (!req->pdu)
 373                return -ENOMEM;
 374
 375        req->queue = queue;
 376        nvme_req(rq)->ctrl = &ctrl->ctrl;
 377
 378        return 0;
 379}
 380
 381static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 382                unsigned int hctx_idx)
 383{
 384        struct nvme_tcp_ctrl *ctrl = data;
 385        struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
 386
 387        hctx->driver_data = queue;
 388        return 0;
 389}
 390
 391static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 392                unsigned int hctx_idx)
 393{
 394        struct nvme_tcp_ctrl *ctrl = data;
 395        struct nvme_tcp_queue *queue = &ctrl->queues[0];
 396
 397        hctx->driver_data = queue;
 398        return 0;
 399}
 400
 401static enum nvme_tcp_recv_state
 402nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
 403{
 404        return  (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
 405                (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
 406                NVME_TCP_RECV_DATA;
 407}
 408
 409static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
 410{
 411        queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
 412                                nvme_tcp_hdgst_len(queue);
 413        queue->pdu_offset = 0;
 414        queue->data_remaining = -1;
 415        queue->ddgst_remaining = 0;
 416}
 417
 418static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 419{
 420        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 421                return;
 422
 423        queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
 424}
 425
 426static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 427                struct nvme_completion *cqe)
 428{
 429        struct request *rq;
 430
 431        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
 432        if (!rq) {
 433                dev_err(queue->ctrl->ctrl.device,
 434                        "queue %d tag 0x%x not found\n",
 435                        nvme_tcp_queue_id(queue), cqe->command_id);
 436                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
 437                return -EINVAL;
 438        }
 439
 440        nvme_end_request(rq, cqe->status, cqe->result);
 441
 442        return 0;
 443}
 444
 445static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
 446                struct nvme_tcp_data_pdu *pdu)
 447{
 448        struct request *rq;
 449
 450        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
 451        if (!rq) {
 452                dev_err(queue->ctrl->ctrl.device,
 453                        "queue %d tag %#x not found\n",
 454                        nvme_tcp_queue_id(queue), pdu->command_id);
 455                return -ENOENT;
 456        }
 457
 458        if (!blk_rq_payload_bytes(rq)) {
 459                dev_err(queue->ctrl->ctrl.device,
 460                        "queue %d tag %#x unexpected data\n",
 461                        nvme_tcp_queue_id(queue), rq->tag);
 462                return -EIO;
 463        }
 464
 465        queue->data_remaining = le32_to_cpu(pdu->data_length);
 466
 467        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
 468            unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
 469                dev_err(queue->ctrl->ctrl.device,
 470                        "queue %d tag %#x SUCCESS set but not last PDU\n",
 471                        nvme_tcp_queue_id(queue), rq->tag);
 472                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
 473                return -EPROTO;
 474        }
 475
 476        return 0;
 477}
 478
 479static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 480                struct nvme_tcp_rsp_pdu *pdu)
 481{
 482        struct nvme_completion *cqe = &pdu->cqe;
 483        int ret = 0;
 484
 485        /*
 486         * AEN requests are special as they don't time out and can
 487         * survive any kind of queue freeze and often don't respond to
 488         * aborts.  We don't even bother to allocate a struct request
 489         * for them but rather special case them here.
 490         */
 491        if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
 492            cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
 493                nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 494                                &cqe->result);
 495        else
 496                ret = nvme_tcp_process_nvme_cqe(queue, cqe);
 497
 498        return ret;
 499}
 500
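/*
 * Prepare the H2CData PDU that answers a controller R2T: validate the
 * requested length and offset against what has already been sent for
 * this request, then fill in the data PDU header (including digest
 * flags) for the next send.
 */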
 501static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
 502                struct nvme_tcp_r2t_pdu *pdu)
 503{
 504        struct nvme_tcp_data_pdu *data = req->pdu;
 505        struct nvme_tcp_queue *queue = req->queue;
 506        struct request *rq = blk_mq_rq_from_pdu(req);
 507        u8 hdgst = nvme_tcp_hdgst_len(queue);
 508        u8 ddgst = nvme_tcp_ddgst_len(queue);
 509
 510        req->pdu_len = le32_to_cpu(pdu->r2t_length);
 511        req->pdu_sent = 0;
 512
 513        if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
 514                dev_err(queue->ctrl->ctrl.device,
 515                        "req %d r2t len %u exceeded data len %u (%zu sent)\n",
 516                        rq->tag, req->pdu_len, req->data_len,
 517                        req->data_sent);
 518                return -EPROTO;
 519        }
 520
 521        if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
 522                dev_err(queue->ctrl->ctrl.device,
 523                        "req %d unexpected r2t offset %u (expected %zu)\n",
 524                        rq->tag, le32_to_cpu(pdu->r2t_offset),
 525                        req->data_sent);
 526                return -EPROTO;
 527        }
 528
 529        memset(data, 0, sizeof(*data));
 530        data->hdr.type = nvme_tcp_h2c_data;
 531        data->hdr.flags = NVME_TCP_F_DATA_LAST;
 532        if (queue->hdr_digest)
 533                data->hdr.flags |= NVME_TCP_F_HDGST;
 534        if (queue->data_digest)
 535                data->hdr.flags |= NVME_TCP_F_DDGST;
 536        data->hdr.hlen = sizeof(*data);
 537        data->hdr.pdo = data->hdr.hlen + hdgst;
 538        data->hdr.plen =
 539                cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
 540        data->ttag = pdu->ttag;
 541        data->command_id = rq->tag;
 542        data->data_offset = cpu_to_le32(req->data_sent);
 543        data->data_length = cpu_to_le32(req->pdu_len);
 544        return 0;
 545}
 546
 547static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
 548                struct nvme_tcp_r2t_pdu *pdu)
 549{
 550        struct nvme_tcp_request *req;
 551        struct request *rq;
 552        int ret;
 553
 554        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
 555        if (!rq) {
 556                dev_err(queue->ctrl->ctrl.device,
 557                        "queue %d tag %#x not found\n",
 558                        nvme_tcp_queue_id(queue), pdu->command_id);
 559                return -ENOENT;
 560        }
 561        req = blk_mq_rq_to_pdu(rq);
 562
 563        ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
 564        if (unlikely(ret))
 565                return ret;
 566
 567        req->state = NVME_TCP_SEND_H2C_PDU;
 568        req->offset = 0;
 569
 570        nvme_tcp_queue_request(req);
 571
 572        return 0;
 573}
 574
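/*
 * Receive (possibly partial) PDU header bytes from the skb into
 * queue->pdu.  Once the header is complete, verify the header digest,
 * prime the data digest if needed, and dispatch based on the PDU type
 * (C2HData, response capsule or R2T).
 */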
 575static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 576                unsigned int *offset, size_t *len)
 577{
 578        struct nvme_tcp_hdr *hdr;
 579        char *pdu = queue->pdu;
 580        size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
 581        int ret;
 582
 583        ret = skb_copy_bits(skb, *offset,
 584                &pdu[queue->pdu_offset], rcv_len);
 585        if (unlikely(ret))
 586                return ret;
 587
 588        queue->pdu_remaining -= rcv_len;
 589        queue->pdu_offset += rcv_len;
 590        *offset += rcv_len;
 591        *len -= rcv_len;
 592        if (queue->pdu_remaining)
 593                return 0;
 594
 595        hdr = queue->pdu;
 596        if (queue->hdr_digest) {
 597                ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
 598                if (unlikely(ret))
 599                        return ret;
 600        }
 601
 602
 603        if (queue->data_digest) {
 604                ret = nvme_tcp_check_ddgst(queue, queue->pdu);
 605                if (unlikely(ret))
 606                        return ret;
 607        }
 608
 609        switch (hdr->type) {
 610        case nvme_tcp_c2h_data:
 611                ret = nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
 612                break;
 613        case nvme_tcp_rsp:
 614                nvme_tcp_init_recv_ctx(queue);
 615                ret = nvme_tcp_handle_comp(queue, (void *)queue->pdu);
 616                break;
 617        case nvme_tcp_r2t:
 618                nvme_tcp_init_recv_ctx(queue);
 619                ret = nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
 620                break;
 621        default:
 622                dev_err(queue->ctrl->ctrl.device,
 623                        "unsupported pdu type (%d)\n", hdr->type);
 624                return -EINVAL;
 625        }
 626
 627        return ret;
 628}
 629
 630static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 631{
 632        union nvme_result res = {};
 633
 634        nvme_end_request(rq, cpu_to_le16(status << 1), res);
 635}
 636
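/*
 * Copy C2HData payload from the skb into the pages of the matching
 * request, walking the request's bios as needed and updating the
 * receive hash when data digest is enabled.  Once all data has been
 * consumed, either expect a data digest next or complete the request
 * if the controller indicated success.
 */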
 637static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
 638                              unsigned int *offset, size_t *len)
 639{
 640        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
 641        struct nvme_tcp_request *req;
 642        struct request *rq;
 643
 644        rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
 645        if (!rq) {
 646                dev_err(queue->ctrl->ctrl.device,
 647                        "queue %d tag %#x not found\n",
 648                        nvme_tcp_queue_id(queue), pdu->command_id);
 649                return -ENOENT;
 650        }
 651        req = blk_mq_rq_to_pdu(rq);
 652
 653        while (true) {
 654                int recv_len, ret;
 655
 656                recv_len = min_t(size_t, *len, queue->data_remaining);
 657                if (!recv_len)
 658                        break;
 659
 660                if (!iov_iter_count(&req->iter)) {
 661                        req->curr_bio = req->curr_bio->bi_next;
 662
 663                        /*
  664                         * If we don't have any bios it means that the controller
  665                         * sent more data than we requested, hence this is an error.
 666                         */
 667                        if (!req->curr_bio) {
 668                                dev_err(queue->ctrl->ctrl.device,
  669                                        "queue %d no space in request %#x\n",
 670                                        nvme_tcp_queue_id(queue), rq->tag);
 671                                nvme_tcp_init_recv_ctx(queue);
 672                                return -EIO;
 673                        }
 674                        nvme_tcp_init_iter(req, READ);
 675                }
 676
 677                /* we can read only from what is left in this bio */
 678                recv_len = min_t(size_t, recv_len,
 679                                iov_iter_count(&req->iter));
 680
 681                if (queue->data_digest)
 682                        ret = skb_copy_and_hash_datagram_iter(skb, *offset,
 683                                &req->iter, recv_len, queue->rcv_hash);
 684                else
 685                        ret = skb_copy_datagram_iter(skb, *offset,
 686                                        &req->iter, recv_len);
 687                if (ret) {
 688                        dev_err(queue->ctrl->ctrl.device,
  689                                "queue %d failed to copy request %#x data\n",
 690                                nvme_tcp_queue_id(queue), rq->tag);
 691                        return ret;
 692                }
 693
 694                *len -= recv_len;
 695                *offset += recv_len;
 696                queue->data_remaining -= recv_len;
 697        }
 698
 699        if (!queue->data_remaining) {
 700                if (queue->data_digest) {
 701                        nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
 702                        queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
 703                } else {
 704                        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
 705                                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
 706                        nvme_tcp_init_recv_ctx(queue);
 707                }
 708        }
 709
 710        return 0;
 711}
 712
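/*
 * Receive the 4-byte data digest that trails the C2HData payload and
 * compare it with the digest computed while the data was received.
 */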
 713static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
 714                struct sk_buff *skb, unsigned int *offset, size_t *len)
 715{
 716        struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
 717        char *ddgst = (char *)&queue->recv_ddgst;
 718        size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
 719        off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
 720        int ret;
 721
 722        ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
 723        if (unlikely(ret))
 724                return ret;
 725
 726        queue->ddgst_remaining -= recv_len;
 727        *offset += recv_len;
 728        *len -= recv_len;
 729        if (queue->ddgst_remaining)
 730                return 0;
 731
 732        if (queue->recv_ddgst != queue->exp_ddgst) {
 733                dev_err(queue->ctrl->ctrl.device,
 734                        "data digest error: recv %#x expected %#x\n",
 735                        le32_to_cpu(queue->recv_ddgst),
 736                        le32_to_cpu(queue->exp_ddgst));
 737                return -EIO;
 738        }
 739
 740        if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
 741                struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
 742                                                pdu->command_id);
 743
 744                nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
 745        }
 746
 747        nvme_tcp_init_recv_ctx(queue);
 748        return 0;
 749}
 750
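/*
 * tcp_read_sock() callback: consume the skb by running the per-queue
 * receive state machine (PDU header -> data -> data digest).  Any error
 * disables further reads and triggers error recovery.
 */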
 751static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
 752                             unsigned int offset, size_t len)
 753{
 754        struct nvme_tcp_queue *queue = desc->arg.data;
 755        size_t consumed = len;
 756        int result;
 757
 758        while (len) {
 759                switch (nvme_tcp_recv_state(queue)) {
 760                case NVME_TCP_RECV_PDU:
 761                        result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
 762                        break;
 763                case NVME_TCP_RECV_DATA:
 764                        result = nvme_tcp_recv_data(queue, skb, &offset, &len);
 765                        break;
 766                case NVME_TCP_RECV_DDGST:
 767                        result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
 768                        break;
 769                default:
 770                        result = -EFAULT;
 771                }
 772                if (result) {
 773                        dev_err(queue->ctrl->ctrl.device,
  774                                "receive failed: %d\n", result);
 775                        queue->rd_enabled = false;
 776                        nvme_tcp_error_recovery(&queue->ctrl->ctrl);
 777                        return result;
 778                }
 779        }
 780
 781        return consumed;
 782}
 783
 784static void nvme_tcp_data_ready(struct sock *sk)
 785{
 786        struct nvme_tcp_queue *queue;
 787
 788        read_lock(&sk->sk_callback_lock);
 789        queue = sk->sk_user_data;
 790        if (likely(queue && queue->rd_enabled))
 791                queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 792        read_unlock(&sk->sk_callback_lock);
 793}
 794
 795static void nvme_tcp_write_space(struct sock *sk)
 796{
 797        struct nvme_tcp_queue *queue;
 798
 799        read_lock_bh(&sk->sk_callback_lock);
 800        queue = sk->sk_user_data;
 801        if (likely(queue && sk_stream_is_writeable(sk))) {
 802                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 803                queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 804        }
 805        read_unlock_bh(&sk->sk_callback_lock);
 806}
 807
 808static void nvme_tcp_state_change(struct sock *sk)
 809{
 810        struct nvme_tcp_queue *queue;
 811
 812        read_lock(&sk->sk_callback_lock);
 813        queue = sk->sk_user_data;
 814        if (!queue)
 815                goto done;
 816
 817        switch (sk->sk_state) {
 818        case TCP_CLOSE:
 819        case TCP_CLOSE_WAIT:
 820        case TCP_LAST_ACK:
 821        case TCP_FIN_WAIT1:
 822        case TCP_FIN_WAIT2:
 824                nvme_tcp_error_recovery(&queue->ctrl->ctrl);
 825                break;
 826        default:
 827                dev_info(queue->ctrl->ctrl.device,
 828                        "queue %d socket state %d\n",
 829                        nvme_tcp_queue_id(queue), sk->sk_state);
 830        }
 831
 832        queue->state_change(sk);
 833done:
 834        read_unlock(&sk->sk_callback_lock);
 835}
 836
 837static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
 838{
 839        queue->request = NULL;
 840}
 841
 842static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
 843{
 844        nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
 845}
 846
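/*
 * Send the request payload page by page with kernel_sendpage(), falling
 * back to sock_no_sendpage() for slab pages which cannot be zero-copied,
 * and update the send hash when data digest is enabled.  On the last
 * fully sent chunk, either move to the DDGST state or finish the
 * request.
 */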
 847static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 848{
 849        struct nvme_tcp_queue *queue = req->queue;
 850
 851        while (true) {
 852                struct page *page = nvme_tcp_req_cur_page(req);
 853                size_t offset = nvme_tcp_req_cur_offset(req);
 854                size_t len = nvme_tcp_req_cur_length(req);
 855                bool last = nvme_tcp_pdu_last_send(req, len);
 856                int ret, flags = MSG_DONTWAIT;
 857
 858                if (last && !queue->data_digest)
 859                        flags |= MSG_EOR;
 860                else
 861                        flags |= MSG_MORE;
 862
 863                /* can't zcopy slab pages */
 864                if (unlikely(PageSlab(page))) {
 865                        ret = sock_no_sendpage(queue->sock, page, offset, len,
 866                                        flags);
 867                } else {
 868                        ret = kernel_sendpage(queue->sock, page, offset, len,
 869                                        flags);
 870                }
 871                if (ret <= 0)
 872                        return ret;
 873
 874                nvme_tcp_advance_req(req, ret);
 875                if (queue->data_digest)
 876                        nvme_tcp_ddgst_update(queue->snd_hash, page,
 877                                        offset, ret);
 878
  879                /* fully successful last write */
 880                if (last && ret == len) {
 881                        if (queue->data_digest) {
 882                                nvme_tcp_ddgst_final(queue->snd_hash,
 883                                        &req->ddgst);
 884                                req->state = NVME_TCP_SEND_DDGST;
 885                                req->offset = 0;
 886                        } else {
 887                                nvme_tcp_done_send_req(queue);
 888                        }
 889                        return 1;
 890                }
 891        }
 892        return -EAGAIN;
 893}
 894
 895static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
 896{
 897        struct nvme_tcp_queue *queue = req->queue;
 898        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 899        bool inline_data = nvme_tcp_has_inline_data(req);
 900        int flags = MSG_DONTWAIT | (inline_data ? MSG_MORE : MSG_EOR);
 901        u8 hdgst = nvme_tcp_hdgst_len(queue);
 902        int len = sizeof(*pdu) + hdgst - req->offset;
 903        int ret;
 904
 905        if (queue->hdr_digest && !req->offset)
 906                nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 907
 908        ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
  909                        offset_in_page(pdu) + req->offset, len, flags);
 910        if (unlikely(ret <= 0))
 911                return ret;
 912
 913        len -= ret;
 914        if (!len) {
 915                if (inline_data) {
 916                        req->state = NVME_TCP_SEND_DATA;
 917                        if (queue->data_digest)
 918                                crypto_ahash_init(queue->snd_hash);
 919                        nvme_tcp_init_iter(req, WRITE);
 920                } else {
 921                        nvme_tcp_done_send_req(queue);
 922                }
 923                return 1;
 924        }
 925        req->offset += ret;
 926
 927        return -EAGAIN;
 928}
 929
 930static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
 931{
 932        struct nvme_tcp_queue *queue = req->queue;
 933        struct nvme_tcp_data_pdu *pdu = req->pdu;
 934        u8 hdgst = nvme_tcp_hdgst_len(queue);
 935        int len = sizeof(*pdu) - req->offset + hdgst;
 936        int ret;
 937
 938        if (queue->hdr_digest && !req->offset)
 939                nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
 940
 941        ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
 942                        offset_in_page(pdu) + req->offset, len,
 943                        MSG_DONTWAIT | MSG_MORE);
 944        if (unlikely(ret <= 0))
 945                return ret;
 946
 947        len -= ret;
 948        if (!len) {
 949                req->state = NVME_TCP_SEND_DATA;
 950                if (queue->data_digest)
 951                        crypto_ahash_init(queue->snd_hash);
 952                if (!req->data_sent)
 953                        nvme_tcp_init_iter(req, WRITE);
 954                return 1;
 955        }
 956        req->offset += ret;
 957
 958        return -EAGAIN;
 959}
 960
 961static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
 962{
 963        struct nvme_tcp_queue *queue = req->queue;
 964        int ret;
 965        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
 966        struct kvec iov = {
 967                .iov_base = &req->ddgst + req->offset,
 968                .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
 969        };
 970
 971        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
 972        if (unlikely(ret <= 0))
 973                return ret;
 974
 975        if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
 976                nvme_tcp_done_send_req(queue);
 977                return 1;
 978        }
 979
 980        req->offset += ret;
 981        return -EAGAIN;
 982}
 983
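/*
 * Advance the current request through the send state machine: command
 * PDU, then (for R2T-driven writes) the H2C data PDU, the data itself
 * and finally the data digest.  Returns 1 if progress was made, 0 if
 * there is nothing to send or the socket is full, negative on error.
 */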
 984static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
 985{
 986        struct nvme_tcp_request *req;
 987        int ret = 1;
 988
 989        if (!queue->request) {
 990                queue->request = nvme_tcp_fetch_request(queue);
 991                if (!queue->request)
 992                        return 0;
 993        }
 994        req = queue->request;
 995
 996        if (req->state == NVME_TCP_SEND_CMD_PDU) {
 997                ret = nvme_tcp_try_send_cmd_pdu(req);
 998                if (ret <= 0)
 999                        goto done;
1000                if (!nvme_tcp_has_inline_data(req))
1001                        return ret;
1002        }
1003
1004        if (req->state == NVME_TCP_SEND_H2C_PDU) {
1005                ret = nvme_tcp_try_send_data_pdu(req);
1006                if (ret <= 0)
1007                        goto done;
1008        }
1009
1010        if (req->state == NVME_TCP_SEND_DATA) {
1011                ret = nvme_tcp_try_send_data(req);
1012                if (ret <= 0)
1013                        goto done;
1014        }
1015
1016        if (req->state == NVME_TCP_SEND_DDGST)
1017                ret = nvme_tcp_try_send_ddgst(req);
1018done:
1019        if (ret == -EAGAIN)
1020                ret = 0;
1021        return ret;
1022}
1023
1024static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1025{
1026        struct sock *sk = queue->sock->sk;
1027        read_descriptor_t rd_desc;
1028        int consumed;
1029
1030        rd_desc.arg.data = queue;
1031        rd_desc.count = 1;
1032        lock_sock(sk);
1033        consumed = tcp_read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1034        release_sock(sk);
1035        return consumed;
1036}
1037
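/*
 * Per-queue I/O work: alternate between sending queued requests and
 * reading from the socket until nothing is pending or the 1ms time
 * quota expires, in which case the work re-queues itself.
 */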
1038static void nvme_tcp_io_work(struct work_struct *w)
1039{
1040        struct nvme_tcp_queue *queue =
1041                container_of(w, struct nvme_tcp_queue, io_work);
 1042        unsigned long deadline = jiffies + msecs_to_jiffies(1);
1043
1044        do {
1045                bool pending = false;
1046                int result;
1047
1048                result = nvme_tcp_try_send(queue);
1049                if (result > 0) {
1050                        pending = true;
1051                } else if (unlikely(result < 0)) {
1052                        dev_err(queue->ctrl->ctrl.device,
1053                                "failed to send request %d\n", result);
1054                        if (result != -EPIPE)
1055                                nvme_tcp_fail_request(queue->request);
1056                        nvme_tcp_done_send_req(queue);
1057                        return;
1058                }
1059
1060                result = nvme_tcp_try_recv(queue);
1061                if (result > 0)
1062                        pending = true;
1063
1064                if (!pending)
1065                        return;
1066
 1067        } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1068
1069        queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1070}
1071
1072static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1073{
1074        struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1075
1076        ahash_request_free(queue->rcv_hash);
1077        ahash_request_free(queue->snd_hash);
1078        crypto_free_ahash(tfm);
1079}
1080
1081static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1082{
1083        struct crypto_ahash *tfm;
1084
1085        tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1086        if (IS_ERR(tfm))
1087                return PTR_ERR(tfm);
1088
1089        queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1090        if (!queue->snd_hash)
1091                goto free_tfm;
1092        ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1093
1094        queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1095        if (!queue->rcv_hash)
1096                goto free_snd_hash;
1097        ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1098
1099        return 0;
1100free_snd_hash:
1101        ahash_request_free(queue->snd_hash);
1102free_tfm:
1103        crypto_free_ahash(tfm);
1104        return -ENOMEM;
1105}
1106
1107static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1108{
1109        struct nvme_tcp_request *async = &ctrl->async_req;
1110
1111        page_frag_free(async->pdu);
1112}
1113
1114static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1115{
1116        struct nvme_tcp_queue *queue = &ctrl->queues[0];
1117        struct nvme_tcp_request *async = &ctrl->async_req;
1118        u8 hdgst = nvme_tcp_hdgst_len(queue);
1119
1120        async->pdu = page_frag_alloc(&queue->pf_cache,
1121                sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1122                GFP_KERNEL | __GFP_ZERO);
1123        if (!async->pdu)
1124                return -ENOMEM;
1125
1126        async->queue = &ctrl->queues[0];
1127        return 0;
1128}
1129
1130static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1131{
1132        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1133        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1134
1135        if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1136                return;
1137
1138        if (queue->hdr_digest || queue->data_digest)
1139                nvme_tcp_free_crypto(queue);
1140
1141        sock_release(queue->sock);
1142        kfree(queue->pdu);
1143}
1144
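/*
 * NVMe/TCP connection initialization: send an ICReq PDU and validate
 * the returned ICResp (type, length, PFV, digest settings and CPDA)
 * against what this host supports.
 */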
1145static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1146{
1147        struct nvme_tcp_icreq_pdu *icreq;
1148        struct nvme_tcp_icresp_pdu *icresp;
1149        struct msghdr msg = {};
1150        struct kvec iov;
1151        bool ctrl_hdgst, ctrl_ddgst;
1152        int ret;
1153
1154        icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1155        if (!icreq)
1156                return -ENOMEM;
1157
1158        icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1159        if (!icresp) {
1160                ret = -ENOMEM;
1161                goto free_icreq;
1162        }
1163
1164        icreq->hdr.type = nvme_tcp_icreq;
1165        icreq->hdr.hlen = sizeof(*icreq);
1166        icreq->hdr.pdo = 0;
1167        icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1168        icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1169        icreq->maxr2t = 0; /* single inflight r2t supported */
1170        icreq->hpda = 0; /* no alignment constraint */
1171        if (queue->hdr_digest)
1172                icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1173        if (queue->data_digest)
1174                icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1175
1176        iov.iov_base = icreq;
1177        iov.iov_len = sizeof(*icreq);
1178        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1179        if (ret < 0)
1180                goto free_icresp;
1181
1182        memset(&msg, 0, sizeof(msg));
1183        iov.iov_base = icresp;
1184        iov.iov_len = sizeof(*icresp);
1185        ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1186                        iov.iov_len, msg.msg_flags);
1187        if (ret < 0)
1188                goto free_icresp;
1189
1190        ret = -EINVAL;
1191        if (icresp->hdr.type != nvme_tcp_icresp) {
1192                pr_err("queue %d: bad type returned %d\n",
1193                        nvme_tcp_queue_id(queue), icresp->hdr.type);
1194                goto free_icresp;
1195        }
1196
1197        if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1198                pr_err("queue %d: bad pdu length returned %d\n",
1199                        nvme_tcp_queue_id(queue), icresp->hdr.plen);
1200                goto free_icresp;
1201        }
1202
1203        if (icresp->pfv != NVME_TCP_PFV_1_0) {
1204                pr_err("queue %d: bad pfv returned %d\n",
1205                        nvme_tcp_queue_id(queue), icresp->pfv);
1206                goto free_icresp;
1207        }
1208
1209        ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1210        if ((queue->data_digest && !ctrl_ddgst) ||
1211            (!queue->data_digest && ctrl_ddgst)) {
1212                pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1213                        nvme_tcp_queue_id(queue),
1214                        queue->data_digest ? "enabled" : "disabled",
1215                        ctrl_ddgst ? "enabled" : "disabled");
1216                goto free_icresp;
1217        }
1218
1219        ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1220        if ((queue->hdr_digest && !ctrl_hdgst) ||
1221            (!queue->hdr_digest && ctrl_hdgst)) {
1222                pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1223                        nvme_tcp_queue_id(queue),
1224                        queue->hdr_digest ? "enabled" : "disabled",
1225                        ctrl_hdgst ? "enabled" : "disabled");
1226                goto free_icresp;
1227        }
1228
1229        if (icresp->cpda != 0) {
1230                pr_err("queue %d: unsupported cpda returned %d\n",
1231                        nvme_tcp_queue_id(queue), icresp->cpda);
1232                goto free_icresp;
1233        }
1234
1235        ret = 0;
1236free_icresp:
1237        kfree(icresp);
1238free_icreq:
1239        kfree(icreq);
1240        return ret;
1241}
1242
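/*
 * Allocate and connect one queue's TCP socket: set socket options
 * (single SYN retry, TCP_NODELAY, abortive close via SO_LINGER),
 * optionally bind to the host traddr, allocate digest contexts and the
 * receive PDU buffer, connect to the controller, run the ICReq/ICResp
 * handshake and finally install our socket callbacks.
 */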
1243static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1244                int qid, size_t queue_size)
1245{
1246        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1247        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1248        struct linger sol = { .l_onoff = 1, .l_linger = 0 };
1249        int ret, opt, rcv_pdu_size, n;
1250
1251        queue->ctrl = ctrl;
1252        INIT_LIST_HEAD(&queue->send_list);
1253        spin_lock_init(&queue->lock);
1254        INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1255        queue->queue_size = queue_size;
1256
1257        if (qid > 0)
1258                queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1259        else
1260                queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1261                                                NVME_TCP_ADMIN_CCSZ;
1262
1263        ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1264                        IPPROTO_TCP, &queue->sock);
1265        if (ret) {
1266                dev_err(ctrl->ctrl.device,
1267                        "failed to create socket: %d\n", ret);
1268                return ret;
1269        }
1270
1271        /* Single syn retry */
1272        opt = 1;
1273        ret = kernel_setsockopt(queue->sock, IPPROTO_TCP, TCP_SYNCNT,
1274                        (char *)&opt, sizeof(opt));
1275        if (ret) {
1276                dev_err(ctrl->ctrl.device,
1277                        "failed to set TCP_SYNCNT sock opt %d\n", ret);
1278                goto err_sock;
1279        }
1280
1281        /* Set TCP no delay */
1282        opt = 1;
1283        ret = kernel_setsockopt(queue->sock, IPPROTO_TCP,
1284                        TCP_NODELAY, (char *)&opt, sizeof(opt));
1285        if (ret) {
1286                dev_err(ctrl->ctrl.device,
1287                        "failed to set TCP_NODELAY sock opt %d\n", ret);
1288                goto err_sock;
1289        }
1290
1291        /*
1292         * Cleanup whatever is sitting in the TCP transmit queue on socket
1293         * close. This is done to prevent stale data from being sent should
1294         * the network connection be restored before TCP times out.
1295         */
1296        ret = kernel_setsockopt(queue->sock, SOL_SOCKET, SO_LINGER,
1297                        (char *)&sol, sizeof(sol));
1298        if (ret) {
1299                dev_err(ctrl->ctrl.device,
1300                        "failed to set SO_LINGER sock opt %d\n", ret);
1301                goto err_sock;
1302        }
1303
1304        queue->sock->sk->sk_allocation = GFP_ATOMIC;
1305        if (!qid)
1306                n = 0;
1307        else
1308                n = (qid - 1) % num_online_cpus();
1309        queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1310        queue->request = NULL;
1311        queue->data_remaining = 0;
1312        queue->ddgst_remaining = 0;
1313        queue->pdu_remaining = 0;
1314        queue->pdu_offset = 0;
1315        sk_set_memalloc(queue->sock->sk);
1316
1317        if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
1318                ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1319                        sizeof(ctrl->src_addr));
1320                if (ret) {
1321                        dev_err(ctrl->ctrl.device,
1322                                "failed to bind queue %d socket %d\n",
1323                                qid, ret);
1324                        goto err_sock;
1325                }
1326        }
1327
1328        queue->hdr_digest = nctrl->opts->hdr_digest;
1329        queue->data_digest = nctrl->opts->data_digest;
1330        if (queue->hdr_digest || queue->data_digest) {
1331                ret = nvme_tcp_alloc_crypto(queue);
1332                if (ret) {
1333                        dev_err(ctrl->ctrl.device,
1334                                "failed to allocate queue %d crypto\n", qid);
1335                        goto err_sock;
1336                }
1337        }
1338
1339        rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1340                        nvme_tcp_hdgst_len(queue);
1341        queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1342        if (!queue->pdu) {
1343                ret = -ENOMEM;
1344                goto err_crypto;
1345        }
1346
1347        dev_dbg(ctrl->ctrl.device, "connecting queue %d\n",
1348                        nvme_tcp_queue_id(queue));
1349
1350        ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1351                sizeof(ctrl->addr), 0);
1352        if (ret) {
1353                dev_err(ctrl->ctrl.device,
1354                        "failed to connect socket: %d\n", ret);
1355                goto err_rcv_pdu;
1356        }
1357
1358        ret = nvme_tcp_init_connection(queue);
1359        if (ret)
1360                goto err_init_connect;
1361
1362        queue->rd_enabled = true;
1363        set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1364        nvme_tcp_init_recv_ctx(queue);
1365
1366        write_lock_bh(&queue->sock->sk->sk_callback_lock);
1367        queue->sock->sk->sk_user_data = queue;
1368        queue->state_change = queue->sock->sk->sk_state_change;
1369        queue->data_ready = queue->sock->sk->sk_data_ready;
1370        queue->write_space = queue->sock->sk->sk_write_space;
1371        queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1372        queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1373        queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1374        write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1375
1376        return 0;
1377
1378err_init_connect:
1379        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1380err_rcv_pdu:
1381        kfree(queue->pdu);
1382err_crypto:
1383        if (queue->hdr_digest || queue->data_digest)
1384                nvme_tcp_free_crypto(queue);
1385err_sock:
1386        sock_release(queue->sock);
1387        queue->sock = NULL;
1388        return ret;
1389}
1390
1391static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1392{
1393        struct socket *sock = queue->sock;
1394
1395        write_lock_bh(&sock->sk->sk_callback_lock);
1396        sock->sk->sk_user_data  = NULL;
1397        sock->sk->sk_data_ready = queue->data_ready;
1398        sock->sk->sk_state_change = queue->state_change;
1399        sock->sk->sk_write_space  = queue->write_space;
1400        write_unlock_bh(&sock->sk->sk_callback_lock);
1401}
1402
1403static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1404{
1405        kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1406        nvme_tcp_restore_sock_calls(queue);
1407        cancel_work_sync(&queue->io_work);
1408}
1409
1410static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1411{
1412        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1413        struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1414
1415        if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1416                return;
1417
1418        __nvme_tcp_stop_queue(queue);
1419}
1420
1421static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1422{
1423        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1424        int ret;
1425
1426        if (idx)
1427                ret = nvmf_connect_io_queue(nctrl, idx, false);
1428        else
1429                ret = nvmf_connect_admin_queue(nctrl);
1430
1431        if (!ret) {
1432                set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1433        } else {
1434                if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1435                        __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1436                dev_err(nctrl->device,
1437                        "failed to connect queue: %d ret=%d\n", idx, ret);
1438        }
1439        return ret;
1440}
1441
1442static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
1443                bool admin)
1444{
1445        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1446        struct blk_mq_tag_set *set;
1447        int ret;
1448
1449        if (admin) {
1450                set = &ctrl->admin_tag_set;
1451                memset(set, 0, sizeof(*set));
1452                set->ops = &nvme_tcp_admin_mq_ops;
1453                set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
1454                set->reserved_tags = 2; /* connect + keep-alive */
1455                set->numa_node = NUMA_NO_NODE;
1456                set->cmd_size = sizeof(struct nvme_tcp_request);
1457                set->driver_data = ctrl;
1458                set->nr_hw_queues = 1;
1459                set->timeout = ADMIN_TIMEOUT;
1460        } else {
1461                set = &ctrl->tag_set;
1462                memset(set, 0, sizeof(*set));
1463                set->ops = &nvme_tcp_mq_ops;
1464                set->queue_depth = nctrl->sqsize + 1;
1465                set->reserved_tags = 1; /* fabric connect */
1466                set->numa_node = NUMA_NO_NODE;
1467                set->flags = BLK_MQ_F_SHOULD_MERGE;
1468                set->cmd_size = sizeof(struct nvme_tcp_request);
1469                set->driver_data = ctrl;
1470                set->nr_hw_queues = nctrl->queue_count - 1;
1471                set->timeout = NVME_IO_TIMEOUT;
1472                set->nr_maps = 2 /* default + read */;
1473        }
1474
1475        ret = blk_mq_alloc_tag_set(set);
1476        if (ret)
1477                return ERR_PTR(ret);
1478
1479        return set;
1480}
1481
1482static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1483{
1484        if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1485                nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1486                to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1487        }
1488
1489        nvme_tcp_free_queue(ctrl, 0);
1490}
1491
1492static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1493{
1494        int i;
1495
1496        for (i = 1; i < ctrl->queue_count; i++)
1497                nvme_tcp_free_queue(ctrl, i);
1498}
1499
1500static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1501{
1502        int i;
1503
1504        for (i = 1; i < ctrl->queue_count; i++)
1505                nvme_tcp_stop_queue(ctrl, i);
1506}
1507
1508static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl)
1509{
1510        int i, ret = 0;
1511
1512        for (i = 1; i < ctrl->queue_count; i++) {
1513                ret = nvme_tcp_start_queue(ctrl, i);
1514                if (ret)
1515                        goto out_stop_queues;
1516        }
1517
1518        return 0;
1519
1520out_stop_queues:
1521        for (i--; i >= 1; i--)
1522                nvme_tcp_stop_queue(ctrl, i);
1523        return ret;
1524}
1525
1526static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1527{
1528        int ret;
1529
1530        ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
1531        if (ret)
1532                return ret;
1533
1534        ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1535        if (ret)
1536                goto out_free_queue;
1537
1538        return 0;
1539
1540out_free_queue:
1541        nvme_tcp_free_queue(ctrl, 0);
1542        return ret;
1543}
1544
1545static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1546{
1547        int i, ret;
1548
1549        for (i = 1; i < ctrl->queue_count; i++) {
1550                ret = nvme_tcp_alloc_queue(ctrl, i,
1551                                ctrl->sqsize + 1);
1552                if (ret)
1553                        goto out_free_queues;
1554        }
1555
1556        return 0;
1557
1558out_free_queues:
1559        for (i--; i >= 1; i--)
1560                nvme_tcp_free_queue(ctrl, i);
1561
1562        return ret;
1563}
1564
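/*
 * The number of I/O queues requested from the target is the read/default
 * budget plus the write budget, each capped at the number of online CPUs.
 * For example (illustrative numbers): nr_io_queues=8 and nr_write_queues=4
 * on a host with 6 online CPUs yields min(8, 6) + min(4, 6) = 10 requested
 * queues.
 */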
1565static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1566{
1567        unsigned int nr_io_queues;
1568
1569        nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1570        nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1571
1572        return nr_io_queues;
1573}
1574
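/*
 * Split the granted queue count between the DEFAULT (write) and READ hctx
 * types.  Continuing the illustrative example above: if 10 queues are
 * granted for nr_io_queues=8 plus nr_write_queues=4, the READ set gets 8
 * queues and the remaining 2 become dedicated DEFAULT (write) queues; with
 * too few granted queues, reads and writes simply share the DEFAULT set.
 */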
1575static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1576                unsigned int nr_io_queues)
1577{
1578        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1579        struct nvmf_ctrl_options *opts = nctrl->opts;
1580
1581        if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1582                /*
1583                 * separate read/write queues
1584                 * hand out dedicated default queues only after we have
1585                 * sufficient read queues.
1586                 */
1587                ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1588                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1589                ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1590                        min(opts->nr_write_queues, nr_io_queues);
1591                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1592        } else {
1593                /*
1594                 * shared read/write queues
1595                 * either no write queues were requested, or we don't have
1596                 * sufficient queue count to have dedicated default queues.
1597                 */
1598                ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1599                        min(opts->nr_io_queues, nr_io_queues);
1600                nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1601        }
1602}
1603
1604static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1605{
1606        unsigned int nr_io_queues;
1607        int ret;
1608
1609        nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1610        ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1611        if (ret)
1612                return ret;
1613
1614        ctrl->queue_count = nr_io_queues + 1;
1615        if (ctrl->queue_count < 2)
1616                return 0;
1617
1618        dev_info(ctrl->device,
1619                "creating %d I/O queues.\n", nr_io_queues);
1620
1621        nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1622
1623        return __nvme_tcp_alloc_io_queues(ctrl);
1624}
1625
1626static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1627{
1628        nvme_tcp_stop_io_queues(ctrl);
1629        if (remove) {
1630                blk_cleanup_queue(ctrl->connect_q);
1631                blk_mq_free_tag_set(ctrl->tagset);
1632        }
1633        nvme_tcp_free_io_queues(ctrl);
1634}
1635
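/*
 * I/O queue bring-up.  On first setup (new == true) this also allocates the
 * I/O tag set and the connect_q request queue used for the fabrics Connect
 * commands; on reset/reconnect the existing tag set is reused and only
 * nr_hw_queues is refreshed before the queues are restarted.
 */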
1636static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1637{
1638        int ret;
1639
1640        ret = nvme_tcp_alloc_io_queues(ctrl);
1641        if (ret)
1642                return ret;
1643
1644        if (new) {
1645                ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false);
1646                if (IS_ERR(ctrl->tagset)) {
1647                        ret = PTR_ERR(ctrl->tagset);
1648                        goto out_free_io_queues;
1649                }
1650
1651                ctrl->connect_q = blk_mq_init_queue(ctrl->tagset);
1652                if (IS_ERR(ctrl->connect_q)) {
1653                        ret = PTR_ERR(ctrl->connect_q);
1654                        goto out_free_tag_set;
1655                }
1656        } else {
1657                blk_mq_update_nr_hw_queues(ctrl->tagset,
1658                        ctrl->queue_count - 1);
1659        }
1660
1661        ret = nvme_tcp_start_io_queues(ctrl);
1662        if (ret)
1663                goto out_cleanup_connect_q;
1664
1665        return 0;
1666
1667out_cleanup_connect_q:
1668        if (new)
1669                blk_cleanup_queue(ctrl->connect_q);
1670out_free_tag_set:
1671        if (new)
1672                blk_mq_free_tag_set(ctrl->tagset);
1673out_free_io_queues:
1674        nvme_tcp_free_io_queues(ctrl);
1675        return ret;
1676}
1677
1678static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1679{
1680        nvme_tcp_stop_queue(ctrl, 0);
1681        if (remove) {
1682                blk_cleanup_queue(ctrl->admin_q);
1683                blk_mq_free_tag_set(ctrl->admin_tagset);
1684        }
1685        nvme_tcp_free_admin_queue(ctrl);
1686}
1687
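/*
 * Admin bring-up sequence: allocate the admin queue (and, for a new
 * controller, the admin tag set and request queue), start it with a fabrics
 * Connect, then read the CAP register, enable the controller and run
 * nvme_init_identify().  Each failure point unwinds the steps before it.
 */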
1688static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1689{
1690        int error;
1691
1692        error = nvme_tcp_alloc_admin_queue(ctrl);
1693        if (error)
1694                return error;
1695
1696        if (new) {
1697                ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true);
1698                if (IS_ERR(ctrl->admin_tagset)) {
1699                        error = PTR_ERR(ctrl->admin_tagset);
1700                        goto out_free_queue;
1701                }
1702
1703                ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset);
1704                if (IS_ERR(ctrl->admin_q)) {
1705                        error = PTR_ERR(ctrl->admin_q);
1706                        goto out_free_tagset;
1707                }
1708        }
1709
1710        error = nvme_tcp_start_queue(ctrl, 0);
1711        if (error)
1712                goto out_cleanup_queue;
1713
1714        error = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap);
1715        if (error) {
1716                dev_err(ctrl->device,
1717                        "prop_get NVME_REG_CAP failed\n");
1718                goto out_stop_queue;
1719        }
1720
1721        ctrl->sqsize = min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
1722
1723        error = nvme_enable_ctrl(ctrl, ctrl->cap);
1724        if (error)
1725                goto out_stop_queue;
1726
1727        error = nvme_init_identify(ctrl);
1728        if (error)
1729                goto out_stop_queue;
1730
1731        return 0;
1732
1733out_stop_queue:
1734        nvme_tcp_stop_queue(ctrl, 0);
1735out_cleanup_queue:
1736        if (new)
1737                blk_cleanup_queue(ctrl->admin_q);
1738out_free_tagset:
1739        if (new)
1740                blk_mq_free_tag_set(ctrl->admin_tagset);
1741out_free_queue:
1742        nvme_tcp_free_admin_queue(ctrl);
1743        return error;
1744}
1745
1746static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1747                bool remove)
1748{
1749        blk_mq_quiesce_queue(ctrl->admin_q);
1750        nvme_tcp_stop_queue(ctrl, 0);
1751        if (ctrl->admin_tagset)
1752                blk_mq_tagset_busy_iter(ctrl->admin_tagset,
1753                        nvme_cancel_request, ctrl);
1754        blk_mq_unquiesce_queue(ctrl->admin_q);
1755        nvme_tcp_destroy_admin_queue(ctrl, remove);
1756}
1757
1758static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1759                bool remove)
1760{
1761        if (ctrl->queue_count <= 1)
1762                return;
1763        nvme_stop_queues(ctrl);
1764        nvme_tcp_stop_io_queues(ctrl);
1765        if (ctrl->tagset)
1766                blk_mq_tagset_busy_iter(ctrl->tagset,
1767                        nvme_cancel_request, ctrl);
1768        if (remove)
1769                nvme_start_queues(ctrl);
1770        nvme_tcp_destroy_io_queues(ctrl, remove);
1771}
1772
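/*
 * Choose between scheduling another reconnect attempt and deleting the
 * controller.  nvmf_should_reconnect() typically allows retries until the
 * ctrl_loss_tmo budget is exhausted; e.g. (illustrative numbers) with
 * reconnect_delay=10s and ctrl_loss_tmo=600s, roughly 60 attempts are made
 * before the controller is removed.
 */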
1773static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
1774{
1775        /* If we are resetting/deleting then do nothing */
1776        if (ctrl->state != NVME_CTRL_CONNECTING) {
1777                WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
1778                        ctrl->state == NVME_CTRL_LIVE);
1779                return;
1780        }
1781
1782        if (nvmf_should_reconnect(ctrl)) {
1783                dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
1784                        ctrl->opts->reconnect_delay);
1785                queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
1786                                ctrl->opts->reconnect_delay * HZ);
1787        } else {
1788                dev_info(ctrl->device, "Removing controller...\n");
1789                nvme_delete_ctrl(ctrl);
1790        }
1791}
1792
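/*
 * Common setup path for both a newly created controller and a
 * reset/reconnect.  It brings up the admin queue, sanity-checks the
 * negotiated parameters (icdoff, queue sizes), brings up the I/O queues and
 * moves the controller to LIVE.  For example (illustrative numbers), asking
 * for queue_size=128 against a target whose MQES limits sqsize+1 to 64 only
 * logs a warning and proceeds with the smaller depth.
 */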
1793static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
1794{
1795        struct nvmf_ctrl_options *opts = ctrl->opts;
1796        int ret = -EINVAL;
1797
1798        ret = nvme_tcp_configure_admin_queue(ctrl, new);
1799        if (ret)
1800                return ret;
1801
1802        if (ctrl->icdoff) {
1803                dev_err(ctrl->device, "icdoff is not supported!\n");
                    ret = -EINVAL;
1804                goto destroy_admin;
1805        }
1806
1807        if (opts->queue_size > ctrl->sqsize + 1)
1808                dev_warn(ctrl->device,
1809                        "queue_size %zu > ctrl sqsize %u, clamping down\n",
1810                        opts->queue_size, ctrl->sqsize + 1);
1811
1812        if (ctrl->sqsize + 1 > ctrl->maxcmd) {
1813                dev_warn(ctrl->device,
1814                        "sqsize %u > ctrl maxcmd %u, clamping down\n",
1815                        ctrl->sqsize + 1, ctrl->maxcmd);
1816                ctrl->sqsize = ctrl->maxcmd - 1;
1817        }
1818
1819        if (ctrl->queue_count > 1) {
1820                ret = nvme_tcp_configure_io_queues(ctrl, new);
1821                if (ret)
1822                        goto destroy_admin;
1823        }
1824
1825        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
1826                /* state change failure is ok if we're in DELETING state */
1827                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1828                ret = -EINVAL;
1829                goto destroy_io;
1830        }
1831
1832        nvme_start_ctrl(ctrl);
1833        return 0;
1834
1835destroy_io:
1836        if (ctrl->queue_count > 1)
1837                nvme_tcp_destroy_io_queues(ctrl, new);
1838destroy_admin:
1839        nvme_tcp_stop_queue(ctrl, 0);
1840        nvme_tcp_destroy_admin_queue(ctrl, new);
1841        return ret;
1842}
1843
1844static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
1845{
1846        struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
1847                        struct nvme_tcp_ctrl, connect_work);
1848        struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1849
1850        ++ctrl->nr_reconnects;
1851
1852        if (nvme_tcp_setup_ctrl(ctrl, false))
1853                goto requeue;
1854
1855        dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
1856                        ctrl->nr_reconnects);
1857
1858        ctrl->nr_reconnects = 0;
1859
1860        return;
1861
1862requeue:
1863        dev_info(ctrl->device, "Failed reconnect attempt %d\n",
1864                        ctrl->nr_reconnects);
1865        nvme_tcp_reconnect_or_remove(ctrl);
1866}
1867
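/*
 * Error recovery: stop keep-alive, tear down the I/O and admin queues
 * without freeing their tag sets, unquiesce so that pending requests fail
 * fast, move the controller to CONNECTING and then either reconnect or
 * remove it according to the reconnect policy above.
 */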
1868static void nvme_tcp_error_recovery_work(struct work_struct *work)
1869{
1870        struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
1871                                struct nvme_tcp_ctrl, err_work);
1872        struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
1873
1874        nvme_stop_keep_alive(ctrl);
1875        nvme_tcp_teardown_io_queues(ctrl, false);
1876        /* unquiesce so that pending requests fail fast */
1877        nvme_start_queues(ctrl);
1878        nvme_tcp_teardown_admin_queue(ctrl, false);
1879
1880        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1881                /* state change failure is ok if we're in DELETING state */
1882                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1883                return;
1884        }
1885
1886        nvme_tcp_reconnect_or_remove(ctrl);
1887}
1888
1889static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
1890{
1891        cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work);
1892        cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
1893
1894        nvme_tcp_teardown_io_queues(ctrl, shutdown);
1895        if (shutdown)
1896                nvme_shutdown_ctrl(ctrl);
1897        else
1898                nvme_disable_ctrl(ctrl, ctrl->cap);
1899        nvme_tcp_teardown_admin_queue(ctrl, shutdown);
1900}
1901
1902static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
1903{
1904        nvme_tcp_teardown_ctrl(ctrl, true);
1905}
1906
1907static void nvme_reset_ctrl_work(struct work_struct *work)
1908{
1909        struct nvme_ctrl *ctrl =
1910                container_of(work, struct nvme_ctrl, reset_work);
1911
1912        nvme_stop_ctrl(ctrl);
1913        nvme_tcp_teardown_ctrl(ctrl, false);
1914
1915        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
1916                /* state change failure is ok if we're in DELETING state */
1917                WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING);
1918                return;
1919        }
1920
1921        if (nvme_tcp_setup_ctrl(ctrl, false))
1922                goto out_fail;
1923
1924        return;
1925
1926out_fail:
1927        ++ctrl->nr_reconnects;
1928        nvme_tcp_reconnect_or_remove(ctrl);
1929}
1930
1931static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
1932{
1933        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1934
1935        if (list_empty(&ctrl->list))
1936                goto free_ctrl;
1937
1938        mutex_lock(&nvme_tcp_ctrl_mutex);
1939        list_del(&ctrl->list);
1940        mutex_unlock(&nvme_tcp_ctrl_mutex);
1941
1942        nvmf_free_options(nctrl->opts);
1943free_ctrl:
1944        kfree(ctrl->queues);
1945        kfree(ctrl);
1946}
1947
1948static void nvme_tcp_set_sg_null(struct nvme_command *c)
1949{
1950        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1951
1952        sg->addr = 0;
1953        sg->length = 0;
1954        sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1955                        NVME_SGL_FMT_TRANSPORT_A;
1956}
1957
1958static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
1959                struct nvme_command *c, u32 data_len)
1960{
1961        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1962
1963        sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
1964        sg->length = cpu_to_le32(data_len);
1965        sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1966}
1967
1968static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
1969                u32 data_len)
1970{
1971        struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
1972
1973        sg->addr = 0;
1974        sg->length = cpu_to_le32(data_len);
1975        sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
1976                        NVME_SGL_FMT_TRANSPORT_A;
1977}
1978
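/*
 * AER submission uses the pre-allocated async_req PDU instead of a regular
 * blk-mq request, and tags it with command_id NVME_AQ_BLK_MQ_DEPTH, a value
 * outside the admin tag range so the completion path can distinguish it
 * from ordinary admin commands.
 */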
1979static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
1980{
1981        struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
1982        struct nvme_tcp_queue *queue = &ctrl->queues[0];
1983        struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
1984        struct nvme_command *cmd = &pdu->cmd;
1985        u8 hdgst = nvme_tcp_hdgst_len(queue);
1986
1987        memset(pdu, 0, sizeof(*pdu));
1988        pdu->hdr.type = nvme_tcp_cmd;
1989        if (queue->hdr_digest)
1990                pdu->hdr.flags |= NVME_TCP_F_HDGST;
1991        pdu->hdr.hlen = sizeof(*pdu);
1992        pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
1993
1994        cmd->common.opcode = nvme_admin_async_event;
1995        cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
1996        cmd->common.flags |= NVME_CMD_SGL_METABUF;
1997        nvme_tcp_set_sg_null(cmd);
1998
1999        ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2000        ctrl->async_req.offset = 0;
2001        ctrl->async_req.curr_bio = NULL;
2002        ctrl->async_req.data_len = 0;
2003
2004        nvme_tcp_queue_request(&ctrl->async_req);
2005}
2006
2007static enum blk_eh_timer_return
2008nvme_tcp_timeout(struct request *rq, bool reserved)
2009{
2010        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2011        struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
2012        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2013
2014        dev_warn(ctrl->ctrl.device,
2015                "queue %d: timeout request %#x type %d\n",
2016                nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
2017
2018        if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
2019                /*
2020                 * Teardown immediately if the controller times out while starting
2021                 * or if we have already started error recovery. All outstanding
2022                 * requests are completed on shutdown, so we return BLK_EH_DONE.
2023                 */
2024                flush_work(&ctrl->err_work);
2025                nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
2026                nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
2027                return BLK_EH_DONE;
2028        }
2029
2030        dev_warn(ctrl->ctrl.device, "starting error recovery\n");
2031        nvme_tcp_error_recovery(&ctrl->ctrl);
2032
2033        return BLK_EH_RESET_TIMER;
2034}
2035
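/*
 * Data mapping just selects the SGL descriptor type: a write small enough
 * to fit the in-capsule (inline) data area is described with an inline
 * data block descriptor and carried inside the command PDU, anything else
 * uses a transport SGL and is transferred through separate data PDUs.
 */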
2036static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2037                        struct request *rq)
2038{
2039        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2040        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2041        struct nvme_command *c = &pdu->cmd;
2042
2043        c->common.flags |= NVME_CMD_SGL_METABUF;
2044
2045        if (rq_data_dir(rq) == WRITE && req->data_len &&
2046            req->data_len <= nvme_tcp_inline_data_size(queue))
2047                nvme_tcp_set_sg_inline(queue, c, req->data_len);
2048        else
2049                nvme_tcp_set_sg_host_data(c, req->data_len);
2050
2051        return 0;
2052}
2053
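/*
 * Build the command capsule PDU.  The advertised PDU length is
 * hlen + hdgst + pdu_len + ddgst; assuming an 8-byte common header and a
 * 64-byte SQE (sizeof(*pdu) == 72), a 4KiB inline write with header and
 * data digests enabled would carry plen = 72 + 4 + 4096 + 4 = 4176, with
 * pdo pointing at the first data byte just past the header digest.
 */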
2054static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2055                struct request *rq)
2056{
2057        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2058        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2059        struct nvme_tcp_queue *queue = req->queue;
2060        u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2061        blk_status_t ret;
2062
2063        ret = nvme_setup_cmd(ns, rq, &pdu->cmd);
2064        if (ret)
2065                return ret;
2066
2067        req->state = NVME_TCP_SEND_CMD_PDU;
2068        req->offset = 0;
2069        req->data_sent = 0;
2070        req->pdu_len = 0;
2071        req->pdu_sent = 0;
2072        req->data_len = blk_rq_payload_bytes(rq);
2073        req->curr_bio = rq->bio;
2074
2075        if (rq_data_dir(rq) == WRITE &&
2076            req->data_len <= nvme_tcp_inline_data_size(queue))
2077                req->pdu_len = req->data_len;
2078        else if (req->curr_bio)
2079                nvme_tcp_init_iter(req, READ);
2080
2081        pdu->hdr.type = nvme_tcp_cmd;
2082        pdu->hdr.flags = 0;
2083        if (queue->hdr_digest)
2084                pdu->hdr.flags |= NVME_TCP_F_HDGST;
2085        if (queue->data_digest && req->pdu_len) {
2086                pdu->hdr.flags |= NVME_TCP_F_DDGST;
2087                ddgst = nvme_tcp_ddgst_len(queue);
2088        }
2089        pdu->hdr.hlen = sizeof(*pdu);
2090        pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2091        pdu->hdr.plen =
2092                cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2093
2094        ret = nvme_tcp_map_data(queue, rq);
2095        if (unlikely(ret)) {
2096                dev_err(queue->ctrl->ctrl.device,
2097                        "Failed to map data (%d)\n", ret);
2098                return ret;
2099        }
2100
2101        return 0;
2102}
2103
2104static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2105                const struct blk_mq_queue_data *bd)
2106{
2107        struct nvme_ns *ns = hctx->queue->queuedata;
2108        struct nvme_tcp_queue *queue = hctx->driver_data;
2109        struct request *rq = bd->rq;
2110        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2111        bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2112        blk_status_t ret;
2113
2114        if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2115                return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
2116
2117        ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2118        if (unlikely(ret))
2119                return ret;
2120
2121        blk_mq_start_request(rq);
2122
2123        nvme_tcp_queue_request(req);
2124
2125        return BLK_STS_OK;
2126}
2127
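/*
 * Map the blk-mq hardware contexts onto the TCP queues.  With dedicated
 * write queues, the DEFAULT map starts at queue offset 0 and the READ map
 * follows it; continuing the illustrative split above (2 default + 8 read
 * queues), READ hctxs would start at queue_offset 2.  Without dedicated
 * write queues both maps share the same queue range.
 */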
2128static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
2129{
2130        struct nvme_tcp_ctrl *ctrl = set->driver_data;
2131        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2132
2133        if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
2134                /* separate read/write queues */
2135                set->map[HCTX_TYPE_DEFAULT].nr_queues =
2136                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
2137                set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2138                set->map[HCTX_TYPE_READ].nr_queues =
2139                        ctrl->io_queues[HCTX_TYPE_READ];
2140                set->map[HCTX_TYPE_READ].queue_offset =
2141                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
2142        } else {
2143                /* shared read/write queues */
2144                set->map[HCTX_TYPE_DEFAULT].nr_queues =
2145                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
2146                set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2147                set->map[HCTX_TYPE_READ].nr_queues =
2148                        ctrl->io_queues[HCTX_TYPE_DEFAULT];
2149                set->map[HCTX_TYPE_READ].queue_offset = 0;
2150        }
2151        blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2152        blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
2153
2154        dev_info(ctrl->ctrl.device,
2155                "mapped %d/%d default/read queues.\n",
2156                ctrl->io_queues[HCTX_TYPE_DEFAULT],
2157                ctrl->io_queues[HCTX_TYPE_READ]);
2158
2159        return 0;
2160}
2161
2162static struct blk_mq_ops nvme_tcp_mq_ops = {
2163        .queue_rq       = nvme_tcp_queue_rq,
2164        .complete       = nvme_complete_rq,
2165        .init_request   = nvme_tcp_init_request,
2166        .exit_request   = nvme_tcp_exit_request,
2167        .init_hctx      = nvme_tcp_init_hctx,
2168        .timeout        = nvme_tcp_timeout,
2169        .map_queues     = nvme_tcp_map_queues,
2170};
2171
2172static struct blk_mq_ops nvme_tcp_admin_mq_ops = {
2173        .queue_rq       = nvme_tcp_queue_rq,
2174        .complete       = nvme_complete_rq,
2175        .init_request   = nvme_tcp_init_request,
2176        .exit_request   = nvme_tcp_exit_request,
2177        .init_hctx      = nvme_tcp_init_admin_hctx,
2178        .timeout        = nvme_tcp_timeout,
2179};
2180
2181static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2182        .name                   = "tcp",
2183        .module                 = THIS_MODULE,
2184        .flags                  = NVME_F_FABRICS,
2185        .reg_read32             = nvmf_reg_read32,
2186        .reg_read64             = nvmf_reg_read64,
2187        .reg_write32            = nvmf_reg_write32,
2188        .free_ctrl              = nvme_tcp_free_ctrl,
2189        .submit_async_event     = nvme_tcp_submit_async_event,
2190        .delete_ctrl            = nvme_tcp_delete_ctrl,
2191        .get_address            = nvmf_get_address,
2192};
2193
2194static bool
2195nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2196{
2197        struct nvme_tcp_ctrl *ctrl;
2198        bool found = false;
2199
2200        mutex_lock(&nvme_tcp_ctrl_mutex);
2201        list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2202                found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2203                if (found)
2204                        break;
2205        }
2206        mutex_unlock(&nvme_tcp_ctrl_mutex);
2207
2208        return found;
2209}
2210
2211static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2212                struct nvmf_ctrl_options *opts)
2213{
2214        struct nvme_tcp_ctrl *ctrl;
2215        int ret;
2216
2217        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2218        if (!ctrl)
2219                return ERR_PTR(-ENOMEM);
2220
2221        INIT_LIST_HEAD(&ctrl->list);
2222        ctrl->ctrl.opts = opts;
2223        ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
2224        ctrl->ctrl.sqsize = opts->queue_size - 1;
2225        ctrl->ctrl.kato = opts->kato;
2226
2227        INIT_DELAYED_WORK(&ctrl->connect_work,
2228                        nvme_tcp_reconnect_ctrl_work);
2229        INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2230        INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2231
2232        if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2233                opts->trsvcid =
2234                        kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2235                if (!opts->trsvcid) {
2236                        ret = -ENOMEM;
2237                        goto out_free_ctrl;
2238                }
2239                opts->mask |= NVMF_OPT_TRSVCID;
2240        }
2241
2242        ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2243                        opts->traddr, opts->trsvcid, &ctrl->addr);
2244        if (ret) {
2245                pr_err("malformed address passed: %s:%s\n",
2246                        opts->traddr, opts->trsvcid);
2247                goto out_free_ctrl;
2248        }
2249
2250        if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2251                ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2252                        opts->host_traddr, NULL, &ctrl->src_addr);
2253                if (ret) {
2254                        pr_err("malformed src address passed: %s\n",
2255                               opts->host_traddr);
2256                        goto out_free_ctrl;
2257                }
2258        }
2259
2260        if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2261                ret = -EALREADY;
2262                goto out_free_ctrl;
2263        }
2264
2265        ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
2266                                GFP_KERNEL);
2267        if (!ctrl->queues) {
2268                ret = -ENOMEM;
2269                goto out_free_ctrl;
2270        }
2271
2272        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2273        if (ret)
2274                goto out_kfree_queues;
2275
2276        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2277                WARN_ON_ONCE(1);
2278                ret = -EINTR;
2279                goto out_uninit_ctrl;
2280        }
2281
2282        ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2283        if (ret)
2284                goto out_uninit_ctrl;
2285
2286        dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
2287                ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2288
2289        nvme_get_ctrl(&ctrl->ctrl);
2290
2291        mutex_lock(&nvme_tcp_ctrl_mutex);
2292        list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2293        mutex_unlock(&nvme_tcp_ctrl_mutex);
2294
2295        return &ctrl->ctrl;
2296
2297out_uninit_ctrl:
2298        nvme_uninit_ctrl(&ctrl->ctrl);
2299        nvme_put_ctrl(&ctrl->ctrl);
2300        if (ret > 0)
2301                ret = -EIO;
2302        return ERR_PTR(ret);
2303out_kfree_queues:
2304        kfree(ctrl->queues);
2305out_free_ctrl:
2306        kfree(ctrl);
2307        return ERR_PTR(ret);
2308}
2309
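/*
 * Transport registration makes "tcp" selectable through the fabrics
 * control interface, e.g. with nvme-cli something along the lines of
 * (illustrative address and NQN):
 *
 *   nvme connect -t tcp -a 192.168.1.10 -s 4420 \
 *        -n nqn.2014-08.org.nvmexpress:example-subsys
 *
 * where -a supplies the required TRADDR option and -s the optional
 * TRSVCID (defaulting to NVME_TCP_DISC_PORT when omitted).
 */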
2310static struct nvmf_transport_ops nvme_tcp_transport = {
2311        .name           = "tcp",
2312        .module         = THIS_MODULE,
2313        .required_opts  = NVMF_OPT_TRADDR,
2314        .allowed_opts   = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2315                          NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
2316                          NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
2317                          NVMF_OPT_NR_WRITE_QUEUES,
2318        .create_ctrl    = nvme_tcp_create_ctrl,
2319};
2320
2321static int __init nvme_tcp_init_module(void)
2322{
2323        nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2324                        WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2325        if (!nvme_tcp_wq)
2326                return -ENOMEM;
2327
2328        nvmf_register_transport(&nvme_tcp_transport);
2329        return 0;
2330}
2331
2332static void __exit nvme_tcp_cleanup_module(void)
2333{
2334        struct nvme_tcp_ctrl *ctrl;
2335
2336        nvmf_unregister_transport(&nvme_tcp_transport);
2337
2338        mutex_lock(&nvme_tcp_ctrl_mutex);
2339        list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2340                nvme_delete_ctrl(&ctrl->ctrl);
2341        mutex_unlock(&nvme_tcp_ctrl_mutex);
2342        flush_workqueue(nvme_delete_wq);
2343
2344        destroy_workqueue(nvme_tcp_wq);
2345}
2346
2347module_init(nvme_tcp_init_module);
2348module_exit(nvme_tcp_cleanup_module);
2349
2350MODULE_LICENSE("GPL v2");
2351