linux/drivers/block/rnbd/rnbd-clt.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * RDMA Network Block Driver
   4 *
   5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
   6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
   7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
   8 */
   9
  10#undef pr_fmt
  11#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
  12
  13#include <linux/module.h>
  14#include <linux/blkdev.h>
  15#include <linux/hdreg.h>
  16#include <linux/scatterlist.h>
  17#include <linux/idr.h>
  18
  19#include "rnbd-clt.h"
  20
  21MODULE_DESCRIPTION("RDMA Network Block Device Client");
  22MODULE_LICENSE("GPL");
  23
  24static int rnbd_client_major;
  25static DEFINE_IDA(index_ida);
  26static DEFINE_MUTEX(ida_lock);
  27static DEFINE_MUTEX(sess_lock);
  28static LIST_HEAD(sess_list);
  29
  30/*
  31 * Maximum number of partitions an instance can have.
  32 * 6 bits = 64 minors = 63 partitions (one minor is used for the device itself)
  33 */
  34#define RNBD_PART_BITS          6
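/*
 * For example (the disk index is hypothetical): with RNBD_PART_BITS == 6,
 * a device that got index 2 from index_ida is set up in
 * rnbd_clt_setup_gen_disk() with first_minor = 2 << 6 = 128 and
 * minors = 1 << 6 = 64, i.e. minors 128..191: one minor for the whole
 * disk plus up to 63 partitions.
 */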
  35
  36static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
  37{
  38        return refcount_inc_not_zero(&sess->refcount);
  39}
  40
  41static void free_sess(struct rnbd_clt_session *sess);
  42
  43static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
  44{
  45        might_sleep();
  46
  47        if (refcount_dec_and_test(&sess->refcount))
  48                free_sess(sess);
  49}
  50
  51static void rnbd_clt_put_dev(struct rnbd_clt_dev *dev)
  52{
  53        might_sleep();
  54
  55        if (!refcount_dec_and_test(&dev->refcount))
  56                return;
  57
  58        mutex_lock(&ida_lock);
  59        ida_simple_remove(&index_ida, dev->clt_device_id);
  60        mutex_unlock(&ida_lock);
  61        kfree(dev->hw_queues);
  62        kfree(dev->pathname);
  63        rnbd_clt_put_sess(dev->sess);
  64        mutex_destroy(&dev->lock);
  65        kfree(dev);
  66}
  67
  68static inline bool rnbd_clt_get_dev(struct rnbd_clt_dev *dev)
  69{
  70        return refcount_inc_not_zero(&dev->refcount);
  71}
  72
  73static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
  74                                 const struct rnbd_msg_open_rsp *rsp)
  75{
  76        struct rnbd_clt_session *sess = dev->sess;
  77
  78        if (!rsp->logical_block_size)
  79                return -EINVAL;
  80
  81        dev->device_id              = le32_to_cpu(rsp->device_id);
  82        dev->nsectors               = le64_to_cpu(rsp->nsectors);
  83        dev->logical_block_size     = le16_to_cpu(rsp->logical_block_size);
  84        dev->physical_block_size    = le16_to_cpu(rsp->physical_block_size);
  85        dev->max_write_same_sectors = le32_to_cpu(rsp->max_write_same_sectors);
  86        dev->max_discard_sectors    = le32_to_cpu(rsp->max_discard_sectors);
  87        dev->discard_granularity    = le32_to_cpu(rsp->discard_granularity);
  88        dev->discard_alignment      = le32_to_cpu(rsp->discard_alignment);
  89        dev->secure_discard         = le16_to_cpu(rsp->secure_discard);
  90        dev->rotational             = rsp->rotational;
  91        dev->wc                     = !!(rsp->cache_policy & RNBD_WRITEBACK);
  92        dev->fua                    = !!(rsp->cache_policy & RNBD_FUA);
  93
  94        dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
  95        dev->max_segments = sess->max_segments;
  96
  97        return 0;
  98}
  99
 100static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
 101                                    size_t new_nsectors)
 102{
 103        rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
 104                       dev->nsectors, new_nsectors);
 105        dev->nsectors = new_nsectors;
 106        set_capacity_and_notify(dev->gd, dev->nsectors);
 107        return 0;
 108}
 109
 110static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
 111                                struct rnbd_msg_open_rsp *rsp)
 112{
 113        struct kobject *gd_kobj;
 114        int err = 0;
 115
 116        mutex_lock(&dev->lock);
 117        if (dev->dev_state == DEV_STATE_UNMAPPED) {
 118                rnbd_clt_info(dev,
  119                               "Ignoring Open-Response message from server for unmapped device\n");
 120                err = -ENOENT;
 121                goto out;
 122        }
 123        if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED) {
 124                u64 nsectors = le64_to_cpu(rsp->nsectors);
 125
 126                /*
 127                 * If the device was remapped and the size changed in the
 128                 * meantime we need to revalidate it
 129                 */
 130                if (dev->nsectors != nsectors)
 131                        rnbd_clt_change_capacity(dev, nsectors);
 132                gd_kobj = &disk_to_dev(dev->gd)->kobj;
 133                kobject_uevent(gd_kobj, KOBJ_ONLINE);
 134                rnbd_clt_info(dev, "Device online, device remapped successfully\n");
 135        }
 136        err = rnbd_clt_set_dev_attr(dev, rsp);
 137        if (err)
 138                goto out;
 139        dev->dev_state = DEV_STATE_MAPPED;
 140
 141out:
 142        mutex_unlock(&dev->lock);
 143
 144        return err;
 145}
 146
 147int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize)
 148{
 149        int ret = 0;
 150
 151        mutex_lock(&dev->lock);
 152        if (dev->dev_state != DEV_STATE_MAPPED) {
 153                pr_err("Failed to set new size of the device, device is not opened\n");
 154                ret = -ENOENT;
 155                goto out;
 156        }
 157        ret = rnbd_clt_change_capacity(dev, newsize);
 158
 159out:
 160        mutex_unlock(&dev->lock);
 161
 162        return ret;
 163}
 164
 165static inline void rnbd_clt_dev_requeue(struct rnbd_queue *q)
 166{
 167        if (WARN_ON(!q->hctx))
 168                return;
 169
 170        /* We can come here from interrupt, thus async=true */
 171        blk_mq_run_hw_queue(q->hctx, true);
 172}
 173
 174enum {
 175        RNBD_DELAY_IFBUSY = -1,
 176};
 177
 178/**
 179 * rnbd_get_cpu_qlist() - finds a list with HW queues to be rerun
 180 * @sess:       Session to find a queue for
 181 * @cpu:        Cpu to start the search from
 182 *
 183 * Description:
  184 *     Each CPU has a list of HW queues which need to be rerun.  If a list
  185 *     is not empty, it is marked with a bit.  This function finds the first
  186 *     set bit in the bitmap and returns the corresponding CPU list.
 187 */
 188static struct rnbd_cpu_qlist *
 189rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
 190{
 191        int bit;
 192
 193        /* Search from cpu to nr_cpu_ids */
 194        bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
 195        if (bit < nr_cpu_ids) {
 196                return per_cpu_ptr(sess->cpu_queues, bit);
 197        } else if (cpu != 0) {
 198                /* Search from 0 to cpu */
 199                bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
 200                if (bit < cpu)
 201                        return per_cpu_ptr(sess->cpu_queues, bit);
 202        }
 203
 204        return NULL;
 205}
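/*
 * A small illustration of the wrap-around search (CPU counts are
 * hypothetical): with nr_cpu_ids == 4 and only bit 1 set in
 * sess->cpu_queues_bm, a call with cpu == 3 finds nothing in [3, 4),
 * falls back to searching [0, 3) and returns the per-cpu list of CPU 1.
 * If no bit is set at all, NULL is returned.
 */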
 206
 207static inline int nxt_cpu(int cpu)
 208{
 209        return (cpu + 1) % nr_cpu_ids;
 210}
 211
 212/**
 213 * rnbd_rerun_if_needed() - rerun next queue marked as stopped
 214 * @sess:       Session to rerun a queue on
 215 *
 216 * Description:
  217 *     Each CPU has its own list of HW queues which should be rerun.
  218 *     The function finds such a list, takes its lock, picks up the first
  219 *     HW queue from the list and requeues it.
 220 *
 221 * Return:
 222 *     True if the queue was requeued, false otherwise.
 223 *
 224 * Context:
 225 *     Does not matter.
 226 */
 227static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
 228{
 229        struct rnbd_queue *q = NULL;
 230        struct rnbd_cpu_qlist *cpu_q;
 231        unsigned long flags;
 232        int *cpup;
 233
 234        /*
  235         * To keep fairness and not let other queues starve, we always
  236         * try to wake up someone else in a round-robin manner.  That of course
  237         * increases latency, but queues always have a chance to be executed.
 238         */
 239        cpup = get_cpu_ptr(sess->cpu_rr);
 240        for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
 241             cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
 242                if (!spin_trylock_irqsave(&cpu_q->requeue_lock, flags))
 243                        continue;
 244                if (!test_bit(cpu_q->cpu, sess->cpu_queues_bm))
 245                        goto unlock;
 246                q = list_first_entry_or_null(&cpu_q->requeue_list,
 247                                             typeof(*q), requeue_list);
 248                if (WARN_ON(!q))
 249                        goto clear_bit;
 250                list_del_init(&q->requeue_list);
 251                clear_bit_unlock(0, &q->in_list);
 252
 253                if (list_empty(&cpu_q->requeue_list)) {
 254                        /* Clear bit if nothing is left */
 255clear_bit:
 256                        clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
 257                }
 258unlock:
 259                spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
 260
 261                if (q)
 262                        break;
 263        }
 264
  265        /*
  266         * Save the CPU that is going to be requeued in the per-cpu var. Just
 267         * incrementing it doesn't work because rnbd_get_cpu_qlist() will
 268         * always return the first CPU with something on the queue list when the
 269         * value stored on the var is greater than the last CPU with something
 270         * on the list.
 271         */
 272        if (cpu_q)
 273                *cpup = cpu_q->cpu;
 274        put_cpu_ptr(sess->cpu_rr);
 275
 276        if (q)
 277                rnbd_clt_dev_requeue(q);
 278
 279        return q;
 280}
 281
 282/**
 283 * rnbd_rerun_all_if_idle() - rerun all queues left in the list if
 284 *                               session is idling (there are no requests
 285 *                               in-flight).
 286 * @sess:       Session to rerun the queues on
 287 *
 288 * Description:
  289 *     This function tries to rerun all stopped queues if there are no
  290 *     requests in-flight anymore.  It solves an obvious problem: the number
  291 *     of tags can be smaller than the number of queues (hctxs) which are
  292 *     stopped and put to sleep.  If the last permit, which has just been put,
  293 *     does not wake up all remaining queues (hctxs), IO requests hang forever.
  294 *
  295 *     That can happen when all permits, say N, have been exhausted
  296 *     from one CPU, and we have many block devices per session, say M.
  297 *     Each block device has its own queue (hctx) for each CPU, so eventually
  298 *     we can put that number of queues (hctxs) to sleep: M x nr_cpu_ids.
  299 *     If the number of permits N < M x nr_cpu_ids, we eventually get an IO hang.
  300 *
  301 *     To avoid this hang, the last caller of rnbd_put_permit() (the one who
  302 *     observes sess->busy == 0) must wake up all remaining queues.
 303 *
 304 * Context:
 305 *     Does not matter.
 306 */
 307static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
 308{
 309        bool requeued;
 310
 311        do {
 312                requeued = rnbd_rerun_if_needed(sess);
 313        } while (atomic_read(&sess->busy) == 0 && requeued);
 314}
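/*
 * A rough worked example of the hang described above (all numbers are
 * hypothetical): with N = 128 permits, M = 8 devices and nr_cpu_ids = 32,
 * up to M x nr_cpu_ids = 256 hctxs may be stopped while only 128 permits
 * exist.  The rnbd_put_permit() caller that observes sess->busy == 0
 * therefore keeps calling rnbd_rerun_if_needed() until a request is in
 * flight again or no stopped queue could be requeued.
 */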
 315
 316static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
 317                                             enum rtrs_clt_con_type con_type,
 318                                             enum wait_type wait)
 319{
 320        struct rtrs_permit *permit;
 321
 322        permit = rtrs_clt_get_permit(sess->rtrs, con_type, wait);
 323        if (permit)
 324                /* We have a subtle rare case here, when all permits can be
  325                 * consumed before the busy counter is increased.  This is safe,
  326                 * because the loser will get NULL as a permit, observe a zero busy
  327                 * counter and immediately restart the queue itself.
 328                 */
 329                atomic_inc(&sess->busy);
 330
 331        return permit;
 332}
 333
 334static void rnbd_put_permit(struct rnbd_clt_session *sess,
 335                             struct rtrs_permit *permit)
 336{
 337        rtrs_clt_put_permit(sess->rtrs, permit);
 338        atomic_dec(&sess->busy);
 339        /* Paired with rnbd_clt_dev_add_to_requeue().  Decrement first
 340         * and then check queue bits.
 341         */
 342        smp_mb__after_atomic();
 343        rnbd_rerun_all_if_idle(sess);
 344}
 345
 346static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
 347                                     enum rtrs_clt_con_type con_type,
 348                                     enum wait_type wait)
 349{
 350        struct rnbd_iu *iu;
 351        struct rtrs_permit *permit;
 352
 353        iu = kzalloc(sizeof(*iu), GFP_KERNEL);
 354        if (!iu)
 355                return NULL;
 356
 357        permit = rnbd_get_permit(sess, con_type, wait);
 358        if (!permit) {
 359                kfree(iu);
 360                return NULL;
 361        }
 362
 363        iu->permit = permit;
 364        /*
  365         * The 1st reference is dropped after finishing sending a "user" message,
  366         * the 2nd reference is dropped after the confirmation with the response
  367         * has been returned.
 368         * 1st and 2nd can happen in any order, so the rnbd_iu should be
 369         * released (rtrs_permit returned to rtrs) only after both
 370         * are finished.
 371         */
 372        atomic_set(&iu->refcount, 2);
 373        init_waitqueue_head(&iu->comp.wait);
 374        iu->comp.errno = INT_MAX;
 375
 376        if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
 377                rnbd_put_permit(sess, permit);
 378                kfree(iu);
 379                return NULL;
 380        }
 381
 382        return iu;
 383}
 384
 385static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
 386{
 387        if (atomic_dec_and_test(&iu->refcount)) {
 388                sg_free_table(&iu->sgt);
 389                rnbd_put_permit(sess, iu->permit);
 390                kfree(iu);
 391        }
 392}
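/*
 * A minimal sketch of the iu lifecycle for "user" messages, following the
 * pattern of send_msg_open()/send_msg_close() below (error handling
 * omitted):
 *
 *	iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
 *	err = send_usr_msg(..., iu, ..., msg_open_conf, &errno, wait);
 *	rnbd_put_iu(sess, iu);
 *
 * The rnbd_put_iu() above drops the sender's reference; the second
 * reference is dropped in the msg_*_conf() work handler once the response
 * has been processed.  The permit is returned to RTRS only after both
 * references are gone.
 */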
 393
 394static void rnbd_softirq_done_fn(struct request *rq)
 395{
 396        struct rnbd_clt_dev *dev        = rq->rq_disk->private_data;
 397        struct rnbd_clt_session *sess   = dev->sess;
 398        struct rnbd_iu *iu;
 399
 400        iu = blk_mq_rq_to_pdu(rq);
 401        sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
 402        rnbd_put_permit(sess, iu->permit);
 403        blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
 404}
 405
 406static void msg_io_conf(void *priv, int errno)
 407{
 408        struct rnbd_iu *iu = priv;
 409        struct rnbd_clt_dev *dev = iu->dev;
 410        struct request *rq = iu->rq;
 411        int rw = rq_data_dir(rq);
 412
 413        iu->errno = errno;
 414
 415        blk_mq_complete_request(rq);
 416
 417        if (errno)
 418                rnbd_clt_info_rl(dev, "%s I/O failed with err: %d\n",
 419                                 rw == READ ? "read" : "write", errno);
 420}
 421
 422static void wake_up_iu_comp(struct rnbd_iu *iu, int errno)
 423{
 424        iu->comp.errno = errno;
 425        wake_up(&iu->comp.wait);
 426}
 427
 428static void msg_conf(void *priv, int errno)
 429{
 430        struct rnbd_iu *iu = priv;
 431
 432        iu->errno = errno;
 433        schedule_work(&iu->work);
 434}
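/*
 * Completion flow for "user" messages, as wired up in send_usr_msg():
 * RTRS calls msg_conf() from its completion context, which only records
 * the error and schedules iu->work; the work handler (msg_open_conf(),
 * msg_close_conf() or msg_sess_info_conf()) processes the response and
 * calls wake_up_iu_comp(), which wakes the waiter in send_usr_msg().
 */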
 435
 436static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
 437                        struct rnbd_iu *iu, struct kvec *vec,
 438                        size_t len, struct scatterlist *sg, unsigned int sg_len,
 439                        void (*conf)(struct work_struct *work),
 440                        int *errno, int wait)
 441{
 442        int err;
 443        struct rtrs_clt_req_ops req_ops;
 444
 445        INIT_WORK(&iu->work, conf);
 446        req_ops = (struct rtrs_clt_req_ops) {
 447                .priv = iu,
 448                .conf_fn = msg_conf,
 449        };
 450        err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
 451                                vec, 1, len, sg, sg_len);
 452        if (!err && wait) {
 453                wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
 454                *errno = iu->comp.errno;
 455        } else {
 456                *errno = 0;
 457        }
 458
 459        return err;
 460}
 461
 462static void msg_close_conf(struct work_struct *work)
 463{
 464        struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
 465        struct rnbd_clt_dev *dev = iu->dev;
 466
 467        wake_up_iu_comp(iu, iu->errno);
 468        rnbd_put_iu(dev->sess, iu);
 469        rnbd_clt_put_dev(dev);
 470}
 471
 472static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id,
 473                          enum wait_type wait)
 474{
 475        struct rnbd_clt_session *sess = dev->sess;
 476        struct rnbd_msg_close msg;
 477        struct rnbd_iu *iu;
 478        struct kvec vec = {
 479                .iov_base = &msg,
 480                .iov_len  = sizeof(msg)
 481        };
 482        int err, errno;
 483
 484        iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
 485        if (!iu)
 486                return -ENOMEM;
 487
 488        iu->buf = NULL;
 489        iu->dev = dev;
 490
 491        msg.hdr.type    = cpu_to_le16(RNBD_MSG_CLOSE);
 492        msg.device_id   = cpu_to_le32(device_id);
 493
 494        WARN_ON(!rnbd_clt_get_dev(dev));
 495        err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
 496                           msg_close_conf, &errno, wait);
 497        if (err) {
 498                rnbd_clt_put_dev(dev);
 499                rnbd_put_iu(sess, iu);
 500        } else {
 501                err = errno;
 502        }
 503
 504        rnbd_put_iu(sess, iu);
 505        return err;
 506}
 507
 508static void msg_open_conf(struct work_struct *work)
 509{
 510        struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
 511        struct rnbd_msg_open_rsp *rsp = iu->buf;
 512        struct rnbd_clt_dev *dev = iu->dev;
 513        int errno = iu->errno;
 514
 515        if (errno) {
 516                rnbd_clt_err(dev,
 517                              "Opening failed, server responded: %d\n",
 518                              errno);
 519        } else {
 520                errno = process_msg_open_rsp(dev, rsp);
 521                if (errno) {
 522                        u32 device_id = le32_to_cpu(rsp->device_id);
 523                        /*
  524                         * If the server thinks it's fine, but we fail to process
  525                         * the response, then be nice and send a close to the server.
 526                         */
 527                        send_msg_close(dev, device_id, RTRS_PERMIT_NOWAIT);
 528                }
 529        }
 530        kfree(rsp);
 531        wake_up_iu_comp(iu, errno);
 532        rnbd_put_iu(dev->sess, iu);
 533        rnbd_clt_put_dev(dev);
 534}
 535
 536static void msg_sess_info_conf(struct work_struct *work)
 537{
 538        struct rnbd_iu *iu = container_of(work, struct rnbd_iu, work);
 539        struct rnbd_msg_sess_info_rsp *rsp = iu->buf;
 540        struct rnbd_clt_session *sess = iu->sess;
 541
 542        if (!iu->errno)
 543                sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);
 544
 545        kfree(rsp);
 546        wake_up_iu_comp(iu, iu->errno);
 547        rnbd_put_iu(sess, iu);
 548        rnbd_clt_put_sess(sess);
 549}
 550
 551static int send_msg_open(struct rnbd_clt_dev *dev, enum wait_type wait)
 552{
 553        struct rnbd_clt_session *sess = dev->sess;
 554        struct rnbd_msg_open_rsp *rsp;
 555        struct rnbd_msg_open msg;
 556        struct rnbd_iu *iu;
 557        struct kvec vec = {
 558                .iov_base = &msg,
 559                .iov_len  = sizeof(msg)
 560        };
 561        int err, errno;
 562
 563        rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 564        if (!rsp)
 565                return -ENOMEM;
 566
 567        iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
 568        if (!iu) {
 569                kfree(rsp);
 570                return -ENOMEM;
 571        }
 572
 573        iu->buf = rsp;
 574        iu->dev = dev;
 575
 576        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 577
 578        msg.hdr.type    = cpu_to_le16(RNBD_MSG_OPEN);
 579        msg.access_mode = dev->access_mode;
 580        strscpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
 581
 582        WARN_ON(!rnbd_clt_get_dev(dev));
 583        err = send_usr_msg(sess->rtrs, READ, iu,
 584                           &vec, sizeof(*rsp), iu->sgt.sgl, 1,
 585                           msg_open_conf, &errno, wait);
 586        if (err) {
 587                rnbd_clt_put_dev(dev);
 588                rnbd_put_iu(sess, iu);
 589                kfree(rsp);
 590        } else {
 591                err = errno;
 592        }
 593
 594        rnbd_put_iu(sess, iu);
 595        return err;
 596}
 597
 598static int send_msg_sess_info(struct rnbd_clt_session *sess, enum wait_type wait)
 599{
 600        struct rnbd_msg_sess_info_rsp *rsp;
 601        struct rnbd_msg_sess_info msg;
 602        struct rnbd_iu *iu;
 603        struct kvec vec = {
 604                .iov_base = &msg,
 605                .iov_len  = sizeof(msg)
 606        };
 607        int err, errno;
 608
 609        rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 610        if (!rsp)
 611                return -ENOMEM;
 612
 613        iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
 614        if (!iu) {
 615                kfree(rsp);
 616                return -ENOMEM;
 617        }
 618
 619        iu->buf = rsp;
 620        iu->sess = sess;
 621        sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 622
 623        msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
 624        msg.ver      = RNBD_PROTO_VER_MAJOR;
 625
 626        if (!rnbd_clt_get_sess(sess)) {
 627                /*
  628                 * That can happen only in one case: RTRS has re-established
  629                 * the connection and link_ev() is called, but the session is almost
  630                 * dead, the last reference on the session has been put and the caller
  631                 * is waiting for RTRS to close everything.
 632                 */
 633                err = -ENODEV;
 634                goto put_iu;
 635        }
 636        err = send_usr_msg(sess->rtrs, READ, iu,
 637                           &vec, sizeof(*rsp), iu->sgt.sgl, 1,
 638                           msg_sess_info_conf, &errno, wait);
 639        if (err) {
 640                rnbd_clt_put_sess(sess);
 641put_iu:
 642                rnbd_put_iu(sess, iu);
 643                kfree(rsp);
 644        } else {
 645                err = errno;
 646        }
 647        rnbd_put_iu(sess, iu);
 648        return err;
 649}
 650
 651static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
 652{
 653        struct rnbd_clt_dev *dev;
 654        struct kobject *gd_kobj;
 655
 656        mutex_lock(&sess->lock);
 657        list_for_each_entry(dev, &sess->devs_list, list) {
 658                rnbd_clt_err(dev, "Device disconnected.\n");
 659
 660                mutex_lock(&dev->lock);
 661                if (dev->dev_state == DEV_STATE_MAPPED) {
 662                        dev->dev_state = DEV_STATE_MAPPED_DISCONNECTED;
 663                        gd_kobj = &disk_to_dev(dev->gd)->kobj;
 664                        kobject_uevent(gd_kobj, KOBJ_OFFLINE);
 665                }
 666                mutex_unlock(&dev->lock);
 667        }
 668        mutex_unlock(&sess->lock);
 669}
 670
 671static void remap_devs(struct rnbd_clt_session *sess)
 672{
 673        struct rnbd_clt_dev *dev;
 674        struct rtrs_attrs attrs;
 675        int err;
 676
 677        /*
 678         * Careful here: we are called from RTRS link event directly,
  679         * thus we can't send any RTRS request and wait for a response,
  680         * or RTRS will not be able to complete a request with failure
  681         * if something goes wrong (failing outstanding requests
  682         * happens exactly in the context where we are blocking now).
 683         *
 684         * So to avoid deadlocks each usr message sent from here must
 685         * be asynchronous.
 686         */
 687
 688        err = send_msg_sess_info(sess, RTRS_PERMIT_NOWAIT);
 689        if (err) {
 690                pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
 691                return;
 692        }
 693
 694        err = rtrs_clt_query(sess->rtrs, &attrs);
 695        if (err) {
 696                pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
 697                return;
 698        }
 699        mutex_lock(&sess->lock);
 700        sess->max_io_size = attrs.max_io_size;
 701
 702        list_for_each_entry(dev, &sess->devs_list, list) {
 703                bool skip;
 704
 705                mutex_lock(&dev->lock);
 706                skip = (dev->dev_state == DEV_STATE_INIT);
 707                mutex_unlock(&dev->lock);
 708                if (skip)
 709                        /*
  710                         * When the device is establishing a connection for the
  711                         * first time, do not remap; it will be closed soon.
 712                         */
 713                        continue;
 714
 715                rnbd_clt_info(dev, "session reconnected, remapping device\n");
 716                err = send_msg_open(dev, RTRS_PERMIT_NOWAIT);
 717                if (err) {
 718                        rnbd_clt_err(dev, "send_msg_open(): %d\n", err);
 719                        break;
 720                }
 721        }
 722        mutex_unlock(&sess->lock);
 723}
 724
 725static void rnbd_clt_link_ev(void *priv, enum rtrs_clt_link_ev ev)
 726{
 727        struct rnbd_clt_session *sess = priv;
 728
 729        switch (ev) {
 730        case RTRS_CLT_LINK_EV_DISCONNECTED:
 731                set_dev_states_to_disconnected(sess);
 732                break;
 733        case RTRS_CLT_LINK_EV_RECONNECTED:
 734                remap_devs(sess);
 735                break;
 736        default:
 737                pr_err("Unknown session event received (%d), session: %s\n",
 738                       ev, sess->sessname);
 739        }
 740}
 741
 742static void rnbd_init_cpu_qlists(struct rnbd_cpu_qlist __percpu *cpu_queues)
 743{
 744        unsigned int cpu;
 745        struct rnbd_cpu_qlist *cpu_q;
 746
 747        for_each_possible_cpu(cpu) {
 748                cpu_q = per_cpu_ptr(cpu_queues, cpu);
 749
 750                cpu_q->cpu = cpu;
 751                INIT_LIST_HEAD(&cpu_q->requeue_list);
 752                spin_lock_init(&cpu_q->requeue_lock);
 753        }
 754}
 755
 756static void destroy_mq_tags(struct rnbd_clt_session *sess)
 757{
 758        if (sess->tag_set.tags)
 759                blk_mq_free_tag_set(&sess->tag_set);
 760}
 761
 762static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
 763{
 764        sess->rtrs_ready = true;
 765        wake_up_all(&sess->rtrs_waitq);
 766}
 767
 768static void close_rtrs(struct rnbd_clt_session *sess)
 769{
 770        might_sleep();
 771
 772        if (!IS_ERR_OR_NULL(sess->rtrs)) {
 773                rtrs_clt_close(sess->rtrs);
 774                sess->rtrs = NULL;
 775                wake_up_rtrs_waiters(sess);
 776        }
 777}
 778
 779static void free_sess(struct rnbd_clt_session *sess)
 780{
 781        WARN_ON(!list_empty(&sess->devs_list));
 782
 783        might_sleep();
 784
 785        close_rtrs(sess);
 786        destroy_mq_tags(sess);
 787        if (!list_empty(&sess->list)) {
 788                mutex_lock(&sess_lock);
 789                list_del(&sess->list);
 790                mutex_unlock(&sess_lock);
 791        }
 792        free_percpu(sess->cpu_queues);
 793        free_percpu(sess->cpu_rr);
 794        mutex_destroy(&sess->lock);
 795        kfree(sess);
 796}
 797
 798static struct rnbd_clt_session *alloc_sess(const char *sessname)
 799{
 800        struct rnbd_clt_session *sess;
 801        int err, cpu;
 802
 803        sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
 804        if (!sess)
 805                return ERR_PTR(-ENOMEM);
 806        strscpy(sess->sessname, sessname, sizeof(sess->sessname));
 807        atomic_set(&sess->busy, 0);
 808        mutex_init(&sess->lock);
 809        INIT_LIST_HEAD(&sess->devs_list);
 810        INIT_LIST_HEAD(&sess->list);
 811        bitmap_zero(sess->cpu_queues_bm, num_possible_cpus());
 812        init_waitqueue_head(&sess->rtrs_waitq);
 813        refcount_set(&sess->refcount, 1);
 814
 815        sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
 816        if (!sess->cpu_queues) {
 817                err = -ENOMEM;
 818                goto err;
 819        }
 820        rnbd_init_cpu_qlists(sess->cpu_queues);
 821
 822        /*
  823         * This is a simple percpu variable which stores cpu indices, which are
  824         * incremented on each access.  We need it for the sake of fairness,
  825         * to wake up queues in a round-robin manner.
 826         */
 827        sess->cpu_rr = alloc_percpu(int);
 828        if (!sess->cpu_rr) {
 829                err = -ENOMEM;
 830                goto err;
 831        }
 832        for_each_possible_cpu(cpu)
  833                *per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
 834
 835        return sess;
 836
 837err:
 838        free_sess(sess);
 839
 840        return ERR_PTR(err);
 841}
 842
 843static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
 844{
 845        wait_event(sess->rtrs_waitq, sess->rtrs_ready);
 846        if (IS_ERR_OR_NULL(sess->rtrs))
 847                return -ECONNRESET;
 848
 849        return 0;
 850}
 851
 852static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
 853        __releases(&sess_lock)
 854        __acquires(&sess_lock)
 855{
 856        DEFINE_WAIT(wait);
 857
 858        prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
 859        if (IS_ERR_OR_NULL(sess->rtrs)) {
 860                finish_wait(&sess->rtrs_waitq, &wait);
 861                return;
 862        }
 863        mutex_unlock(&sess_lock);
  864        /* Loop in caller, see __find_and_get_sess().
  865         * You can't leave the mutex locked and call schedule(): you will
 866         * deadlock with a caller of free_sess(), which has just put the last
 867         * reference and is about to take the sess_lock in order to delete
 868         * the session from the list.
 869         */
 870        schedule();
 871        mutex_lock(&sess_lock);
 872}
 873
 874static struct rnbd_clt_session *__find_and_get_sess(const char *sessname)
 875        __releases(&sess_lock)
 876        __acquires(&sess_lock)
 877{
 878        struct rnbd_clt_session *sess, *sn;
 879        int err;
 880
 881again:
 882        list_for_each_entry_safe(sess, sn, &sess_list, list) {
 883                if (strcmp(sessname, sess->sessname))
 884                        continue;
 885
 886                if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
 887                        /*
 888                         * No RTRS connection, session is dying.
 889                         */
 890                        continue;
 891
 892                if (rnbd_clt_get_sess(sess)) {
 893                        /*
 894                         * Alive session is found, wait for RTRS connection.
 895                         */
 896                        mutex_unlock(&sess_lock);
 897                        err = wait_for_rtrs_connection(sess);
 898                        if (err)
 899                                rnbd_clt_put_sess(sess);
 900                        mutex_lock(&sess_lock);
 901
 902                        if (err)
 903                                /* Session is dying, repeat the loop */
 904                                goto again;
 905
 906                        return sess;
 907                }
 908                /*
 909                 * Ref is 0, session is dying, wait for RTRS disconnect
 910                 * in order to avoid session names clashes.
 911                 */
 912                wait_for_rtrs_disconnection(sess);
 913                /*
 914                 * RTRS is disconnected and soon session will be freed,
 915                 * so repeat a loop.
 916                 */
 917                goto again;
 918        }
 919
 920        return NULL;
 921}
 922
 923/* caller is responsible for initializing 'first' to false */
 924static struct
 925rnbd_clt_session *find_or_create_sess(const char *sessname, bool *first)
 926{
 927        struct rnbd_clt_session *sess = NULL;
 928
 929        mutex_lock(&sess_lock);
 930        sess = __find_and_get_sess(sessname);
 931        if (!sess) {
 932                sess = alloc_sess(sessname);
 933                if (IS_ERR(sess)) {
 934                        mutex_unlock(&sess_lock);
 935                        return sess;
 936                }
 937                list_add(&sess->list, &sess_list);
 938                *first = true;
 939        }
 940        mutex_unlock(&sess_lock);
 941
 942        return sess;
 943}
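/*
 * Typical use, roughly as in find_and_get_or_create_sess() below:
 *
 *	bool first = false;
 *
 *	sess = find_or_create_sess(sessname, &first);
 *	if (IS_ERR(sess))
 *		return ERR_CAST(sess);
 *
 * If 'first' comes back true, the session was freshly allocated and the
 * caller still has to open the RTRS connection and set up the tag set.
 */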
 944
 945static int rnbd_client_open(struct block_device *block_device, fmode_t mode)
 946{
 947        struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
 948
 949        if (dev->read_only && (mode & FMODE_WRITE))
 950                return -EPERM;
 951
 952        if (dev->dev_state == DEV_STATE_UNMAPPED ||
 953            !rnbd_clt_get_dev(dev))
 954                return -EIO;
 955
 956        return 0;
 957}
 958
 959static void rnbd_client_release(struct gendisk *gen, fmode_t mode)
 960{
 961        struct rnbd_clt_dev *dev = gen->private_data;
 962
 963        rnbd_clt_put_dev(dev);
 964}
 965
 966static int rnbd_client_getgeo(struct block_device *block_device,
 967                              struct hd_geometry *geo)
 968{
 969        u64 size;
 970        struct rnbd_clt_dev *dev;
 971
 972        dev = block_device->bd_disk->private_data;
 973        size = dev->size * (dev->logical_block_size / SECTOR_SIZE);
 974        geo->cylinders  = size >> 6;    /* size/64 */
 975        geo->heads      = 4;
 976        geo->sectors    = 16;
 977        geo->start      = 0;
 978
 979        return 0;
 980}
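/*
 * The fake geometry above just derives a cylinder count from the size:
 * with 4 heads and 16 sectors per track a cylinder holds 4 * 16 = 64
 * sectors, hence the "size >> 6" (divide by 64).
 */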
 981
 982static const struct block_device_operations rnbd_client_ops = {
 983        .owner          = THIS_MODULE,
 984        .open           = rnbd_client_open,
 985        .release        = rnbd_client_release,
 986        .getgeo         = rnbd_client_getgeo
 987};
 988
 989/* The amount of data that belongs to an I/O and the amount of data that
 990 * should be read or written to the disk (bi_size) can differ.
 991 *
 992 * E.g. When WRITE_SAME is used, only a small amount of data is
 993 * transferred that is then written repeatedly over a lot of sectors.
 994 *
 995 * Get the size of data to be transferred via RTRS by summing up the size
  996 * of the scatter-gather list entries.
 997 */
 998static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
 999{
1000        struct scatterlist *sg;
1001        size_t tsize = 0;
1002        int i;
1003
1004        for_each_sg(sglist, sg, len, i)
1005                tsize += sg->length;
1006        return tsize;
1007}
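/*
 * Example of the difference described above (sizes are hypothetical):
 * a WRITE_SAME request covering 1 MiB of the disk carries only a single
 * logical block of payload, so blk_rq_bytes() (sent as msg.bi_size) is
 * 1 MiB while rnbd_clt_get_sg_size() returns just that one block, which
 * is all that actually travels over RTRS.
 */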
1008
1009static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
1010                                     struct request *rq,
1011                                     struct rnbd_iu *iu)
1012{
1013        struct rtrs_clt *rtrs = dev->sess->rtrs;
1014        struct rtrs_permit *permit = iu->permit;
1015        struct rnbd_msg_io msg;
1016        struct rtrs_clt_req_ops req_ops;
1017        unsigned int sg_cnt = 0;
1018        struct kvec vec;
1019        size_t size;
1020        int err;
1021
1022        iu->rq          = rq;
1023        iu->dev         = dev;
1024        msg.sector      = cpu_to_le64(blk_rq_pos(rq));
1025        msg.bi_size     = cpu_to_le32(blk_rq_bytes(rq));
1026        msg.rw          = cpu_to_le32(rq_to_rnbd_flags(rq));
1027        msg.prio        = cpu_to_le16(req_get_ioprio(rq));
1028
1029        /*
 1030         * We only support discards with a single segment for now.
1031         * See queue limits.
1032         */
1033        if (req_op(rq) != REQ_OP_DISCARD)
1034                sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
1035
1036        if (sg_cnt == 0)
1037                sg_mark_end(&iu->sgt.sgl[0]);
1038
1039        msg.hdr.type    = cpu_to_le16(RNBD_MSG_IO);
1040        msg.device_id   = cpu_to_le32(dev->device_id);
1041
1042        vec = (struct kvec) {
1043                .iov_base = &msg,
1044                .iov_len  = sizeof(msg)
1045        };
1046        size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
1047        req_ops = (struct rtrs_clt_req_ops) {
1048                .priv = iu,
1049                .conf_fn = msg_io_conf,
1050        };
1051        err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
1052                               &vec, 1, size, iu->sgt.sgl, sg_cnt);
1053        if (err) {
1054                rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
1055                                 err);
1056                return err;
1057        }
1058
1059        return 0;
1060}
1061
1062/**
1063 * rnbd_clt_dev_add_to_requeue() - add device to requeue if session is busy
1064 * @dev:        Device to be checked
1065 * @q:          Queue to be added to the requeue list if required
1066 *
1067 * Description:
 1068 *     If the session is busy, that means someone will requeue us when
 1069 *     resources are freed.  If the session is not doing anything, the device
 1070 *     is not added to the list and false is returned.
1071 */
1072static bool rnbd_clt_dev_add_to_requeue(struct rnbd_clt_dev *dev,
1073                                                struct rnbd_queue *q)
1074{
1075        struct rnbd_clt_session *sess = dev->sess;
1076        struct rnbd_cpu_qlist *cpu_q;
1077        unsigned long flags;
1078        bool added = true;
1079        bool need_set;
1080
1081        cpu_q = get_cpu_ptr(sess->cpu_queues);
1082        spin_lock_irqsave(&cpu_q->requeue_lock, flags);
1083
1084        if (!test_and_set_bit_lock(0, &q->in_list)) {
1085                if (WARN_ON(!list_empty(&q->requeue_list)))
1086                        goto unlock;
1087
1088                need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
1089                if (need_set) {
1090                        set_bit(cpu_q->cpu, sess->cpu_queues_bm);
1091                        /* Paired with rnbd_put_permit(). Set a bit first
1092                         * and then observe the busy counter.
1093                         */
1094                        smp_mb__before_atomic();
1095                }
1096                if (atomic_read(&sess->busy)) {
1097                        list_add_tail(&q->requeue_list, &cpu_q->requeue_list);
1098                } else {
1099                        /* Very unlikely, but possible: busy counter was
1100                         * observed as zero.  Drop all bits and return
1101                         * false to restart the queue by ourselves.
1102                         */
1103                        if (need_set)
1104                                clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
1105                        clear_bit_unlock(0, &q->in_list);
1106                        added = false;
1107                }
1108        }
1109unlock:
1110        spin_unlock_irqrestore(&cpu_q->requeue_lock, flags);
1111        put_cpu_ptr(sess->cpu_queues);
1112
1113        return added;
1114}
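/*
 * Why the barrier pairing with rnbd_put_permit() matters: this function
 * sets the cpu_queues_bm bit first and then reads sess->busy, while
 * rnbd_put_permit() decrements sess->busy first and then scans the bitmap.
 * With both orderings enforced (smp_mb__before_atomic() here,
 * smp_mb__after_atomic() there) at least one side observes the other's
 * update: either we see busy == 0 and restart the queue ourselves, or the
 * permit owner sees our bit and requeues us.
 */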
1115
1116static void rnbd_clt_dev_kick_mq_queue(struct rnbd_clt_dev *dev,
1117                                        struct blk_mq_hw_ctx *hctx,
1118                                        int delay)
1119{
1120        struct rnbd_queue *q = hctx->driver_data;
1121
1122        if (delay != RNBD_DELAY_IFBUSY)
1123                blk_mq_delay_run_hw_queue(hctx, delay);
1124        else if (!rnbd_clt_dev_add_to_requeue(dev, q))
1125                /*
1126                 * If session is not busy we have to restart
1127                 * the queue ourselves.
1128                 */
1129                blk_mq_delay_run_hw_queue(hctx, 10/*ms*/);
1130}
1131
1132static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1133                                   const struct blk_mq_queue_data *bd)
1134{
1135        struct request *rq = bd->rq;
1136        struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
1137        struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
1138        int err;
1139        blk_status_t ret = BLK_STS_IOERR;
1140
1141        if (dev->dev_state != DEV_STATE_MAPPED)
1142                return BLK_STS_IOERR;
1143
1144        iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
1145                                      RTRS_PERMIT_NOWAIT);
1146        if (!iu->permit) {
1147                rnbd_clt_dev_kick_mq_queue(dev, hctx, RNBD_DELAY_IFBUSY);
1148                return BLK_STS_RESOURCE;
1149        }
1150
1151        iu->sgt.sgl = iu->first_sgl;
1152        err = sg_alloc_table_chained(&iu->sgt,
 1153                                     /* Even if the request has no segments,
 1154                                      * the sglist must have at least one entry.
1155                                      */
1156                                     blk_rq_nr_phys_segments(rq) ? : 1,
1157                                     iu->sgt.sgl,
1158                                     RNBD_INLINE_SG_CNT);
1159        if (err) {
1160                rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
1161                rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
1162                rnbd_put_permit(dev->sess, iu->permit);
1163                return BLK_STS_RESOURCE;
1164        }
1165
1166        blk_mq_start_request(rq);
1167        err = rnbd_client_xfer_request(dev, rq, iu);
1168        if (err == 0)
1169                return BLK_STS_OK;
1170        if (err == -EAGAIN || err == -ENOMEM) {
1171                rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
1172                ret = BLK_STS_RESOURCE;
1173        }
1174        sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
1175        rnbd_put_permit(dev->sess, iu->permit);
1176        return ret;
1177}
1178
1179static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx)
1180{
1181        struct rnbd_queue *q = hctx->driver_data;
1182        struct rnbd_clt_dev *dev = q->dev;
1183        int cnt;
1184
1185        cnt = rtrs_clt_rdma_cq_direct(dev->sess->rtrs, hctx->queue_num);
1186        return cnt;
1187}
1188
1189static int rnbd_rdma_map_queues(struct blk_mq_tag_set *set)
1190{
1191        struct rnbd_clt_session *sess = set->driver_data;
1192
1193        /* shared read/write queues */
1194        set->map[HCTX_TYPE_DEFAULT].nr_queues = num_online_cpus();
1195        set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
1196        set->map[HCTX_TYPE_READ].nr_queues = num_online_cpus();
1197        set->map[HCTX_TYPE_READ].queue_offset = 0;
1198        blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1199        blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
1200
1201        if (sess->nr_poll_queues) {
1202                /* dedicated queue for poll */
1203                set->map[HCTX_TYPE_POLL].nr_queues = sess->nr_poll_queues;
1204                set->map[HCTX_TYPE_POLL].queue_offset = set->map[HCTX_TYPE_READ].queue_offset +
1205                        set->map[HCTX_TYPE_READ].nr_queues;
1206                blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
1207                pr_info("[session=%s] mapped %d/%d/%d default/read/poll queues.\n",
1208                        sess->sessname,
1209                        set->map[HCTX_TYPE_DEFAULT].nr_queues,
1210                        set->map[HCTX_TYPE_READ].nr_queues,
1211                        set->map[HCTX_TYPE_POLL].nr_queues);
1212        } else {
1213                pr_info("[session=%s] mapped %d/%d default/read queues.\n",
1214                        sess->sessname,
1215                        set->map[HCTX_TYPE_DEFAULT].nr_queues,
1216                        set->map[HCTX_TYPE_READ].nr_queues);
1217        }
1218
1219        return 0;
1220}
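/*
 * A worked example of the resulting layout (CPU and queue counts are
 * hypothetical): with 8 online CPUs and nr_poll_queues == 2,
 * HCTX_TYPE_DEFAULT and HCTX_TYPE_READ both map onto hardware queues 0..7
 * (queue_offset 0), and HCTX_TYPE_POLL gets queues 8..9 (queue_offset
 * 0 + 8).  This matches nr_hw_queues in setup_mq_tags():
 * num_online_cpus() + sess->nr_poll_queues = 10.
 */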
1221
1222static struct blk_mq_ops rnbd_mq_ops = {
1223        .queue_rq       = rnbd_queue_rq,
1224        .complete       = rnbd_softirq_done_fn,
1225        .map_queues     = rnbd_rdma_map_queues,
1226        .poll           = rnbd_rdma_poll,
1227};
1228
1229static int setup_mq_tags(struct rnbd_clt_session *sess)
1230{
1231        struct blk_mq_tag_set *tag_set = &sess->tag_set;
1232
1233        memset(tag_set, 0, sizeof(*tag_set));
1234        tag_set->ops            = &rnbd_mq_ops;
1235        tag_set->queue_depth    = sess->queue_depth;
 1236        tag_set->numa_node      = NUMA_NO_NODE;
1237        tag_set->flags          = BLK_MQ_F_SHOULD_MERGE |
1238                                  BLK_MQ_F_TAG_QUEUE_SHARED;
1239        tag_set->cmd_size       = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
1240
1241        /* for HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL */
1242        tag_set->nr_maps        = sess->nr_poll_queues ? HCTX_MAX_TYPES : 2;
1243        /*
 1244         * HCTX_TYPE_DEFAULT and HCTX_TYPE_READ share one set of queues;
 1245         * the others are for HCTX_TYPE_POLL.
1246         */
1247        tag_set->nr_hw_queues   = num_online_cpus() + sess->nr_poll_queues;
1248        tag_set->driver_data    = sess;
1249
1250        return blk_mq_alloc_tag_set(tag_set);
1251}
1252
1253static struct rnbd_clt_session *
1254find_and_get_or_create_sess(const char *sessname,
1255                            const struct rtrs_addr *paths,
1256                            size_t path_cnt, u16 port_nr, u32 nr_poll_queues)
1257{
1258        struct rnbd_clt_session *sess;
1259        struct rtrs_attrs attrs;
1260        int err;
1261        bool first = false;
1262        struct rtrs_clt_ops rtrs_ops;
1263
1264        sess = find_or_create_sess(sessname, &first);
1265        if (sess == ERR_PTR(-ENOMEM))
1266                return ERR_PTR(-ENOMEM);
 1267        else if ((nr_poll_queues && !first) || (!nr_poll_queues && sess->nr_poll_queues)) {
1268                /*
1269                 * A device MUST have its own session to use the polling-mode.
 1270                 * It must fail to map a new device with the same session.
1271                 */
1272                err = -EINVAL;
1273                goto put_sess;
1274        }
1275
1276        if (!first)
1277                return sess;
1278
1279        if (!path_cnt) {
 1280                pr_err("Session %s not found, and path parameter not given\n", sessname);
1281                err = -ENXIO;
1282                goto put_sess;
1283        }
1284
1285        rtrs_ops = (struct rtrs_clt_ops) {
1286                .priv = sess,
1287                .link_ev = rnbd_clt_link_ev,
1288        };
1289        /*
1290         * Nothing was found, establish rtrs connection and proceed further.
1291         */
1292        sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
1293                                   paths, path_cnt, port_nr,
1294                                   0, /* Do not use pdu of rtrs */
1295                                   RECONNECT_DELAY,
1296                                   MAX_RECONNECTS, nr_poll_queues);
1297        if (IS_ERR(sess->rtrs)) {
1298                err = PTR_ERR(sess->rtrs);
1299                goto wake_up_and_put;
1300        }
1301
1302        err = rtrs_clt_query(sess->rtrs, &attrs);
1303        if (err)
1304                goto close_rtrs;
1305
1306        sess->max_io_size = attrs.max_io_size;
1307        sess->queue_depth = attrs.queue_depth;
1308        sess->nr_poll_queues = nr_poll_queues;
1309        sess->max_segments = attrs.max_segments;
1310
1311        err = setup_mq_tags(sess);
1312        if (err)
1313                goto close_rtrs;
1314
1315        err = send_msg_sess_info(sess, RTRS_PERMIT_WAIT);
1316        if (err)
1317                goto close_rtrs;
1318
1319        wake_up_rtrs_waiters(sess);
1320
1321        return sess;
1322
1323close_rtrs:
1324        close_rtrs(sess);
1325put_sess:
1326        rnbd_clt_put_sess(sess);
1327
1328        return ERR_PTR(err);
1329
1330wake_up_and_put:
1331        wake_up_rtrs_waiters(sess);
1332        goto put_sess;
1333}
1334
1335static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,
1336                                       struct rnbd_queue *q,
1337                                       struct blk_mq_hw_ctx *hctx)
1338{
1339        INIT_LIST_HEAD(&q->requeue_list);
1340        q->dev  = dev;
1341        q->hctx = hctx;
1342}
1343
1344static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
1345{
1346        int i;
1347        struct blk_mq_hw_ctx *hctx;
1348        struct rnbd_queue *q;
1349
1350        queue_for_each_hw_ctx(dev->queue, hctx, i) {
1351                q = &dev->hw_queues[i];
1352                rnbd_init_hw_queue(dev, q, hctx);
1353                hctx->driver_data = q;
1354        }
1355}
1356
1357static void setup_request_queue(struct rnbd_clt_dev *dev)
1358{
1359        blk_queue_logical_block_size(dev->queue, dev->logical_block_size);
1360        blk_queue_physical_block_size(dev->queue, dev->physical_block_size);
1361        blk_queue_max_hw_sectors(dev->queue, dev->max_hw_sectors);
1362        blk_queue_max_write_same_sectors(dev->queue,
1363                                         dev->max_write_same_sectors);
1364
1365        /*
 1366         * We don't support discards to "discontiguous" segments
 1367         * in one request.
1368         */
1369        blk_queue_max_discard_segments(dev->queue, 1);
1370
1371        blk_queue_max_discard_sectors(dev->queue, dev->max_discard_sectors);
1372        dev->queue->limits.discard_granularity  = dev->discard_granularity;
1373        dev->queue->limits.discard_alignment    = dev->discard_alignment;
1374        if (dev->max_discard_sectors)
1375                blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->queue);
1376        if (dev->secure_discard)
1377                blk_queue_flag_set(QUEUE_FLAG_SECERASE, dev->queue);
1378
1379        blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, dev->queue);
1380        blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, dev->queue);
1381        blk_queue_max_segments(dev->queue, dev->max_segments);
1382        blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
1383        blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
1384        blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
1385}
1386
1387static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
1388{
1389        dev->gd->major          = rnbd_client_major;
1390        dev->gd->first_minor    = idx << RNBD_PART_BITS;
1391        dev->gd->minors         = 1 << RNBD_PART_BITS;
1392        dev->gd->fops           = &rnbd_client_ops;
1393        dev->gd->queue          = dev->queue;
1394        dev->gd->private_data   = dev;
1395        snprintf(dev->gd->disk_name, sizeof(dev->gd->disk_name), "rnbd%d",
1396                 idx);
1397        pr_debug("disk_name=%s, capacity=%zu\n",
1398                 dev->gd->disk_name,
1399                 dev->nsectors * (dev->logical_block_size / SECTOR_SIZE)
1400                 );
1401
1402        set_capacity(dev->gd, dev->nsectors);
1403
1404        if (dev->access_mode == RNBD_ACCESS_RO) {
1405                dev->read_only = true;
1406                set_disk_ro(dev->gd, true);
1407        } else {
1408                dev->read_only = false;
1409        }
1410
1411        if (!dev->rotational)
1412                blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
1413        add_disk(dev->gd);
1414}
1415
1416static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
1417{
1418        int idx = dev->clt_device_id;
1419
1420        dev->size = dev->nsectors * dev->logical_block_size;
1421
1422        dev->gd = blk_mq_alloc_disk(&dev->sess->tag_set, dev);
1423        if (IS_ERR(dev->gd))
1424                return PTR_ERR(dev->gd);
1425        dev->queue = dev->gd->queue;
1426        rnbd_init_mq_hw_queues(dev);
1427
1428        setup_request_queue(dev);
1429        rnbd_clt_setup_gen_disk(dev, idx);
1430        return 0;
1431}
1432
1433static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
1434                                      enum rnbd_access_mode access_mode,
1435                                      const char *pathname,
1436                                      u32 nr_poll_queues)
1437{
1438        struct rnbd_clt_dev *dev;
1439        int ret;
1440
1441        dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, NUMA_NO_NODE);
1442        if (!dev)
1443                return ERR_PTR(-ENOMEM);
1444
1445        /*
1446         * nr_cpu_ids: the number of softirq queues
1447         * nr_poll_queues: the number of polling queues
1448         */
1449        dev->hw_queues = kcalloc(nr_cpu_ids + nr_poll_queues,
1450                                 sizeof(*dev->hw_queues),
1451                                 GFP_KERNEL);
1452        if (!dev->hw_queues) {
1453                ret = -ENOMEM;
1454                goto out_alloc;
1455        }
1456
1457        mutex_lock(&ida_lock);
1458        ret = ida_simple_get(&index_ida, 0, 1 << (MINORBITS - RNBD_PART_BITS),
1459                             GFP_KERNEL);
1460        mutex_unlock(&ida_lock);
1461        if (ret < 0) {
1462                pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
1463                       pathname, sess->sessname, ret);
1464                goto out_queues;
1465        }
1466
1467        dev->pathname = kstrdup(pathname, GFP_KERNEL);
1468        if (!dev->pathname) {
1469                ret = -ENOMEM;
1470                goto out_queues;
1471        }
1472
1473        dev->clt_device_id      = ret;
1474        dev->sess               = sess;
1475        dev->access_mode        = access_mode;
1476        dev->nr_poll_queues     = nr_poll_queues;
1477        mutex_init(&dev->lock);
1478        refcount_set(&dev->refcount, 1);
1479        dev->dev_state = DEV_STATE_INIT;
1480
1481        /*
 1482         * We are called from a sysfs entry here, thus clt-sysfs is
 1483         * responsible for making sure the session does not disappear.
1484         */
1485        WARN_ON(!rnbd_clt_get_sess(sess));
1486
1487        return dev;
1488
1489out_queues:
1490        kfree(dev->hw_queues);
1491out_alloc:
1492        kfree(dev);
1493        return ERR_PTR(ret);
1494}
1495
1496static bool __exists_dev(const char *pathname, const char *sessname)
1497{
1498        struct rnbd_clt_session *sess;
1499        struct rnbd_clt_dev *dev;
1500        bool found = false;
1501
1502        list_for_each_entry(sess, &sess_list, list) {
1503                if (sessname && strncmp(sess->sessname, sessname,
1504                                        sizeof(sess->sessname)))
1505                        continue;
1506                mutex_lock(&sess->lock);
1507                list_for_each_entry(dev, &sess->devs_list, list) {
1508                        if (strlen(dev->pathname) == strlen(pathname) &&
1509                            !strcmp(dev->pathname, pathname)) {
1510                                found = true;
1511                                break;
1512                        }
1513                }
1514                mutex_unlock(&sess->lock);
1515                if (found)
1516                        break;
1517        }
1518
1519        return found;
1520}
1521
1522static bool exists_devpath(const char *pathname, const char *sessname)
1523{
1524        bool found;
1525
1526        mutex_lock(&sess_lock);
1527        found = __exists_dev(pathname, sessname);
1528        mutex_unlock(&sess_lock);
1529
1530        return found;
1531}
1532
1533static bool insert_dev_if_not_exists_devpath(struct rnbd_clt_dev *dev)
1534{
1535        bool found;
1536        struct rnbd_clt_session *sess = dev->sess;
1537
1538        mutex_lock(&sess_lock);
1539        found = __exists_dev(dev->pathname, sess->sessname);
1540        if (!found) {
1541                mutex_lock(&sess->lock);
1542                list_add_tail(&dev->list, &sess->devs_list);
1543                mutex_unlock(&sess->lock);
1544        }
1545        mutex_unlock(&sess_lock);
1546
1547        return found;
1548}
1549
1550static void delete_dev(struct rnbd_clt_dev *dev)
1551{
1552        struct rnbd_clt_session *sess = dev->sess;
1553
1554        mutex_lock(&sess->lock);
1555        list_del(&dev->list);
1556        mutex_unlock(&sess->lock);
1557}
1558
1559struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
1560                                           struct rtrs_addr *paths,
1561                                           size_t path_cnt, u16 port_nr,
1562                                           const char *pathname,
1563                                           enum rnbd_access_mode access_mode,
1564                                           u32 nr_poll_queues)
1565{
1566        struct rnbd_clt_session *sess;
1567        struct rnbd_clt_dev *dev;
1568        int ret;
1569
1570        if (exists_devpath(pathname, sessname))
1571                return ERR_PTR(-EEXIST);
1572
1573        sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr, nr_poll_queues);
1574        if (IS_ERR(sess))
1575                return ERR_CAST(sess);
1576
1577        dev = init_dev(sess, access_mode, pathname, nr_poll_queues);
1578        if (IS_ERR(dev)) {
1579                pr_err("map_device: failed to map device '%s' from session %s, can't initialize device, err: %ld\n",
1580                       pathname, sess->sessname, PTR_ERR(dev));
1581                ret = PTR_ERR(dev);
1582                goto put_sess;
1583        }
1584        if (insert_dev_if_not_exists_devpath(dev)) {
1585                ret = -EEXIST;
1586                goto put_dev;
1587        }
1588        ret = send_msg_open(dev, RTRS_PERMIT_WAIT);
1589        if (ret) {
1590                rnbd_clt_err(dev,
1591                              "map_device: failed, can't open remote device, err: %d\n",
1592                              ret);
1593                goto del_dev;
1594        }
1595        mutex_lock(&dev->lock);
1596        pr_debug("Opened remote device: session=%s, path='%s'\n",
1597                 sess->sessname, pathname);
1598        ret = rnbd_client_setup_device(dev);
1599        if (ret) {
1600                rnbd_clt_err(dev,
1601                              "map_device: Failed to configure device, err: %d\n",
1602                              ret);
1603                mutex_unlock(&dev->lock);
1604                goto send_close;
1605        }
1606
1607        rnbd_clt_info(dev,
1608                       "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
1609                       dev->gd->disk_name, dev->nsectors,
1610                       dev->logical_block_size, dev->physical_block_size,
1611                       dev->max_write_same_sectors, dev->max_discard_sectors,
1612                       dev->discard_granularity, dev->discard_alignment,
1613                       dev->secure_discard, dev->max_segments,
1614                       dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
1615
1616        mutex_unlock(&dev->lock);
1617        rnbd_clt_put_sess(sess);
1618
1619        return dev;
1620
1621send_close:
1622        send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
1623del_dev:
1624        delete_dev(dev);
1625put_dev:
1626        rnbd_clt_put_dev(dev);
1627put_sess:
1628        rnbd_clt_put_sess(sess);
1629
1630        return ERR_PTR(ret);
1631}
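/*
 * Illustrative sketch only (not part of the driver): a caller such as the
 * map_device sysfs store handler is expected to hand the parsed parameters
 * to rnbd_clt_map_device() and translate the ERR_PTR convention back into
 * an error code.  The session name, device path, port number and access
 * mode used below are hypothetical.
 */
static int rnbd_map_device_sketch(struct rtrs_addr *paths, size_t path_cnt)
{
        struct rnbd_clt_dev *dev;

        dev = rnbd_clt_map_device("mysess", paths, path_cnt, 0,
                                  "/dev/nullb0", RNBD_ACCESS_RW, 0);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        /* dev is returned with its initial reference held; it is dropped on unmap */
        return 0;
}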
1632
1633static void destroy_gen_disk(struct rnbd_clt_dev *dev)
1634{
1635        del_gendisk(dev->gd);
1636        blk_cleanup_disk(dev->gd);
1637}
1638
1639static void destroy_sysfs(struct rnbd_clt_dev *dev,
1640                          const struct attribute *sysfs_self)
1641{
1642        rnbd_clt_remove_dev_symlink(dev);
1643        if (dev->kobj.state_initialized) {
1644                if (sysfs_self)
1645                        /* Remove our own sysfs entry first to avoid a deadlock */
1646                        sysfs_remove_file_self(&dev->kobj, sysfs_self);
1647                kobject_del(&dev->kobj);
1648                kobject_put(&dev->kobj);
1649        }
1650}
1651
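/*
 * Unmap a device: refuse if it is still in use unless @force is set, mark
 * it DEV_STATE_UNMAPPED, unlink it from its session, tear down sysfs and
 * the gendisk, tell the server to close the remote device if it was mapped
 * and the transport is still up, and finally drop the initial reference
 * taken in init_dev().
 */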
1652int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
1653                           const struct attribute *sysfs_self)
1654{
1655        struct rnbd_clt_session *sess = dev->sess;
1656        int refcount, ret = 0;
1657        bool was_mapped;
1658
1659        mutex_lock(&dev->lock);
1660        if (dev->dev_state == DEV_STATE_UNMAPPED) {
1661                rnbd_clt_info(dev, "Device is already being unmapped\n");
1662                ret = -EALREADY;
1663                goto err;
1664        }
1665        refcount = refcount_read(&dev->refcount);
1666        if (!force && refcount > 1) {
1667                rnbd_clt_err(dev,
1668                              "Closing device failed, device is in use, (%d device users)\n",
1669                              refcount - 1);
1670                ret = -EBUSY;
1671                goto err;
1672        }
1673        was_mapped = (dev->dev_state == DEV_STATE_MAPPED);
1674        dev->dev_state = DEV_STATE_UNMAPPED;
1675        mutex_unlock(&dev->lock);
1676
1677        delete_dev(dev);
1678        destroy_sysfs(dev, sysfs_self);
1679        destroy_gen_disk(dev);
1680        if (was_mapped && sess->rtrs)
1681                send_msg_close(dev, dev->device_id, RTRS_PERMIT_WAIT);
1682
1683        rnbd_clt_info(dev, "Device is unmapped\n");
1684
1685        /* Likely the last reference is put here */
1686        rnbd_clt_put_dev(dev);
1687
1688        /*
1689         * From this point on both the device and the session may be gone.
1690         */
1691
1692        return 0;
1693err:
1694        mutex_unlock(&dev->lock);
1695
1696        return ret;
1697}
1698
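/*
 * Remapping only makes sense for a device that is mapped but currently
 * disconnected (DEV_STATE_MAPPED_DISCONNECTED): an unmapped device yields
 * -ENODEV, an already mapped one -EALREADY, and any other state -EBUSY.
 */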
1699int rnbd_clt_remap_device(struct rnbd_clt_dev *dev)
1700{
1701        int err;
1702
1703        mutex_lock(&dev->lock);
1704        if (dev->dev_state == DEV_STATE_MAPPED_DISCONNECTED)
1705                err = 0;
1706        else if (dev->dev_state == DEV_STATE_UNMAPPED)
1707                err = -ENODEV;
1708        else if (dev->dev_state == DEV_STATE_MAPPED)
1709                err = -EALREADY;
1710        else
1711                err = -EBUSY;
1712        mutex_unlock(&dev->lock);
1713        if (!err) {
1714                rnbd_clt_info(dev, "Remapping device.\n");
1715                err = send_msg_open(dev, RTRS_PERMIT_WAIT);
1716                if (err)
1717                        rnbd_clt_err(dev, "remap_device: %d\n", err);
1718        }
1719
1720        return err;
1721}
1722
1723static void unmap_device_work(struct work_struct *work)
1724{
1725        struct rnbd_clt_dev *dev;
1726
1727        dev = container_of(work, typeof(*dev), unmap_on_rmmod_work);
1728        rnbd_clt_unmap_device(dev, true, NULL);
1729}
1730
1731static void rnbd_destroy_sessions(void)
1732{
1733        struct rnbd_clt_session *sess, *sn;
1734        struct rnbd_clt_dev *dev, *tn;
1735
1736        /* First, forbid access through the sysfs interface */
1737        rnbd_clt_destroy_sysfs_files();
1738
1739        /*
1740         * At this point there can be no concurrent access to the sessions
1741         * list or to the per-session device lists:
1742         *   1. New sessions or devices cannot be created - the session
1743         *      sysfs files have already been removed.
1744         *   2. Devices and sessions cannot be removed - the unmap device
1745         *      sysfs callback takes a module reference.
1746         *   3. No I/O requests are in flight - each open of the block
1747         *      device takes a module reference in get_disk().
1748         *
1749         * However, user requests sent by the asynchronous send_msg_*()
1750         * functions may still be in flight, so the RTRS session must be
1751         * explicitly closed before the devices are unmapped.
1752         */
1753
1754        list_for_each_entry_safe(sess, sn, &sess_list, list) {
1755                if (!rnbd_clt_get_sess(sess))
1756                        continue;
1757                close_rtrs(sess);
1758                list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
1759                        /*
1760                         * Unmapping is done in parallel for one reason only:
1761                         * blk_cleanup_queue() takes around half a second, so
1762                         * with a huge number of devices the whole module
1763                         * unload procedure would otherwise take minutes.
1764                         */
1765                        INIT_WORK(&dev->unmap_on_rmmod_work, unmap_device_work);
1766                        queue_work(system_long_wq, &dev->unmap_on_rmmod_work);
1767                }
1768                rnbd_clt_put_sess(sess);
1769        }
1770        /* Wait for all scheduled unmap work items */
1771        flush_workqueue(system_long_wq);
1772        WARN_ON(!list_empty(&sess_list));
1773}
1774
1775static int __init rnbd_client_init(void)
1776{
1777        int err = 0;
1778
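        /*
         * The rnbd_msg_* structures below are exchanged with the server over
         * RTRS, so their sizes are part of the wire protocol and must not
         * change.
         */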
1779        BUILD_BUG_ON(sizeof(struct rnbd_msg_hdr) != 4);
1780        BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info) != 36);
1781        BUILD_BUG_ON(sizeof(struct rnbd_msg_sess_info_rsp) != 36);
1782        BUILD_BUG_ON(sizeof(struct rnbd_msg_open) != 264);
1783        BUILD_BUG_ON(sizeof(struct rnbd_msg_close) != 8);
1784        BUILD_BUG_ON(sizeof(struct rnbd_msg_open_rsp) != 56);
1785        rnbd_client_major = register_blkdev(rnbd_client_major, "rnbd");
1786        if (rnbd_client_major <= 0) {
1787                pr_err("Failed to load module, block device registration failed\n");
1788                return -EBUSY;
1789        }
1790
1791        err = rnbd_clt_create_sysfs_files();
1792        if (err) {
1793                pr_err("Failed to load module, creating sysfs device files failed, err: %d\n",
1794                       err);
1795                unregister_blkdev(rnbd_client_major, "rnbd");
1796        }
1797
1798        return err;
1799}
1800
1801static void __exit rnbd_client_exit(void)
1802{
1803        rnbd_destroy_sessions();
1804        unregister_blkdev(rnbd_client_major, "rnbd");
1805        ida_destroy(&index_ida);
1806}
1807
1808module_init(rnbd_client_init);
1809module_exit(rnbd_client_exit);
1810