linux/drivers/scsi/ibmvscsi_tgt/libsrp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * SCSI RDMA Protocol lib functions
 *
 * Copyright (C) 2006 FUJITA Tomonori <tomof@acm.org>
 * Copyright (C) 2016 Bryant G. Ly <bryantly@linux.vnet.ibm.com> IBM Corp.
 *
 ***********************************************************************/

#define pr_fmt(fmt)     "libsrp: " fmt

#include <linux/printk.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <scsi/srp.h>
#include <target/target_core_base.h>
#include "libsrp.h"
#include "ibmvscsi_tgt.h"

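/**
 * srp_iu_pool_alloc() - build the IU free pool over a preallocated ring
 * @q: queue whose pointer pool, item array and kfifo are initialized
 * @max: number of IU entries to create
 * @ring: receive buffers to bind, one per IU entry
 *
 * Fills a kfifo with pointers into @q->items so that srp_iu_get() and
 * srp_iu_put() can hand out and recycle IU entries in O(1).
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */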
static int srp_iu_pool_alloc(struct srp_queue *q, size_t max,
                             struct srp_buf **ring)
{
        struct iu_entry *iue;
        int i;

        q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL);
        if (!q->pool)
                return -ENOMEM;
        q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL);
        if (!q->items)
                goto free_pool;

        spin_lock_init(&q->lock);
        kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *));

        for (i = 0, iue = q->items; i < max; i++) {
                kfifo_in(&q->queue, (void *)&iue, sizeof(void *));
                iue->sbuf = ring[i];
                iue++;
        }
        return 0;

free_pool:
        kfree(q->pool);
        return -ENOMEM;
}

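/**
 * srp_iu_pool_free() - release the IU entries and pointer pool
 * @q: queue previously set up by srp_iu_pool_alloc()
 */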
static void srp_iu_pool_free(struct srp_queue *q)
{
        kfree(q->items);
        kfree(q->pool);
}

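/**
 * srp_ring_alloc() - allocate @max DMA-coherent receive buffers of @size bytes
 * @dev: device the buffers are allocated for
 * @max: number of buffers
 * @size: size of each buffer in bytes
 *
 * Return: the buffer array, or NULL on failure (any partial allocation is
 * unwound before returning).
 */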
static struct srp_buf **srp_ring_alloc(struct device *dev,
                                       size_t max, size_t size)
{
        struct srp_buf **ring;
        int i;

        ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL);
        if (!ring)
                return NULL;

        for (i = 0; i < max; i++) {
                ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL);
                if (!ring[i])
                        goto out;
                ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma,
                                                  GFP_KERNEL);
                if (!ring[i]->buf)
                        goto out;
        }
        return ring;

out:
        for (i = 0; i < max && ring[i]; i++) {
                if (ring[i]->buf) {
                        dma_free_coherent(dev, size, ring[i]->buf,
                                          ring[i]->dma);
                }
                kfree(ring[i]);
        }
        kfree(ring);

        return NULL;
}

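/**
 * srp_ring_free() - free a ring previously returned by srp_ring_alloc()
 * @dev: device the buffers were allocated for
 * @ring: buffer array to free
 * @max: number of buffers in @ring
 * @size: size of each buffer in bytes
 */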
static void srp_ring_free(struct device *dev, struct srp_buf **ring,
                          size_t max, size_t size)
{
        int i;

        for (i = 0; i < max; i++) {
                dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
                kfree(ring[i]);
        }
        kfree(ring);
}

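/**
 * srp_target_alloc() - set up the receive ring and IU pool for one target
 * @target: target instance to initialize
 * @dev: device used for DMA-coherent allocations
 * @nr: number of receive buffers / IU entries
 * @iu_size: size in bytes of each SRP IU buffer
 *
 * Return: 0 on success, a negative errno on failure.
 */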
int srp_target_alloc(struct srp_target *target, struct device *dev,
                     size_t nr, size_t iu_size)
{
        int err;

        spin_lock_init(&target->lock);

        target->dev = dev;

        target->srp_iu_size = iu_size;
        target->rx_ring_size = nr;
        target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size);
        if (!target->rx_ring)
                return -ENOMEM;
        err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring);
        if (err)
                goto free_ring;

        dev_set_drvdata(target->dev, target);
        return 0;

free_ring:
        srp_ring_free(target->dev, target->rx_ring, nr, iu_size);
        return err;
}

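/**
 * srp_target_free() - tear down a target initialized by srp_target_alloc()
 * @target: target to release
 */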
void srp_target_free(struct srp_target *target)
{
        dev_set_drvdata(target->dev, NULL);
        srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size,
                      target->srp_iu_size);
        srp_iu_pool_free(&target->iu_queue);
}

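/**
 * srp_iu_get() - take a free IU entry from the target's pool
 * @target: target whose IU pool is used
 *
 * Return: an initialized IU entry, or NULL if the fifo is unexpectedly
 * empty. The entry must be recycled with srp_iu_put() when the request
 * completes.
 */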
struct iu_entry *srp_iu_get(struct srp_target *target)
{
        struct iu_entry *iue = NULL;

        if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue,
                             sizeof(void *),
                             &target->iu_queue.lock) != sizeof(void *)) {
                WARN_ONCE(1, "unexpected fifo state");
                return NULL;
        }
        if (!iue)
                return iue;
        iue->target = target;
        iue->flags = 0;
        return iue;
}

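/**
 * srp_iu_put() - return an IU entry taken with srp_iu_get() to the pool
 * @iue: IU entry to recycle
 */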
void srp_iu_put(struct iu_entry *iue)
{
        kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue,
                        sizeof(void *), &iue->target->iu_queue.lock);
}

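/*
 * Transfer the data described by a single direct descriptor. With @dma_map
 * set, the se_cmd scatterlist is mapped around the rdma_io callback and the
 * transfer length is clamped to the smaller of the descriptor length and
 * the command's data length.
 */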
static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md,
                           enum dma_data_direction dir, srp_rdma_t rdma_io,
                           int dma_map, int ext_desc)
{
        struct iu_entry *iue = NULL;
        struct scatterlist *sg = NULL;
        int err, nsg = 0, len;

        if (dma_map) {
                iue = cmd->iue;
                sg = cmd->se_cmd.t_data_sg;
                nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
                                 DMA_BIDIRECTIONAL);
                if (!nsg) {
                        pr_err("failed to map %p %d\n", iue,
                               cmd->se_cmd.t_data_nents);
                        return 0;
                }
                len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len));
        } else {
                len = be32_to_cpu(md->len);
        }

        err = rdma_io(cmd, sg, nsg, md, 1, dir, len);

        if (dma_map)
                dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

        return err;
}

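/*
 * Transfer the data described by an indirect descriptor table. When all
 * descriptors fit in the IU, the in-place table is used directly; otherwise
 * the table is first copied in from the initiator via rdma_io() into a
 * temporary DMA-coherent buffer (external tables require both @ext_desc
 * and @dma_map).
 */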
static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
                             struct srp_indirect_buf *id,
                             enum dma_data_direction dir, srp_rdma_t rdma_io,
                             int dma_map, int ext_desc)
{
        struct iu_entry *iue = NULL;
        struct srp_direct_buf *md = NULL;
        struct scatterlist dummy, *sg = NULL;
        dma_addr_t token = 0;
        int err = 0;
        int nmd, nsg = 0, len;

        if (dma_map || ext_desc) {
                iue = cmd->iue;
                sg = cmd->se_cmd.t_data_sg;
        }

        nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf);

        if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) ||
            (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) {
                md = &id->desc_list[0];
                goto rdma;
        }

        if (ext_desc && dma_map) {
                md = dma_alloc_coherent(iue->target->dev,
                                        be32_to_cpu(id->table_desc.len),
                                        &token, GFP_KERNEL);
                if (!md) {
                        pr_err("Can't get dma memory %u\n",
                               be32_to_cpu(id->table_desc.len));
                        return -ENOMEM;
                }

                sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len));
                sg_dma_address(&dummy) = token;
                sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len);
                err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE,
                              be32_to_cpu(id->table_desc.len));
                if (err) {
                        pr_err("Error copying indirect table %d\n", err);
                        goto free_mem;
                }
        } else {
                pr_err("This command uses external indirect buffer\n");
                return -EINVAL;
        }

rdma:
        if (dma_map) {
                nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents,
                                 DMA_BIDIRECTIONAL);
                if (!nsg) {
                        pr_err("failed to map %p %d\n", iue,
                               cmd->se_cmd.t_data_nents);
                        err = -EIO;
                        goto free_mem;
                }
                len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len));
        } else {
                len = be32_to_cpu(id->len);
        }

        err = rdma_io(cmd, sg, nsg, md, nmd, dir, len);

        if (dma_map)
                dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL);

free_mem:
        if (token && dma_map) {
                dma_free_coherent(iue->target->dev,
                                  be32_to_cpu(id->table_desc.len), md, token);
        }
        return err;
}

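/*
 * Size in bytes of the data-out descriptor area that precedes the data-in
 * descriptors in an SRP_CMD's additional data, derived from the upper
 * nibble of buf_fmt.
 */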
static int data_out_desc_size(struct srp_cmd *cmd)
{
        int size = 0;
        u8 fmt = cmd->buf_fmt >> 4;

        switch (fmt) {
        case SRP_NO_DATA_DESC:
                break;
        case SRP_DATA_DESC_DIRECT:
                size = sizeof(struct srp_direct_buf);
                break;
        case SRP_DATA_DESC_INDIRECT:
                size = sizeof(struct srp_indirect_buf) +
                        sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt;
                break;
        default:
                pr_err("client error. Invalid data_out_format %x\n", fmt);
                break;
        }
        return size;
}

/**
 * srp_transfer_data() - move the data described by an SRP_CMD's descriptors
 * @cmd: target command being executed
 * @srp_cmd: the SRP IU carrying the data descriptors
 * @rdma_io: callback that performs the actual RDMA transfer
 * @dma_map: if set, map the se_cmd scatterlist around the transfer
 * @ext_desc: if set, allow fetching external indirect descriptor tables
 *
 * TODO: this can be called multiple times for a single command if it
 * has very long data.
 */
int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd,
                      srp_rdma_t rdma_io, int dma_map, int ext_desc)
{
        struct srp_direct_buf *md;
        struct srp_indirect_buf *id;
        enum dma_data_direction dir;
        int offset, err = 0;
        u8 format;

        if (!cmd->se_cmd.t_data_nents)
                return 0;

        offset = srp_cmd->add_cdb_len & ~3;

        dir = srp_cmd_direction(srp_cmd);
        if (dir == DMA_FROM_DEVICE)
                offset += data_out_desc_size(srp_cmd);

        if (dir == DMA_TO_DEVICE)
                format = srp_cmd->buf_fmt >> 4;
        else
                format = srp_cmd->buf_fmt & ((1U << 4) - 1);

        switch (format) {
        case SRP_NO_DATA_DESC:
                break;
        case SRP_DATA_DESC_DIRECT:
                md = (struct srp_direct_buf *)(srp_cmd->add_data + offset);
                err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc);
                break;
        case SRP_DATA_DESC_INDIRECT:
                id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset);
                err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map,
                                        ext_desc);
                break;
        default:
                pr_err("Unknown format %d %x\n", dir, format);
                err = -EINVAL;
        }

        return err;
}

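/**
 * srp_data_length() - total transfer length advertised by an SRP_CMD
 * @cmd: the SRP IU to inspect
 * @dir: transfer direction the caller is interested in
 *
 * Return: the length from the direct or indirect descriptor selected by
 * @dir, or 0 if the command carries no data descriptors.
 */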
u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir)
{
        struct srp_direct_buf *md;
        struct srp_indirect_buf *id;
        u64 len = 0;
        uint offset = cmd->add_cdb_len & ~3;
        u8 fmt;

        if (dir == DMA_TO_DEVICE) {
                fmt = cmd->buf_fmt >> 4;
        } else {
                fmt = cmd->buf_fmt & ((1U << 4) - 1);
                offset += data_out_desc_size(cmd);
        }

        switch (fmt) {
        case SRP_NO_DATA_DESC:
                break;
        case SRP_DATA_DESC_DIRECT:
                md = (struct srp_direct_buf *)(cmd->add_data + offset);
                len = be32_to_cpu(md->len);
                break;
        case SRP_DATA_DESC_INDIRECT:
                id = (struct srp_indirect_buf *)(cmd->add_data + offset);
                len = be32_to_cpu(id->len);
                break;
        default:
                pr_err("invalid data format %x\n", fmt);
                break;
        }
        return len;
}

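/**
 * srp_get_desc_table() - extract direction and data length from an SRP_CMD
 * @srp_cmd: the SRP IU to parse
 * @dir: set to DMA_FROM_DEVICE, DMA_TO_DEVICE or DMA_NONE based on buf_fmt
 * @data_len: set to the advertised transfer length, or 0 if the command
 * carries no data descriptors
 *
 * Return: always 0 in the current implementation; the out parameters
 * carry the result.
 */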
int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
                       u64 *data_len)
{
        struct srp_indirect_buf *idb;
        struct srp_direct_buf *db;
        uint add_cdb_offset;
        int rc;

        /*
         * The pointer computations below will only be compiled correctly
         * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
         * whether srp_cmd::add_data has been declared as a byte pointer.
         */
        BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
                     && !__same_type(srp_cmd->add_data[0], (u8)0));

        BUG_ON(!dir);
        BUG_ON(!data_len);

        rc = 0;
        *data_len = 0;

        *dir = DMA_NONE;

        if (srp_cmd->buf_fmt & 0xf)
                *dir = DMA_FROM_DEVICE;
        else if (srp_cmd->buf_fmt >> 4)
                *dir = DMA_TO_DEVICE;

        add_cdb_offset = srp_cmd->add_cdb_len & ~3;
        if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
            ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
                db = (struct srp_direct_buf *)(srp_cmd->add_data
                                               + add_cdb_offset);
                *data_len = be32_to_cpu(db->len);
        } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
                   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
                idb = (struct srp_indirect_buf *)(srp_cmd->add_data
                                                  + add_cdb_offset);

                *data_len = be32_to_cpu(idb->len);
        }
        return rc;
}

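/*
 * A minimal usage sketch (hypothetical caller code, not part of this
 * library): the adapter/vdev variables, the vscsis_rdma_io callback and
 * the ring size below are illustrative assumptions only.
 *
 *      // adapter probe: 32 receive IUs, one SRP IU buffer each
 *      err = srp_target_alloc(&adapter->target, &vdev->dev, 32,
 *                             SRP_MAX_IU_LEN);
 *
 *      // per incoming SRP_CMD: claim an IU, move the data, recycle the IU
 *      iue = srp_iu_get(&adapter->target);
 *      err = srp_transfer_data(cmd, srp_cmd, vscsis_rdma_io, 1, 1);
 *      srp_iu_put(iue);
 *
 *      // adapter teardown
 *      srp_target_free(&adapter->target);
 */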
MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions");
MODULE_AUTHOR("FUJITA Tomonori");
MODULE_LICENSE("GPL");