linux/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "cxgbit.h"

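/*
 * program one page pod (ppod): copy the pre-built ppod header, then fill
 * the address slots with the dma addresses of up to PPOD_PAGES_MAX pages
 * from the scatterlist, advancing *sg_pp/*sg_off so the caller can
 * continue with the next ppod; a NULL scatterlist zero-fills the slots.
 */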
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
                    struct cxgbi_task_tag_info *ttinfo,
                    struct scatterlist **sg_pp, unsigned int *sg_off)
{
        struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
        unsigned int offset = sg_off ? *sg_off : 0;
        dma_addr_t addr = 0UL;
        unsigned int len = 0;
        int i;

        memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

        if (sg) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);
        }

        for (i = 0; i < PPOD_PAGES_MAX; i++) {
                if (sg) {
                        ppod->addr[i] = cpu_to_be64(addr + offset);
                        offset += PAGE_SIZE;
                        if (offset == (len + sg->offset)) {
                                offset = 0;
                                sg = sg_next(sg);
                                if (sg) {
                                        addr = sg_dma_address(sg);
                                        len = sg_dma_len(sg);
                                }
                        }
                } else {
                        ppod->addr[i] = 0ULL;
                }
        }

        /*
         * the fifth address needs to be repeated in the next ppod, so do
         * not move sg
         */
        if (sg_pp) {
                *sg_pp = sg;
                *sg_off = offset;
        }

        if (offset == len) {
                offset = 0;
                if (sg) {
                        sg = sg_next(sg);
                        if (sg)
                                addr = sg_dma_address(sg);
                }
        }
        ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}

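/*
 * allocate an skb and build a ULP_TX_MEM_WRITE work request that carries
 * npods ppods as immediate data, targeting the ppod region at index idx.
 */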
static struct sk_buff *
cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
                       unsigned int idx, unsigned int npods, unsigned int tid)
{
        struct ulp_mem_io *req;
        struct ulptx_idata *idata;
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
        unsigned int dlen = npods << PPOD_SIZE_SHIFT;
        unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
                                sizeof(struct ulptx_idata) + dlen, 16);
        struct sk_buff *skb;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return NULL;

        req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
        INIT_ULPTX_WR(req, wr_len, 0, tid);
        req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
                FW_WR_ATOMIC_V(0));
        req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
                ULP_MEMIO_ORDER_V(0) |
                T5_ULP_MEMIO_IMM_V(1));
        req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
        req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
        req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));

        idata = (struct ulptx_idata *)(req + 1);
        idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
        idata->len = htonl(dlen);

        return skb;
}

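/*
 * fill the ppods in the work request's immediate data and queue the skb
 * on csk->ppodq, to be flushed to the adapter before the r2t that
 * advertises the ddp tag goes out.
 */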
static int
cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
                        struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
                        unsigned int npods, struct scatterlist **sg_pp,
                        unsigned int *sg_off)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct sk_buff *skb;
        struct ulp_mem_io *req;
        struct ulptx_idata *idata;
        struct cxgbi_pagepod *ppod;
        unsigned int i;

        skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
        if (!skb)
                return -ENOMEM;

        req = (struct ulp_mem_io *)skb->data;
        idata = (struct ulptx_idata *)(req + 1);
        ppod = (struct cxgbi_pagepod *)(idata + 1);

        for (i = 0; i < npods; i++, ppod++)
                cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);

        __skb_queue_tail(&csk->ppodq, skb);

        return 0;
}

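/*
 * write all ppods for this task, at most ULPMEM_IDATA_MAX_NPPODS per
 * work request.
 */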
static int
cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
                   struct cxgbi_task_tag_info *ttinfo)
{
        unsigned int pidx = ttinfo->idx;
        unsigned int npods = ttinfo->npods;
        unsigned int i, cnt;
        struct scatterlist *sg = ttinfo->sgl;
        unsigned int offset = 0;
        int ret = 0;

        for (i = 0; i < npods; i += cnt, pidx += cnt) {
                cnt = npods - i;

                if (cnt > ULPMEM_IDATA_MAX_NPPODS)
                        cnt = ULPMEM_IDATA_MAX_NPPODS;

                ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
                                              &sg, &offset);
                if (ret < 0)
                        break;
        }

        return ret;
}

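/*
 * ddp-ability check: each entry must be 4-byte aligned, only the first
 * entry may have a nonzero offset, and every entry except the last must
 * end exactly on a page boundary (offset + length == PAGE_SIZE).
 */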
static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
                                unsigned int nents)
{
        unsigned int last_sgidx = nents - 1;
        unsigned int i;

        for (i = 0; i < nents; i++, sg = sg_next(sg)) {
                unsigned int len = sg->length + sg->offset;

                if ((sg->offset & 0x3) || (i && sg->offset) ||
                    ((i != last_sgidx) && (len != PAGE_SIZE))) {
                        return -EINVAL;
                }
        }

        return 0;
}

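/*
 * reserve ppods for the command's scatterlist, dma-map it, build the
 * ppod header and queue the ppod writes; on success ttinfo->tag holds
 * the ddp tag to be advertised as the ttt of the r2t.
 */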
static int
cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
                   unsigned int xferlen)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct cxgbi_ppm *ppm = cdev2ppm(cdev);
        struct scatterlist *sgl = ttinfo->sgl;
        unsigned int sgcnt = ttinfo->nents;
        unsigned int sg_offset = sgl->offset;
        int ret;

        if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
                pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
                         ppm, ppm->tformat.pgsz_idx_dflt,
                         xferlen, ttinfo->nents);
                return -EINVAL;
        }

        if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
                return -EINVAL;

        ttinfo->nr_pages = (xferlen + sgl->offset +
                            (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

        /*
         * the ddp tag will be used for the ttt in the outgoing r2t pdu
         */
        ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
                                      &ttinfo->tag, 0);
        if (ret < 0)
                return ret;
        ttinfo->npods = ret;

        sgl->offset = 0;
        ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
        sgl->offset = sg_offset;
        if (!ret) {
                pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
                        __func__, 0, xferlen, sgcnt);
                goto rel_ppods;
        }

        cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
                                xferlen, &ttinfo->hdr);

        ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
        if (ret < 0) {
                __skb_queue_purge(&csk->ppodq);
                dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
                goto rel_ppods;
        }

        return 0;

rel_ppods:
        cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
        return -EINVAL;
}

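/*
 * set up ddp for a WRITE command if possible and hand the resulting ddp
 * tag to the iscsi layer as the target transfer tag of the r2t; on
 * failure the command falls back to non-ddp data placement.
 */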
void
cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                   struct iscsi_r2t *r2t)
{
        struct cxgbit_sock *csk = conn->context;
        struct cxgbit_device *cdev = csk->com.cdev;
        struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
        struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
        int ret = -EINVAL;

        if ((!ccmd->setup_ddp) ||
            (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
                goto out;

        ccmd->setup_ddp = false;

        ttinfo->sgl = cmd->se_cmd.t_data_sg;
        ttinfo->nents = cmd->se_cmd.t_data_nents;

        ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
        if (ret < 0) {
                pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
                        csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);

                ttinfo->sgl = NULL;
                ttinfo->nents = 0;
        } else {
                ccmd->release = true;
        }
out:
        pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
        r2t->targ_xfer_tag = ttinfo->tag;
}

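/*
 * undo cxgbit_ddp_reserve() when the command completes: release the
 * ppods and unmap the scatterlist; a command without a ddp-mapped sgl
 * instead holds a single page reference in ccmd->sg, dropped here.
 */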
void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
        struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);

        if (ccmd->release) {
                struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;

                if (ttinfo->sgl) {
                        struct cxgbit_sock *csk = conn->context;
                        struct cxgbit_device *cdev = csk->com.cdev;
                        struct cxgbi_ppm *ppm = cdev2ppm(cdev);

                        cxgbi_ppm_ppod_release(ppm, ttinfo->idx);

                        dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
                                     ttinfo->nents, DMA_FROM_DEVICE);
                } else {
                        put_page(sg_page(&ccmd->sg));
                }

                ccmd->release = false;
        }
}

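/*
 * set up the ppod manager for this adapter from the iscsi memory region
 * exported by the LLD, and enable ddp on the device if a usable default
 * page size and at least 1024 ppods are available.
 */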
int cxgbit_ddp_init(struct cxgbit_device *cdev)
{
        struct cxgb4_lld_info *lldi = &cdev->lldi;
        struct net_device *ndev = cdev->lldi.ports[0];
        struct cxgbi_tag_format tformat;
        unsigned int ppmax;
        int ret, i;

        if (!lldi->vr->iscsi.size) {
                pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
                return -EACCES;
        }

        ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;

        memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
        for (i = 0; i < 4; i++)
                tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
                                         & 0xF;
        cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);

        ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
                             cdev->lldi.pdev, &cdev->lldi, &tformat,
                             ppmax, lldi->iscsi_llimit,
                             lldi->vr->iscsi.start, 2);
        if (ret >= 0) {
                struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);

                if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
                    (ppm->ppmax >= 1024))
                        set_bit(CDEV_DDP_ENABLE, &cdev->flags);
                ret = 0;
        }

        return ret;
}