linux/drivers/crypto/hisilicon/sgl.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "qm.h"

#define HISI_ACC_SGL_SGE_NR_MIN         1
#define HISI_ACC_SGL_NR_MAX             256
#define HISI_ACC_SGL_ALIGN_SIZE         64
#define HISI_ACC_MEM_BLOCK_NR           5

struct acc_hw_sge {
        dma_addr_t buf;
        void *page_ctrl;
        __le32 len;
        __le32 pad;
        __le32 pad0;
        __le32 pad1;
};

/* use default sgl head size 64B */
struct hisi_acc_hw_sgl {
        dma_addr_t next_dma;
        __le16 entry_sum_in_chain;
        __le16 entry_sum_in_sgl;
        __le16 entry_length_in_sgl;
        __le16 pad0;
        __le64 pad1[5];
        struct hisi_acc_hw_sgl *next;
        struct acc_hw_sge sge_entries[];
} __aligned(1);

struct hisi_acc_sgl_pool {
        struct mem_block {
                struct hisi_acc_hw_sgl *sgl;
                dma_addr_t sgl_dma;
                size_t size;
        } mem_block[HISI_ACC_MEM_BLOCK_NR];
        u32 sgl_num_per_block;
        u32 block_num;
        u32 count;
        u32 sge_nr;
        size_t sgl_size;
};
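
/*
 * A size sketch, assuming 64-bit dma_addr_t and pointers: the
 * hisi_acc_hw_sgl header above is 8 + 4 * 2 + 5 * 8 + 8 = 64 bytes and
 * each acc_hw_sge is 8 + 8 + 4 * 4 = 32 bytes, so the sgl_size computed
 * in hisi_acc_create_sgl_pool() works out to 64 + 32 * sge_nr bytes per
 * hw sgl.
 */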

/**
 * hisi_acc_create_sgl_pool() - Create a hw sgl pool.
 * @dev: The device to which the hw sgl pool belongs.
 * @count: Count of hisi_acc_hw_sgl in the pool.
 * @sge_nr: The count of sge in each hw_sgl.
 *
 * This function creates a hw sgl pool; after this, the user can get hw sgl
 * memory from it.
 */
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
                                                   u32 count, u32 sge_nr)
{
        u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl;
        struct hisi_acc_sgl_pool *pool;
        struct mem_block *block;
        u32 i, j;

        if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
                return ERR_PTR(-EINVAL);

        sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
                   sizeof(struct hisi_acc_hw_sgl);

        /*
         * the pool may allocate a block of memory of size PAGE_SIZE * 2^(MAX_ORDER - 1),
         * block size may exceed 2^31 on ia64, so the max of block size is 2^31
         */
        block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
                           PAGE_SHIFT + MAX_ORDER - 1 : 31);
        sgl_num_per_block = block_size / sgl_size;
        block_num = count / sgl_num_per_block;
        remain_sgl = count % sgl_num_per_block;

        if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
            (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
                return ERR_PTR(-EINVAL);

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);
        block = pool->mem_block;

        for (i = 0; i < block_num; i++) {
                block[i].sgl = dma_alloc_coherent(dev, block_size,
                                                  &block[i].sgl_dma,
                                                  GFP_KERNEL);
                if (!block[i].sgl) {
                        dev_err(dev, "Fail to allocate hw SG buffer!\n");
                        goto err_free_mem;
                }

                block[i].size = block_size;
        }

        if (remain_sgl > 0) {
                block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
                                                  &block[i].sgl_dma,
                                                  GFP_KERNEL);
                if (!block[i].sgl) {
                        dev_err(dev, "Fail to allocate remained hw SG buffer!\n");
                        goto err_free_mem;
                }

                block[i].size = remain_sgl * sgl_size;
        }

        pool->sgl_num_per_block = sgl_num_per_block;
        pool->block_num = remain_sgl ? block_num + 1 : block_num;
        pool->count = count;
        pool->sgl_size = sgl_size;
        pool->sge_nr = sge_nr;

        return pool;

err_free_mem:
        for (j = 0; j < i; j++) {
                dma_free_coherent(dev, block_size, block[j].sgl,
                                  block[j].sgl_dma);
                memset(block + j, 0, sizeof(*block));
        }
        kfree(pool);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
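
/*
 * A minimal usage sketch for the pool API, assuming a caller with a struct
 * pci_dev *pdev and example values count = 512, sge_nr = 10 (none of these
 * come from this file):
 *
 *        struct hisi_acc_sgl_pool *pool;
 *
 *        pool = hisi_acc_create_sgl_pool(&pdev->dev, 512, 10);
 *        if (IS_ERR(pool))
 *                return PTR_ERR(pool);
 *
 *        // ... hand out hw sgls by index, then tear down ...
 *        hisi_acc_free_sgl_pool(&pdev->dev, pool);
 */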

/**
 * hisi_acc_free_sgl_pool() - Free a hw sgl pool.
 * @dev: The device to which the hw sgl pool belongs.
 * @pool: Pointer to the pool.
 *
 * This function frees the memory of a hw sgl pool.
 */
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
        struct mem_block *block;
        int i;

        if (!dev || !pool)
                return;

        block = pool->mem_block;

        for (i = 0; i < pool->block_num; i++)
                dma_free_coherent(dev, block[i].size, block[i].sgl,
                                  block[i].sgl_dma);

        kfree(pool);
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);

static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
                                           u32 index, dma_addr_t *hw_sgl_dma)
{
        struct mem_block *block;
        u32 block_index, offset;

        if (!pool || !hw_sgl_dma || index >= pool->count)
                return ERR_PTR(-EINVAL);

        block = pool->mem_block;
        block_index = index / pool->sgl_num_per_block;
        offset = index % pool->sgl_num_per_block;

        *hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
        return (void *)block[block_index].sgl + pool->sgl_size * offset;
}

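/*
 * A worked indexing example with illustrative numbers (not taken from the
 * driver): with sgl_num_per_block = 100 and sgl_size = 384, index 205
 * resolves to block_index = 205 / 100 = 2 and offset = 205 % 100 = 5,
 * i.e. the sixth hw sgl of the third memory block, at byte offset
 * 5 * 384 = 1920 into that block.
 */
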
static void sg_map_to_hw_sg(struct scatterlist *sgl,
                            struct acc_hw_sge *hw_sge)
{
        hw_sge->buf = sg_dma_address(sgl);
        hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
        hw_sge->page_ctrl = sg_virt(sgl);
}

static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
        u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);

        var++;
        hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
}

static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
        hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}

static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
        struct acc_hw_sge *hw_sge = hw_sgl->sge_entries;
        int i;

        for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) {
                hw_sge[i].page_ctrl = NULL;
                hw_sge[i].buf = 0;
                hw_sge[i].len = 0;
        }
}

/**
 * hisi_acc_sg_buf_map_to_hw_sgl() - Map a scatterlist to a hw sgl.
 * @dev: The device to which the hw sgl belongs.
 * @sgl: Scatterlist which will be mapped to the hw sgl.
 * @pool: Pool in which the hw sgl memory was allocated.
 * @index: Index of hisi_acc_hw_sgl in the pool.
 * @hw_sgl_dma: The dma address of the allocated hw sgl.
 *
 * This function builds a hw sgl according to the input sgl; the user can use
 * hw_sgl_dma as src/dst in its BD. Only a single hw sgl is currently
 * supported.
 */
struct hisi_acc_hw_sgl *
hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
                              struct scatterlist *sgl,
                              struct hisi_acc_sgl_pool *pool,
                              u32 index, dma_addr_t *hw_sgl_dma)
{
        struct hisi_acc_hw_sgl *curr_hw_sgl;
        dma_addr_t curr_sgl_dma = 0;
        struct acc_hw_sge *curr_hw_sge;
        struct scatterlist *sg;
        int i, sg_n, sg_n_mapped;

        if (!dev || !sgl || !pool || !hw_sgl_dma)
                return ERR_PTR(-EINVAL);

        sg_n = sg_nents(sgl);

        sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
        if (!sg_n_mapped) {
                dev_err(dev, "DMA mapping for SG error!\n");
                return ERR_PTR(-EINVAL);
        }

        if (sg_n_mapped > pool->sge_nr) {
                dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n");
                dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
                return ERR_PTR(-EINVAL);
        }

        curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
        if (IS_ERR(curr_hw_sgl)) {
                dev_err(dev, "Get SGL error!\n");
                dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
                return ERR_PTR(-ENOMEM);
        }

        curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
        curr_hw_sge = curr_hw_sgl->sge_entries;

        for_each_sg(sgl, sg, sg_n_mapped, i) {
                sg_map_to_hw_sg(sg, curr_hw_sge);
                inc_hw_sgl_sge(curr_hw_sgl);
                curr_hw_sge++;
        }

        update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
        *hw_sgl_dma = curr_sgl_dma;

        return curr_hw_sgl;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
 268
 269/**
 270 * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl.
 271 * @dev: The device which hw sgl belongs to.
 272 * @sgl: Related scatterlist.
 273 * @hw_sgl: Virtual address of hw sgl.
 274 *
 275 * This function unmaps allocated hw sgl.
 276 */
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
                           struct hisi_acc_hw_sgl *hw_sgl)
{
        if (!dev || !sgl || !hw_sgl)
                return;

        dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
        clear_hw_sgl_sge(hw_sgl);
        hw_sgl->entry_sum_in_chain = 0;
        hw_sgl->entry_sum_in_sgl = 0;
        hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
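
/*
 * A minimal map/unmap sketch, assuming dev, sgl, pool and index come from
 * the caller and using a hypothetical BD field for the dma address (none of
 * these are defined in this file):
 *
 *        struct hisi_acc_hw_sgl *hw_sgl;
 *        dma_addr_t hw_sgl_dma;
 *
 *        hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, sgl, pool, index,
 *                                               &hw_sgl_dma);
 *        if (IS_ERR(hw_sgl))
 *                return PTR_ERR(hw_sgl);
 *
 *        bd.data_src_addr = cpu_to_le64(hw_sgl_dma);
 *        // ... submit the BD and wait for completion ...
 *        hisi_acc_sg_buf_unmap(dev, sgl, hw_sgl);
 */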