linux/drivers/crypto/caam/caamrng.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 * Relationship of the job descriptors to the shared descriptor:
 *
 * ---------------                     --------------
 * | JobDesc #0  |-------------------->| ShareDesc  |
 * | *(buffer 0) |      |------------->| (generate) |
 * ---------------      |              | (move)     |
 *                      |              | (store)    |
 * ---------------      |              --------------
 * | JobDesc #1  |------|
 * | *(buffer 1) |
 * ---------------
 *
 * A job descriptor looks like this:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * ---------------------
 *
 * The ShareDesc never changes, and each job descriptor points to one of two
 * buffers for each device, from which the data will be copied into the
 * requested destination.
 */
  35
  36#include <linux/hw_random.h>
  37#include <linux/completion.h>
  38#include <linux/atomic.h>
  39
  40#include "compat.h"
  41
  42#include "regs.h"
  43#include "intern.h"
  44#include "desc_constr.h"
  45#include "jr.h"
  46#include "error.h"
  47
/*
 * Maximum buffer size: maximum number of random, cache-aligned bytes that
 * will be generated and moved to seq out ptr (extlen not allowed)
 */
#define RN_BUF_SIZE                     (0xffff / L1_CACHE_BYTES * \
                                         L1_CACHE_BYTES)

/* length of descriptors */
#define DESC_JOB_O_LEN                  (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
#define DESC_RNG_LEN                    (3 * CAAM_CMD_SZ)
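/*
 * DESC_JOB_O_LEN: job header plus shared-descriptor pointer, plus the
 * SEQ OUT PTR command and output buffer address appended in
 * rng_create_job_desc(). DESC_RNG_LEN: the shared header, RNG OPERATION
 * and SEQ FIFO STORE commands built in rng_create_sh_desc().
 */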

/* Buffer, its dma address and fill completion */
struct buf_data {
        u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
        dma_addr_t addr;
        struct completion filled;
        u32 hw_desc[DESC_JOB_O_LEN];
#define BUF_NOT_EMPTY 0
#define BUF_EMPTY 1
#define BUF_PENDING 2  /* Empty, but with a job pending - don't submit another */
        atomic_t empty;
};

/* rng per-device context */
struct caam_rng_ctx {
        struct device *jrdev;
        dma_addr_t sh_desc_dma;
        u32 sh_desc[DESC_RNG_LEN];
        unsigned int cur_buf_idx;
        int current_buf;
        struct buf_data bufs[2];
};

static struct caam_rng_ctx *rng_ctx;

static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
        if (bd->addr)
                dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
                                 DMA_FROM_DEVICE);
}

static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
{
        struct device *jrdev = ctx->jrdev;

        if (ctx->sh_desc_dma)
                dma_unmap_single(jrdev, ctx->sh_desc_dma,
                                 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
        rng_unmap_buf(jrdev, &ctx->bufs[0]);
        rng_unmap_buf(jrdev, &ctx->bufs[1]);
}

static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
        struct buf_data *bd;

        bd = container_of(desc, struct buf_data, hw_desc[0]);

        if (err)
                caam_jr_strstatus(jrdev, err);

        /*
         * Buffer refilled; invalidate the cache before waking any waiter
         * so the CPU never reads stale data.
         */
        dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);

        atomic_set(&bd->empty, BUF_NOT_EMPTY);
        complete(&bd->filled);

#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
                       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
#endif
}

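/*
 * Enqueue a fill job for one of the two buffers. With to_current set, the
 * job targets ctx->current_buf, otherwise the other buffer:
 * !(to_current ^ current_buf) equals current_buf when to_current is nonzero
 * and its complement when to_current is zero.
 */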
static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
{
        struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
        struct device *jrdev = ctx->jrdev;
        u32 *desc = bd->hw_desc;
        int err;

        dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
        init_completion(&bd->filled);
        err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
        if (err)
                complete(&bd->filled); /* don't wait on a failed job */
        else
                atomic_inc(&bd->empty); /* BUF_EMPTY -> BUF_PENDING */

        return err;
}

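/*
 * hw_random read callback: copy out of the current buffer and, once it is
 * exhausted, mark it BUF_EMPTY, resubmit its fill job, flip to the other
 * buffer and recurse once (with wait == false, since some data has already
 * been returned). Returns the number of bytes copied into @data.
 */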
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct caam_rng_ctx *ctx = rng_ctx;
        struct buf_data *bd = &ctx->bufs[ctx->current_buf];
        int next_buf_idx, copied_idx;
        int err;

        if (atomic_read(&bd->empty)) {
                /* try to submit a job if there wasn't one */
                if (atomic_read(&bd->empty) == BUF_EMPTY) {
                        err = submit_job(ctx, 1);
                        /* if we can't submit the job, we can't even wait */
                        if (err)
                                return 0;
                }
                /* no immediate data, so exit if not waiting */
                if (!wait)
                        return 0;

                /* wait for the pending job to complete */
                if (atomic_read(&bd->empty))
                        wait_for_completion(&bd->filled);
        }

        next_buf_idx = ctx->cur_buf_idx + max;
        dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
                __func__, ctx->current_buf, ctx->cur_buf_idx);

        /* if there is enough data in the current buffer */
        if (next_buf_idx < RN_BUF_SIZE) {
                memcpy(data, bd->buf + ctx->cur_buf_idx, max);
                ctx->cur_buf_idx = next_buf_idx;
                return max;
        }

        /* else, copy what's left... */
        copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
        memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
        ctx->cur_buf_idx = 0;
        atomic_set(&bd->empty, BUF_EMPTY);

        /* ...refill... */
        submit_job(ctx, 1);

        /* ...and use the next buffer */
        ctx->current_buf = !ctx->current_buf;
        dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);

        /* since some data has already been read, don't wait */
        return copied_idx + caam_read(rng, data + copied_idx,
                                      max - copied_idx, false);
}

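/*
 * Build the shared descriptor executed by every job: an RNG OPERATION that
 * generates random bytes, followed by a SEQ FIFO STORE that moves
 * RN_BUF_SIZE of them to the output sequence pointer supplied by the job
 * descriptor. It is built once and stays DMA-mapped for the lifetime of
 * the module.
 */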
static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
        struct device *jrdev = ctx->jrdev;
        u32 *desc = ctx->sh_desc;

        init_sh_desc(desc, HDR_SHARE_SERIAL);

        /* Generate random bytes */
        append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);

        /* Store bytes */
        append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);

        ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                          DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       desc, desc_bytes(desc), 1);
#endif
        return 0;
}

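/*
 * Build the per-buffer job descriptor: a header pointing at the shared
 * descriptor (HDR_SHARE_DEFER defers fetching it, and HDR_REVERSE runs the
 * job descriptor's commands first, so the SEQ OUT PTR below is set up
 * before the shared descriptor's store executes), plus the DMA address of
 * the buffer to fill.
 */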
static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
        struct device *jrdev = ctx->jrdev;
        struct buf_data *bd = &ctx->bufs[buf_id];
        u32 *desc = bd->hw_desc;
        int sh_len = desc_len(ctx->sh_desc);

        init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
                             HDR_REVERSE);

        bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
        if (dma_mapping_error(jrdev, bd->addr)) {
                dev_err(jrdev, "unable to map dst\n");
                return -ENOMEM;
        }

        append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
#ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       desc, desc_bytes(desc), 1);
#endif
        return 0;
}

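/*
 * hw_random cleanup callback: let any in-flight fill job finish before
 * tearing down the DMA mappings it would otherwise write into.
 */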
static void caam_cleanup(struct hwrng *rng)
{
        int i;
        struct buf_data *bd;

        for (i = 0; i < 2; i++) {
                bd = &rng_ctx->bufs[i];
                if (atomic_read(&bd->empty) == BUF_PENDING)
                        wait_for_completion(&bd->filled);
        }

        rng_unmap_ctx(rng_ctx);
}

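/*
 * Prime one buffer at init time: build its job descriptor, mark it empty,
 * submit a fill job and wait for the first batch of random bytes.
 */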
static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
        struct buf_data *bd = &ctx->bufs[buf_id];
        int err;

        err = rng_create_job_desc(ctx, buf_id);
        if (err)
                return err;

        atomic_set(&bd->empty, BUF_EMPTY);
        submit_job(ctx, buf_id == ctx->current_buf);
        wait_for_completion(&bd->filled);

        return 0;
}

static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
        int err;

        ctx->jrdev = jrdev;

        err = rng_create_sh_desc(ctx);
        if (err)
                return err;

        ctx->current_buf = 0;
        ctx->cur_buf_idx = 0;

        err = caam_init_buf(ctx, 0);
        if (err)
                return err;

        return caam_init_buf(ctx, 1);
}

static struct hwrng caam_rng = {
        .name           = "rng-caam",
        .cleanup        = caam_cleanup,
        .read           = caam_read,
};

static void __exit caam_rng_exit(void)
{
        /* Unregister first so no reader can race with the teardown below */
        hwrng_unregister(&caam_rng);
        caam_jr_free(rng_ctx->jrdev);
        kfree(rng_ctx);
}

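/*
 * Locate the CAAM controller, check that the RNG block is instantiated,
 * grab a job ring and prime both buffers before registering with the
 * hw_random core.
 */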
static int __init caam_rng_init(void)
{
        struct device *dev;
        struct device_node *dev_node;
        struct platform_device *pdev;
        struct caam_drv_private *priv;
        u32 rng_inst;
        int err;

        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
        if (!dev_node) {
                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
                if (!dev_node)
                        return -ENODEV;
        }

        pdev = of_find_device_by_node(dev_node);
        if (!pdev) {
                of_node_put(dev_node);
                return -ENODEV;
        }

        priv = dev_get_drvdata(&pdev->dev);
        of_node_put(dev_node);

        /*
         * If priv is NULL, it's probably because the caam driver wasn't
         * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
         */
        if (!priv) {
                err = -ENODEV;
                goto out_put_dev;
        }

        /* Check for an instantiated RNG before registration */
        if (priv->era < 10)
                rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
                            CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
        else
                rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

        if (!rng_inst) {
                err = -ENODEV;
                goto out_put_dev;
        }

        dev = caam_jr_alloc();
        if (IS_ERR(dev)) {
                pr_err("Job Ring Device allocation for transform failed\n");
                err = PTR_ERR(dev);
                goto out_put_dev;
        }

        rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
        if (!rng_ctx) {
                err = -ENOMEM;
                goto free_caam_alloc;
        }

        err = caam_init_rng(rng_ctx, dev);
        if (err)
                goto free_rng_ctx;

        dev_info(dev, "registering rng-caam\n");

        /* Register last, and clean up fully if registration fails */
        err = hwrng_register(&caam_rng);
        if (err)
                goto free_rng_ctx;

        put_device(&pdev->dev);
        return 0;

free_rng_ctx:
        kfree(rng_ctx);
free_caam_alloc:
        caam_jr_free(dev);
out_put_dev:
        put_device(&pdev->dev);
        return err;
}

module_init(caam_rng_init);
module_exit(caam_rng_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");