linux/drivers/crypto/inside-secure/safexcel.c
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

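/*
 * eip197_trc_cache_init() below brings up the transformation record
 * cache (TRC): reset the cache, link all EIP197_CS_RC_MAX record slots
 * into one free chain, invalidate the hash table entries and program
 * the record sizes and hash table location. Assuming the
 * EIP197_CS_RC_NEXT/PREV macros pack slot indices as defined in
 * safexcel.h, the resulting free chain looks like:
 *
 *   head -> slot 0 <-> slot 1 <-> ... <-> slot (MAX - 1) <- tail
 *
 * with EIP197_RC_NULL terminating both ends.
 */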
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
        u32 val, htable_offset;
        int i;

        /* Enable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        val |= EIP197_TRC_ENABLE_0;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Clear all ECC errors */
        writel(0, priv->base + EIP197_TRC_ECCCTRL);

        /*
         * Make sure the cache memory is accessible by taking the record
         * cache into reset.
         */
        val = readl(priv->base + EIP197_TRC_PARAMS);
        val |= EIP197_TRC_PARAMS_SW_RESET;
        val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
        writel(val, priv->base + EIP197_TRC_PARAMS);

        /* Clear all records */
        for (i = 0; i < EIP197_CS_RC_MAX; i++) {
                u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

                writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
                       EIP197_CS_RC_PREV(EIP197_RC_NULL),
                       priv->base + offset);

                val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
                if (i == 0)
                        val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
                else if (i == EIP197_CS_RC_MAX - 1)
                        val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
                writel(val, priv->base + offset + sizeof(u32));
        }

        /* Clear the hash table entries */
        htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
        for (i = 0; i < 64; i++)
                writel(GENMASK(29, 0),
                       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

        /* Disable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Write head and tail pointers of the record free chain */
        val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
              EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
        writel(val, priv->base + EIP197_TRC_FREECHAIN);

        /* Configure the record cache #1 */
        val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
              EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
        writel(val, priv->base + EIP197_TRC_PARAMS2);

        /* Configure the record cache #2 */
        val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
              EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
              EIP197_TRC_PARAMS_HTABLE_SZ(2);
        writel(val, priv->base + EIP197_TRC_PARAMS);
}

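/*
 * Firmware images are stored big-endian: eip197_write_firmware() holds
 * the target engine in reset, copies the image word by word into the
 * classification RAM (be32_to_cpu() swaps on little-endian hosts), then
 * releases the reset. The ctrl/prog_en pair selects the engine (FPP or
 * PUE) whose program memory is being written.
 */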
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
                                  const struct firmware *fw, u32 ctrl,
                                  u32 prog_en)
{
        const u32 *data = (const u32 *)fw->data;
        u32 val;
        int i;

        /* Reset the engine to make its program memory accessible */
        writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
               priv->base + ctrl);

        /* Enable access to the program memory */
        writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);

        /* Write the firmware */
        for (i = 0; i < fw->size / sizeof(u32); i++)
                writel(be32_to_cpu(data[i]),
                       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

        /* Disable access to the program memory */
        writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);

        /* Release engine from reset */
        val = readl(priv->base + ctrl);
        val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
        writel(val, priv->base + ctrl);
}

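/*
 * Two firmware images are needed, fetched through the standard firmware
 * API: ifpp.bin and ipue.bin, matching the FW_IFPP and FW_IPUE indices
 * used below. Probing fails if either image cannot be loaded.
 */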
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
        const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
        const struct firmware *fw[FW_NB];
        int i, j, ret = 0;
        u32 val;

        for (i = 0; i < FW_NB; i++) {
                ret = request_firmware(&fw[i], fw_name[i], priv->dev);
                if (ret) {
                        dev_err(priv->dev,
                                "Failed to request firmware %s (%d)\n",
                                fw_name[i], ret);
                        goto release_fw;
                }
        }

        /* Clear the scratchpad memory */
        val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
        val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
               EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
               EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
               EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
        writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);

        memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
               EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

        eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
                              EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

        eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
                              EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);

release_fw:
        for (j = 0; j < i; j++)
                release_firmware(fw[j]);

        return ret;
}

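/*
 * For both ring setup helpers below, bits 27:25 of EIP197_HIA_OPTIONS
 * hold what the code calls hdw (presumably the host interface data
 * width): the bus width in words is BIT(hdw), and descriptor sizes are
 * rounded up to that width before being written into the fetch
 * configuration. A purely illustrative example, not values read from
 * real hardware: with hdw = 2 (a four-word bus) and a six-word
 * descriptor, cd_size_rnd = (6 + (BIT(2) - 1)) >> 2 = 2 bus words.
 */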
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, cd_size_rnd, val;
        int i;

        hdw = readl(priv->base + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].cdr.base_dma),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].cdr.base_dma),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
                       priv->config.cd_size,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
                writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.cd_offset),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                writel(val,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(5, 0),
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
        }

        return 0;
}

static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, rd_size_rnd, val;
        int i;

        hdw = readl(priv->base + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].rdr.base_dma),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].rdr.base_dma),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
                       priv->config.rd_size,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);

                writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.rd_offset),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
                writel(val,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(7, 0),
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);

                /* enable ring interrupt */
                val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
                val |= EIP197_RDR_IRQ(i);
                writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
        }

        return 0;
}

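/*
 * safexcel_hw_init() performs the whole bring-up sequence, in the order
 * the code below follows: master interface (endianness and cache
 * attributes), global interrupt reset, data fetch engine (DFE),
 * processing engine input thresholds, data store engine (DSE), EIP96
 * capability selection, per-ring counter/pointer reset, record cache
 * init, firmware load, and finally the CDR/RDR ring configuration.
 */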
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
        u32 version, val;
        int i, ret;

        /* Determine endianness and configure byte swap */
        version = readl(priv->base + EIP197_HIA_VERSION);
        val = readl(priv->base + EIP197_HIA_MST_CTRL);

        if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
                val |= EIP197_MST_CTRL_BYTE_SWAP;
        else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
                val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

        writel(val, priv->base + EIP197_HIA_MST_CTRL);

        /* Configure wr/rd cache values */
        writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
               EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
               priv->base + EIP197_MST_CTRL);

        /* Interrupts reset */

        /* Disable all global interrupts */
        writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);

        /* Clear any pending interrupt */
        writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);

        /* Data Fetch Engine configuration */

        /* Reset all DFE threads */
        writel(EIP197_DxE_THR_CTRL_RESET_PE,
               priv->base + EIP197_HIA_DFE_THR_CTRL);

        /* Reset HIA input interface arbiter */
        writel(EIP197_HIA_RA_PE_CTRL_RESET,
               priv->base + EIP197_HIA_RA_PE_CTRL);

        /* DMA transfer size to use */
        val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
        val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
        val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
        val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
        val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
        writel(val, priv->base + EIP197_HIA_DFE_CFG);

        /* Take the DFE threads out of reset */
        writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);

        /* Configure the processing engine thresholds */
        writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
               priv->base + EIP197_PE_IN_DBUF_THRES);
        writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
               priv->base + EIP197_PE_IN_TBUF_THRES);

        /* enable HIA input interface arbiter and rings */
        writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
               priv->base + EIP197_HIA_RA_PE_CTRL);

        /* Data Store Engine configuration */

        /* Reset all DSE threads */
        writel(EIP197_DxE_THR_CTRL_RESET_PE,
               priv->base + EIP197_HIA_DSE_THR_CTRL);

        /* Wait for all DSE threads to complete */
        while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
                GENMASK(15, 12)) != GENMASK(15, 12))
                ;

        /* DMA transfer size to use */
        val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
        val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
        val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
        val |= EIP197_HIA_DSE_CFG_ALLWAYS_BUFFERABLE;
        val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
        writel(val, priv->base + EIP197_HIA_DSE_CFG);

        /* Take the DSE threads out of reset */
        writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);

        /* Configure the processing engine thresholds */
        writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
               priv->base + EIP197_PE_OUT_DBUF_THRES);

        /* Processing Engine configuration */

        /* H/W capabilities selection */
        val = EIP197_FUNCTION_RSVD;
        val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
        val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
        val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
        val |= EIP197_ALG_SHA2;
        writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);

        /* Prepare the command descriptor rings */
        for (i = 0; i < priv->config.rings; i++) {
                /* Clear interrupts for this ring */
                writel(GENMASK(31, 0),
                       priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));

                /* Disable external triggering */
                writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);

                writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
                       priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
        }

        /* Prepare the result descriptor rings */
        for (i = 0; i < priv->config.rings; i++) {
                /* Disable external triggering */
                writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);

                /* Ring size */
                writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
                       priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
        }

        /* Enable command descriptor rings */
        writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
               priv->base + EIP197_HIA_DFE_THR_CTRL);

        /* Enable result descriptor rings */
        writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
               priv->base + EIP197_HIA_DSE_THR_CTRL);

        /* Clear any HIA interrupt */
        writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);

        eip197_trc_cache_init(priv);

        ret = eip197_load_firmwares(priv);
        if (ret)
                return ret;

        safexcel_hw_setup_cdesc_rings(priv);
        safexcel_hw_setup_rdesc_rings(priv);

        return 0;
}

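/*
 * safexcel_dequeue() drains up to EIP197_MAX_BATCH_SZ requests from the
 * ring's crypto queue, lets each context build its descriptors through
 * ctx->send(), and only advertises the whole batch to the hardware at
 * the end, with a single write to each of the CDR and RDR prepared
 * counters. When the queue cannot be fully drained (batch limit hit or
 * an allocation failed), need_dequeue makes the result worker call this
 * function again later.
 */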
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
        struct crypto_async_request *req, *backlog;
        struct safexcel_context *ctx;
        struct safexcel_request *request;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

        priv->ring[ring].need_dequeue = false;

        do {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
                req = crypto_dequeue_request(&priv->ring[ring].queue);
                spin_unlock_bh(&priv->ring[ring].queue_lock);

                if (!req)
                        goto finalize;

                request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
                if (!request) {
                        spin_lock_bh(&priv->ring[ring].queue_lock);
                        crypto_enqueue_request(&priv->ring[ring].queue, req);
                        spin_unlock_bh(&priv->ring[ring].queue_lock);

                        priv->ring[ring].need_dequeue = true;
                        goto finalize;
                }

                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, request, &commands, &results);
                if (ret) {
                        kfree(request);
                        req->complete(req, ret);
                        priv->ring[ring].need_dequeue = true;
                        goto finalize;
                }

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                spin_lock_bh(&priv->ring[ring].egress_lock);
                list_add_tail(&request->list, &priv->ring[ring].list);
                spin_unlock_bh(&priv->ring[ring].egress_lock);

                cdesc += commands;
                rdesc += results;
        } while (nreq++ < EIP197_MAX_BATCH_SZ);

finalize:
        if (nreq == EIP197_MAX_BATCH_SZ)
                priv->ring[ring].need_dequeue = true;
        else if (!nreq)
                return;

        spin_lock_bh(&priv->ring[ring].lock);

        /* Configure when we want an interrupt */
        writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
               EIP197_HIA_RDR_THRESH_PROC_PKT(nreq),
               priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_THRESH);

        /* let the RDR know we have pending descriptors */
        writel((rdesc * priv->config.rd_offset) << 2,
               priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PREP_COUNT);

        /* let the CDR know we have pending descriptors */
        writel((cdesc * priv->config.cd_offset) << 2,
               priv->base + EIP197_HIA_CDR(ring) + EIP197_HIA_xDR_PREP_COUNT);

        spin_unlock_bh(&priv->ring[ring].lock);
}

void safexcel_free_context(struct safexcel_crypto_priv *priv,
                           struct crypto_async_request *req,
                           int result_sz)
{
        struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);

        if (ctx->result_dma)
                dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
                                 DMA_FROM_DEVICE);

        if (ctx->cache) {
                dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
                                 DMA_TO_DEVICE);
                kfree(ctx->cache);
                ctx->cache = NULL;
                ctx->cache_sz = 0;
        }
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
        struct safexcel_command_desc *cdesc;

        /* Acknowledge the command descriptors */
        do {
                cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
                if (IS_ERR(cdesc)) {
                        dev_err(priv->dev,
                                "Could not retrieve the command descriptor\n");
                        return;
                }
        } while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_inv_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

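/*
 * Context records may be cached engine-side, so a stale record must be
 * explicitly invalidated (the cipher and hash implementations do this
 * when, for instance, a key changes). This queues a bare command/result
 * descriptor pair: an extended command descriptor carrying
 * CONTEXT_CONTROL_INV_TR and the context's DMA address, rolled back if
 * the matching result descriptor cannot be added.
 */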
int safexcel_invalidate_cache(struct crypto_async_request *async,
                              struct safexcel_context *ctx,
                              struct safexcel_crypto_priv *priv,
                              dma_addr_t ctxr_dma, int ring,
                              struct safexcel_request *request)
{
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        int ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Prepare command descriptor */
        cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
        if (IS_ERR(cdesc)) {
                ret = PTR_ERR(cdesc);
                goto unlock;
        }

        cdesc->control_data.type = EIP197_TYPE_EXTENDED;
        cdesc->control_data.options = 0;
        cdesc->control_data.refresh = 0;
        cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

        /* Prepare result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        request->req = async;
        goto unlock;

cdesc_rollback:
        safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}

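/*
 * The PROC_COUNT register exposes the number of processed packets in
 * bits 30:24, hence the shift and 7-bit mask below. For each processed
 * request: pop the oldest entry from the ring's list, let the
 * context-specific handler consume its result descriptors, acknowledge
 * those descriptors back through PROC_COUNT, and run the completion
 * callback with bottom halves disabled.
 */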
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
                                                     int ring)
{
        struct safexcel_request *sreq;
        struct safexcel_context *ctx;
        int ret, i, nreq, ndesc = 0;
        bool should_complete;

        nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
        nreq >>= 24;
        nreq &= GENMASK(6, 0);
        if (!nreq)
                return;

        for (i = 0; i < nreq; i++) {
                spin_lock_bh(&priv->ring[ring].egress_lock);
                sreq = list_first_entry(&priv->ring[ring].list,
                                        struct safexcel_request, list);
                list_del(&sreq->list);
                spin_unlock_bh(&priv->ring[ring].egress_lock);

                ctx = crypto_tfm_ctx(sreq->req->tfm);
                ndesc = ctx->handle_result(priv, ring, sreq->req,
                                           &should_complete, &ret);
                if (ndesc < 0) {
                        dev_err(priv->dev, "failed to handle result (%d)\n", ndesc);
                        return;
                }

                writel(EIP197_xDR_PROC_xD_PKT(1) |
                       EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
                       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);

                if (should_complete) {
                        local_bh_disable();
                        sreq->req->complete(sreq->req, ret);
                        local_bh_enable();
                }

                kfree(sreq);
        }
}

static void safexcel_handle_result_work(struct work_struct *work)
{
        struct safexcel_work_data *data =
                        container_of(work, struct safexcel_work_data, work);
        struct safexcel_crypto_priv *priv = data->priv;

        safexcel_handle_result_descriptor(priv, data->ring);

        if (priv->ring[data->ring].need_dequeue)
                safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
        struct safexcel_crypto_priv *priv;
        int ring;
};

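/*
 * Each ring has its own IRQ line. The hard IRQ handler only inspects
 * and acknowledges the ring's status bits; actual result processing is
 * deferred to the per-ring workqueue.
 */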
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring;
        u32 status, stat;

        status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
        if (!status)
                return IRQ_NONE;

        /* RDR interrupts */
        if (status & EIP197_RDR_IRQ(ring)) {
                stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);

                if (unlikely(stat & EIP197_xDR_ERR)) {
                        /*
                         * Fatal error, the RDR is unusable and must be
                         * reinitialized. This should not happen under
                         * normal circumstances.
                         */
                        dev_err(priv->dev, "RDR: fatal error\n");
                } else if (likely(stat & EIP197_xDR_THRESH)) {
                        queue_work(priv->ring[ring].workqueue,
                                   &priv->ring[ring].work_data.work);
                }

                /* ACK the interrupts */
                writel(stat & 0xff,
                       priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
        }

        /* ACK the interrupts */
        writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));

        return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
                                     irq_handler_t handler,
                                     struct safexcel_ring_irq_data *ring_irq_priv)
{
        int ret, irq = platform_get_irq_byname(pdev, name);

        if (irq < 0) {
                dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
                return irq;
        }

        ret = devm_request_irq(&pdev->dev, irq, handler, 0,
                               dev_name(&pdev->dev), ring_irq_priv);
        if (ret) {
                dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
                return ret;
        }

        return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
        &safexcel_alg_ecb_aes,
        &safexcel_alg_cbc_aes,
        &safexcel_alg_sha1,
        &safexcel_alg_sha224,
        &safexcel_alg_sha256,
        &safexcel_alg_hmac_sha1,
};

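/*
 * Registration is all or nothing: a failure unregisters whatever was
 * already registered. Nothing in the array above is called directly by
 * users; consumers go through the generic crypto API, along the lines
 * of (a sketch, not driver-specific code):
 *
 *   struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * and the crypto core selects this driver based on the priority of the
 * registered algorithms.
 */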
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
        int i, j, ret = 0;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                safexcel_algs[i]->priv = priv;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
                else
                        ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

                if (ret)
                        goto fail;
        }

        return 0;

fail:
        for (j = 0; j < i; j++) {
                if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
                else
                        crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
        }

        return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
                else
                        crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
        }
}

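/*
 * safexcel_configure() derives the descriptor layout from the same
 * HIA_OPTIONS data width field used by the ring setup helpers:
 * mask = BIT(hdw) - 1, and each offset is the descriptor size rounded
 * up to a bus-width multiple. Continuing the illustrative six-word
 * descriptor on a four-word bus: cd_offset = (6 + 3) & ~3 = 8 words.
 */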
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
        u32 val, mask;

        val = readl(priv->base + EIP197_HIA_OPTIONS);
        val = (val & GENMASK(27, 25)) >> 25;
        mask = BIT(val) - 1;

        val = readl(priv->base + EIP197_HIA_OPTIONS);
        priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

        priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
        priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

        priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
        priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

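/*
 * Probe order matters: MMIO and the (optional) clock first, then a
 * 64-bit DMA mask since descriptors carry 64-bit addresses, the dma
 * pool for context records, the per-ring descriptor rings, IRQs and
 * workqueues, and only then the hardware init and algorithm
 * registration.
 */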
static int safexcel_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct safexcel_crypto_priv *priv;
        int i, ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->base)) {
                dev_err(dev, "failed to get resource\n");
                return PTR_ERR(priv->base);
        }

        priv->clk = of_clk_get(dev->of_node, 0);
        if (!IS_ERR(priv->clk)) {
                ret = clk_prepare_enable(priv->clk);
                if (ret) {
                        dev_err(dev, "unable to enable clk (%d)\n", ret);
                        return ret;
                }
        } else {
                /* The clock isn't mandatory */
                if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                goto err_clk;

        priv->context_pool = dmam_pool_create("safexcel-context", dev,
                                              sizeof(struct safexcel_context_record),
                                              1, 0);
        if (!priv->context_pool) {
                ret = -ENOMEM;
                goto err_clk;
        }

        safexcel_configure(priv);

        for (i = 0; i < priv->config.rings; i++) {
                char irq_name[6] = {0}; /* "ringX\0" */
                char wq_name[9] = {0}; /* "wq_ringX\0" */
                int irq;
                struct safexcel_ring_irq_data *ring_irq;

                ret = safexcel_init_ring_descriptors(priv,
                                                     &priv->ring[i].cdr,
                                                     &priv->ring[i].rdr);
                if (ret)
                        goto err_clk;

                ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
                if (!ring_irq) {
                        ret = -ENOMEM;
                        goto err_clk;
                }

                ring_irq->priv = priv;
                ring_irq->ring = i;

                snprintf(irq_name, 6, "ring%d", i);
                irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
                                                ring_irq);
                if (irq < 0) {
                        ret = irq;
                        goto err_clk;
                }

                priv->ring[i].work_data.priv = priv;
                priv->ring[i].work_data.ring = i;
                INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);

                snprintf(wq_name, 9, "wq_ring%d", i);
                priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
                if (!priv->ring[i].workqueue) {
                        ret = -ENOMEM;
                        goto err_clk;
                }

                crypto_init_queue(&priv->ring[i].queue,
                                  EIP197_DEFAULT_RING_SIZE);

                INIT_LIST_HEAD(&priv->ring[i].list);
                spin_lock_init(&priv->ring[i].lock);
                spin_lock_init(&priv->ring[i].egress_lock);
                spin_lock_init(&priv->ring[i].queue_lock);
        }

        platform_set_drvdata(pdev, priv);
        atomic_set(&priv->ring_used, 0);

        ret = safexcel_hw_init(priv);
        if (ret) {
                dev_err(dev, "EIP h/w init failed (%d)\n", ret);
                goto err_clk;
        }

        ret = safexcel_register_algorithms(priv);
        if (ret) {
                dev_err(dev, "Failed to register algorithms (%d)\n", ret);
                goto err_clk;
        }

        return 0;

err_clk:
        clk_disable_unprepare(priv->clk);
        return ret;
}

static int safexcel_remove(struct platform_device *pdev)
{
        struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
        int i;

        safexcel_unregister_algorithms(priv);
        clk_disable_unprepare(priv->clk);

        for (i = 0; i < priv->config.rings; i++)
                destroy_workqueue(priv->ring[i].workqueue);

        return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
        { .compatible = "inside-secure,safexcel-eip197" },
        {},
};

static struct platform_driver crypto_safexcel = {
        .probe          = safexcel_probe,
        .remove         = safexcel_remove,
        .driver         = {
                .name   = "crypto-safexcel",
                .of_match_table = safexcel_of_match_table,
        },
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");