linux/drivers/crypto/inside-secure/safexcel.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"

static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");

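/*
 * Editor's note on eip197_trc_cache_init(): the summary below is inferred
 * from the register accesses in the function, not from vendor
 * documentation. The transform record cache (TRC) RAM is put into
 * software reset, every record is linked into a free chain (NEXT/PREV
 * indices, NULL-terminated at both ends), the hash table words are
 * cleared, and finally the free-chain head/tail pointers and the cache
 * geometry (small/large record sizes, hash table size) are programmed.
 * The EIP197B and EIP197D variants differ only in the geometry constants
 * selected at the top.
 */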
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
        u32 val, htable_offset;
        int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

        if (priv->version == EIP197B) {
                cs_rc_max = EIP197B_CS_RC_MAX;
                cs_ht_wc = EIP197B_CS_HT_WC;
                cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
                cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
        } else {
                cs_rc_max = EIP197D_CS_RC_MAX;
                cs_ht_wc = EIP197D_CS_HT_WC;
                cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
                cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
        }

        /* Enable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        val |= EIP197_TRC_ENABLE_0;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Clear all ECC errors */
        writel(0, priv->base + EIP197_TRC_ECCCTRL);

        /*
         * Take the record cache into software reset to make the cache
         * memory accessible.
         */
        val = readl(priv->base + EIP197_TRC_PARAMS);
        val |= EIP197_TRC_PARAMS_SW_RESET;
        val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
        writel(val, priv->base + EIP197_TRC_PARAMS);

        /* Clear all records and chain them into the free list */
        for (i = 0; i < cs_rc_max; i++) {
                u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

                writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
                       EIP197_CS_RC_PREV(EIP197_RC_NULL),
                       priv->base + offset);

                val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
                if (i == 0)
                        val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
                else if (i == cs_rc_max - 1)
                        val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
                writel(val, priv->base + offset + sizeof(u32));
        }

        /* Clear the hash table entries */
        htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
        for (i = 0; i < cs_ht_wc; i++)
                writel(GENMASK(29, 0),
                       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));

        /* Disable the record cache memory access */
        val = readl(priv->base + EIP197_CS_RAM_CTRL);
        val &= ~EIP197_TRC_ENABLE_MASK;
        writel(val, priv->base + EIP197_CS_RAM_CTRL);

        /* Write head and tail pointers of the record free chain */
        val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
              EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
        writel(val, priv->base + EIP197_TRC_FREECHAIN);

        /* Configure the record cache #1 */
        val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
              EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
        writel(val, priv->base + EIP197_TRC_PARAMS2);

        /* Configure the record cache #2 */
        val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
              EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
              EIP197_TRC_PARAMS_HTABLE_SZ(2);
        writel(val, priv->base + EIP197_TRC_PARAMS);
}

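/*
 * Editor's note (inferred from the code below): the firmware images are
 * stored as big-endian 32-bit words, so each word is byte-swapped with
 * be32_to_cpu() before being written into the classification RAM. The
 * engine is held in software reset while its program memory is written,
 * then released.
 */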
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
                                  const struct firmware *fw, int pe, u32 ctrl,
                                  u32 prog_en)
{
        const u32 *data = (const u32 *)fw->data;
        u32 val;
        int i;

        /* Reset the engine to make its program memory accessible */
        writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
               EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
               EIP197_PE(priv) + ctrl);

        /* Enable access to the program memory */
        writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

        /* Write the firmware */
        for (i = 0; i < fw->size / sizeof(u32); i++)
                writel(be32_to_cpu(data[i]),
                       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));

        /* Disable access to the program memory */
        writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

        /* Release engine from reset */
        val = readl(EIP197_PE(priv) + ctrl);
        val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
        writel(val, EIP197_PE(priv) + ctrl);
}

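/*
 * Editor's note: two firmware images are loaded per processing engine
 * from /lib/firmware/inside-secure/<variant>/. Going by the names alone
 * (an editor's reading, not vendor documentation), ifpp.bin appears to be
 * the input flow post-processor microcode and ipue.bin the input pull-up
 * engine microcode. For the EIP197B the bare file names are also tried as
 * a fallback, since older kernels looked the images up without the
 * per-variant directory.
 */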
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
        const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
        const struct firmware *fw[FW_NB];
        char fw_path[31], *dir = NULL;
        int i, j, ret = 0, pe;
        u32 val;

        switch (priv->version) {
        case EIP197B:
                dir = "eip197b";
                break;
        case EIP197D:
                dir = "eip197d";
                break;
        default:
                /* No firmware is required */
                return 0;
        }

        for (i = 0; i < FW_NB; i++) {
                snprintf(fw_path, sizeof(fw_path), "inside-secure/%s/%s",
                         dir, fw_name[i]);
                ret = request_firmware(&fw[i], fw_path, priv->dev);
                if (ret) {
                        if (priv->version != EIP197B)
                                goto release_fw;

                        /* Fall back to the old firmware location for the
                         * EIP197B.
                         */
                        ret = request_firmware(&fw[i], fw_name[i], priv->dev);
                        if (ret) {
                                dev_err(priv->dev,
                                        "Failed to request firmware %s (%d)\n",
                                        fw_name[i], ret);
                                goto release_fw;
                        }
                }
        }

        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Clear the scratchpad memory */
                val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
                val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
                       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
                       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
                       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
                writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

                memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
                          EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

                eip197_write_firmware(priv, fw[FW_IFPP], pe,
                                      EIP197_PE_ICE_FPP_CTRL(pe),
                                      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

                eip197_write_firmware(priv, fw[FW_IPUE], pe,
                                      EIP197_PE_ICE_PUE_CTRL(pe),
                                      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
        }

release_fw:
        for (j = 0; j < i; j++)
                release_firmware(fw[j]);

        return ret;
}

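/*
 * Editor's note on the ring setup below: bits 27:25 of the HIA_OPTIONS
 * register appear to encode the host interface data width (HDW) as a
 * power of two. The descriptor size is rounded up to that width
 * (cd_size_rnd) so that the fetch count programmed into the xDR
 * configuration register is expressed in full host words.
 */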
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, cd_size_rnd, val;
        int i;

        hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].cdr.base_dma),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].cdr.base_dma),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
                       priv->config.cd_size,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
                writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.cd_offset),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(5, 0),
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
        }

        return 0;
}

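/*
 * Editor's note: the result descriptor rings are set up like the command
 * rings above, with two additions visible in the code: result and control
 * buffering is enabled in the DMA configuration, and the per-ring RDR
 * interrupt is unmasked so the ring can signal processed results.
 */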
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
        u32 hdw, rd_size_rnd, val;
        int i;

        hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
        hdw &= GENMASK(27, 25);
        hdw >>= 25;

        rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

        for (i = 0; i < priv->config.rings; i++) {
                /* ring base address */
                writel(lower_32_bits(priv->ring[i].rdr.base_dma),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(upper_32_bits(priv->ring[i].rdr.base_dma),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
                       priv->config.rd_size,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

                writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
                       (EIP197_FETCH_COUNT * priv->config.rd_offset),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Configure DMA tx control */
                val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
                val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
                val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
                writel(val,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

                /* clear any pending interrupt */
                writel(GENMASK(7, 0),
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

                /* enable ring interrupt */
                val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
                val |= EIP197_RDR_IRQ(i);
                writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
        }

        return 0;
}

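/*
 * Editor's summary of the initialization sequence below, as implemented:
 * configure byte swapping and bus master settings, mask and acknowledge
 * all interrupts, configure the data fetch (DFE) and data store (DSE)
 * engines and the processing-engine thresholds per PE, program the
 * enabled protocol/algorithm capabilities, reset and size all command and
 * result descriptor rings, then (on EIP197 variants) initialize the
 * transform record cache and load the firmware before finally setting up
 * the rings.
 */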
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
        u32 version, val;
        int i, ret, pe;

        /* Determine endianness and configure byte swap */
        version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
        val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

        if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
                val |= EIP197_MST_CTRL_BYTE_SWAP;
        else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
                val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);

        /* For EIP197 set maximum number of TX commands to 2^5 = 32 */
        if (priv->version == EIP197B || priv->version == EIP197D)
                val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

        writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

        /* Configure wr/rd cache values */
        writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
               EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
               EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);

        /* Interrupts reset */

        /* Disable all global interrupts */
        writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

        /* Clear any pending interrupt */
        writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

        /* Processing Engine configuration */
        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Data Fetch Engine configuration */

                /* Reset all DFE threads */
                writel(EIP197_DxE_THR_CTRL_RESET_PE,
                       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                if (priv->version == EIP197B || priv->version == EIP197D) {
                        /* Reset HIA input interface arbiter */
                        writel(EIP197_HIA_RA_PE_CTRL_RESET,
                               EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
                }

                /* DMA transfer size to use */
                val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
                val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
                       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
                val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
                       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
                val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
                val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
                writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

                /* Take the DFE threads out of reset */
                writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                /* Configure the processing engine thresholds */
                writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
                       EIP197_PE_IN_xBUF_THRES_MAX(9),
                       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
                writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
                       EIP197_PE_IN_xBUF_THRES_MAX(7),
                       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

                if (priv->version == EIP197B || priv->version == EIP197D) {
                        /* enable HIA input interface arbiter and rings */
                        writel(EIP197_HIA_RA_PE_CTRL_EN |
                               GENMASK(priv->config.rings - 1, 0),
                               EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
                }

                /* Data Store Engine configuration */

                /* Reset all DSE threads */
                writel(EIP197_DxE_THR_CTRL_RESET_PE,
                       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

                /* Wait for all DSE threads to complete */
                while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
                        GENMASK(15, 12)) != GENMASK(15, 12))
                        ;

                /* DMA transfer size to use */
                val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
                val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
                       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
                val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
                val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
                /* FIXME: single write can cause instability on the EIP97,
                 * but disabling it hurts performance.
                 */
                if (priv->version == EIP197B || priv->version == EIP197D)
                        val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
                writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

                /* Take the DSE threads out of reset */
                writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

                /* Configure the processing engine thresholds */
                writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
                       EIP197_PE_OUT_DBUF_THRES_MAX(8),
                       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));

                /* Processing Engine configuration */

                /* Token & context configuration */
                val = EIP197_PE_EIP96_TOKEN_CTRL_CTX_UPDATES |
                      EIP197_PE_EIP96_TOKEN_CTRL_REUSE_CTX |
                      EIP197_PE_EIP96_TOKEN_CTRL_POST_REUSE_CTX;
                writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL(pe));

                /* H/W capabilities selection */
                val = EIP197_FUNCTION_RSVD;
                val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
                val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
                val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
                val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
                val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
                val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
                val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
                val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
                writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
        }

        /* Command Descriptor Rings prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Clear interrupts for this ring */
                writel(GENMASK(31, 0),
                       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

                /* Disable external triggering */
                writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

                writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
                       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
        }

        /* Result Descriptor Rings prepare */
        for (i = 0; i < priv->config.rings; i++) {
                /* Disable external triggering */
                writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

                /* Clear the pending prepared counter */
                writel(EIP197_xDR_PREP_CLR_COUNT,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

                /* Clear the pending processed counter */
                writel(EIP197_xDR_PROC_CLR_COUNT,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

                writel(0,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
                writel(0,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

                /* Ring size */
                writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
                       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
        }

        for (pe = 0; pe < priv->config.pes; pe++) {
                /* Enable command descriptor rings */
                writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
                       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

                /* Enable result descriptor rings */
                writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
                       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
        }

        /* Clear any HIA interrupt */
        writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

        if (priv->version == EIP197B || priv->version == EIP197D) {
                eip197_trc_cache_init(priv);

                ret = eip197_load_firmwares(priv);
                if (ret)
                        return ret;
        }

        safexcel_hw_setup_cdesc_rings(priv);
        safexcel_hw_setup_rdesc_rings(priv);

        return 0;
}

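/*
 * Editor's note: the threshold register programmed below implements
 * interrupt coalescing, i.e. the RDR only raises its interrupt once the
 * given number of packets has been processed, batching completions
 * instead of interrupting once per request.
 */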
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
                                       int ring)
{
        int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

        if (!coal)
                return;

        /* Configure when we want an interrupt */
        writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
               EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
               EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}

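/*
 * Editor's summary: safexcel_dequeue() drains the ring's crypto request
 * queue and pushes requests to the engine. A request that previously
 * failed for lack of descriptors (saved in ring->req/ring->backlog) is
 * retried first; once descriptors have been queued, the prepared-count
 * registers are written to hand them over to the CDR/RDR.
 */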
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
        struct crypto_async_request *req, *backlog;
        struct safexcel_context *ctx;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

        /* If a request wasn't properly dequeued because of a lack of
         * resources, process it first.
         */
        req = priv->ring[ring].req;
        backlog = priv->ring[ring].backlog;
        if (req)
                goto handle_req;

        while (true) {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
                req = crypto_dequeue_request(&priv->ring[ring].queue);
                spin_unlock_bh(&priv->ring[ring].queue_lock);

                if (!req) {
                        priv->ring[ring].req = NULL;
                        priv->ring[ring].backlog = NULL;
                        goto finalize;
                }

handle_req:
                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, &commands, &results);
                if (ret)
                        goto request_failed;

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                /* In case the send() helper did not issue any command to push
                 * to the engine because the input data was cached, continue to
                 * dequeue other requests as this is valid and not an error.
                 */
                if (!commands && !results)
                        continue;

                cdesc += commands;
                rdesc += results;
                nreq++;
        }

request_failed:
        /* Not enough resources to handle all the requests. Bail out and save
         * the request and the backlog for the next dequeue call (per-ring).
         */
        priv->ring[ring].req = req;
        priv->ring[ring].backlog = backlog;

finalize:
        if (!nreq)
                return;

        spin_lock_bh(&priv->ring[ring].lock);

        priv->ring[ring].requests += nreq;

        if (!priv->ring[ring].busy) {
                safexcel_try_push_requests(priv, ring);
                priv->ring[ring].busy = true;
        }

        spin_unlock_bh(&priv->ring[ring].lock);

        /* let the RDR know we have pending descriptors */
        writel((rdesc * priv->config.rd_offset) << 2,
               EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

        /* let the CDR know we have pending descriptors */
        writel((cdesc * priv->config.cd_offset) << 2,
               EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}

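/*
 * Editor's note on the error word checked below, as encoded in this
 * driver: bits 0-6 and bit 14 of the result descriptor's error_code
 * (mask 0x407f) are treated as fatal, bit 9 reports an authentication
 * failure (mapped to -EBADMSG), and anything else is a non-fatal error
 * reported as -EINVAL.
 */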
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
                                       struct safexcel_result_desc *rdesc)
{
        if (likely(!rdesc->result_data.error_code))
                return 0;

        if (rdesc->result_data.error_code & 0x407f) {
                /* Fatal error (bits 0-6, 14) */
                dev_err(priv->dev,
                        "cipher: result: result descriptor error (0x%x)\n",
                        rdesc->result_data.error_code);
                return -EINVAL;
        } else if (rdesc->result_data.error_code == BIT(9)) {
                /* Authentication failed */
                return -EBADMSG;
        }

        /* All other non-fatal errors */
        return -EINVAL;
}

inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
                                 int ring,
                                 struct safexcel_result_desc *rdesc,
                                 struct crypto_async_request *req)
{
        int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

        priv->ring[ring].rdr_req[i] = req;
}

inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
        int i = safexcel_ring_first_rdr_index(priv, ring);

        return priv->ring[ring].rdr_req[i];
}

void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
        struct safexcel_command_desc *cdesc;

        /* Acknowledge the command descriptors */
        do {
                cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
                if (IS_ERR(cdesc)) {
                        dev_err(priv->dev,
                                "Could not retrieve the command descriptor\n");
                        return;
                }
        } while (!cdesc->last_seg);
}

void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_inv_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

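/*
 * Editor's summary: context records may live in the engine's transform
 * record cache, so when a key changes the cached copy must be
 * invalidated. This helper queues a special command descriptor of type
 * EIP197_TYPE_EXTENDED carrying CONTEXT_CONTROL_INV_TR, plus a matching
 * result descriptor; if the result descriptor cannot be allocated, the
 * command descriptor is rolled back.
 */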
int safexcel_invalidate_cache(struct crypto_async_request *async,
                              struct safexcel_crypto_priv *priv,
                              dma_addr_t ctxr_dma, int ring)
{
        struct safexcel_command_desc *cdesc;
        struct safexcel_result_desc *rdesc;
        int ret = 0;

        /* Prepare command descriptor */
        cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
        if (IS_ERR(cdesc))
                return PTR_ERR(cdesc);

        cdesc->control_data.type = EIP197_TYPE_EXTENDED;
        cdesc->control_data.options = 0;
        cdesc->control_data.refresh = 0;
        cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

        /* Prepare result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        safexcel_rdr_req_set(priv, ring, rdesc, async);

        return ret;

cdesc_rollback:
        safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

        return ret;
}

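/*
 * Editor's summary: this is the threaded-IRQ bottom half for result
 * processing. It reads how many packets the RDR reports as processed,
 * completes each corresponding request, acknowledges the processed
 * descriptors, and loops if the hardware packet counter saturated. It
 * then updates the ring's request accounting under the ring lock.
 */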
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
                                                     int ring)
{
        struct crypto_async_request *req;
        struct safexcel_context *ctx;
        int ret, i, nreq, ndesc, tot_descs, handled = 0;
        bool should_complete;

handle_results:
        tot_descs = 0;

        nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
        nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
        nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
        if (!nreq)
                goto requests_left;

        for (i = 0; i < nreq; i++) {
                req = safexcel_rdr_req_get(priv, ring);

                ctx = crypto_tfm_ctx(req->tfm);
                ndesc = ctx->handle_result(priv, ring, req,
                                           &should_complete, &ret);
                if (ndesc < 0) {
                        dev_err(priv->dev, "failed to handle result (%d)\n",
                                ndesc);
                        goto acknowledge;
                }

                if (should_complete) {
                        local_bh_disable();
                        req->complete(req, ret);
                        local_bh_enable();
                }

                tot_descs += ndesc;
                handled++;
        }

acknowledge:
        if (i)
                writel(EIP197_xDR_PROC_xD_PKT(i) |
                       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
                       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);

        /* If the number of requests overflowed the counter, try to process
         * more requests.
         */
        if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
                goto handle_results;

requests_left:
        spin_lock_bh(&priv->ring[ring].lock);

        priv->ring[ring].requests -= handled;
        safexcel_try_push_requests(priv, ring);

        if (!priv->ring[ring].requests)
                priv->ring[ring].busy = false;

        spin_unlock_bh(&priv->ring[ring].lock);
}

static void safexcel_dequeue_work(struct work_struct *work)
{
        struct safexcel_work_data *data =
                        container_of(work, struct safexcel_work_data, work);

        safexcel_dequeue(data->priv, data->ring);
}

struct safexcel_ring_irq_data {
        struct safexcel_crypto_priv *priv;
        int ring;
};

static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring, rc = IRQ_NONE;
        u32 status, stat;

        status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
        if (!status)
                return rc;

        /* RDR interrupts */
        if (status & EIP197_RDR_IRQ(ring)) {
                stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

                if (unlikely(stat & EIP197_xDR_ERR)) {
                        /*
                         * Fatal error, the RDR is unusable and must be
                         * reinitialized. This should not happen under
                         * normal circumstances.
                         */
                        dev_err(priv->dev, "RDR: fatal error\n");
                } else if (likely(stat & EIP197_xDR_THRESH)) {
                        rc = IRQ_WAKE_THREAD;
                }

                /* ACK the interrupts */
                writel(stat & 0xff,
                       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
        }

        /* ACK the interrupts */
        writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

        return rc;
}

static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
        struct safexcel_ring_irq_data *irq_data = data;
        struct safexcel_crypto_priv *priv = irq_data->priv;
        int ring = irq_data->ring;

        safexcel_handle_result_descriptor(priv, ring);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return IRQ_HANDLED;
}

static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
                                     irq_handler_t handler,
                                     irq_handler_t threaded_handler,
                                     struct safexcel_ring_irq_data *ring_irq_priv)
{
        int ret, irq = platform_get_irq_byname(pdev, name);

        if (irq < 0) {
                dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
                return irq;
        }

        ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
                                        threaded_handler, IRQF_ONESHOT,
                                        dev_name(&pdev->dev), ring_irq_priv);
        if (ret) {
                dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
                return ret;
        }

        return irq;
}

static struct safexcel_alg_template *safexcel_algs[] = {
        &safexcel_alg_ecb_des,
        &safexcel_alg_cbc_des,
        &safexcel_alg_ecb_des3_ede,
        &safexcel_alg_cbc_des3_ede,
        &safexcel_alg_ecb_aes,
        &safexcel_alg_cbc_aes,
        &safexcel_alg_md5,
        &safexcel_alg_sha1,
        &safexcel_alg_sha224,
        &safexcel_alg_sha256,
        &safexcel_alg_sha384,
        &safexcel_alg_sha512,
        &safexcel_alg_hmac_md5,
        &safexcel_alg_hmac_sha1,
        &safexcel_alg_hmac_sha224,
        &safexcel_alg_hmac_sha256,
        &safexcel_alg_hmac_sha384,
        &safexcel_alg_hmac_sha512,
        &safexcel_alg_authenc_hmac_sha1_cbc_aes,
        &safexcel_alg_authenc_hmac_sha224_cbc_aes,
        &safexcel_alg_authenc_hmac_sha256_cbc_aes,
        &safexcel_alg_authenc_hmac_sha384_cbc_aes,
        &safexcel_alg_authenc_hmac_sha512_cbc_aes,
};

static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
        int i, j, ret = 0;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                safexcel_algs[i]->priv = priv;

                if (!(safexcel_algs[i]->engines & priv->version))
                        continue;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
                else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
                else
                        ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

                if (ret)
                        goto fail;
        }

        return 0;

fail:
        for (j = 0; j < i; j++) {
                if (!(safexcel_algs[j]->engines & priv->version))
                        continue;

                if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
                else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
                else
                        crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
        }

        return ret;
}

static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
                if (!(safexcel_algs[i]->engines & priv->version))
                        continue;

                if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
                        crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
                else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
                        crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
                else
                        crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
        }
}

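/*
 * Editor's note on the sizing arithmetic below: descriptor sizes are held
 * in 32-bit words, and offsets are rounded up to the host data width with
 * "(size + mask) & ~mask" where mask = BIT(hdw) - 1. As a purely
 * illustrative example (not the real descriptor size), a 10-word
 * descriptor with hdw = 2 would get an offset of 12 words.
 */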
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
        u32 val, mask = 0;

        val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

        /* Read number of PEs from the engine */
        switch (priv->version) {
        case EIP197B:
        case EIP197D:
                mask = EIP197_N_PES_MASK;
                break;
        default:
                mask = EIP97_N_PES_MASK;
        }
        priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

        val = (val & GENMASK(27, 25)) >> 25;
        mask = BIT(val) - 1;

        val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
        priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);

        priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
        priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

        priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
        priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}

static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
        struct safexcel_register_offsets *offsets = &priv->offsets;

        switch (priv->version) {
        case EIP197B:
        case EIP197D:
                offsets->hia_aic        = EIP197_HIA_AIC_BASE;
                offsets->hia_aic_g      = EIP197_HIA_AIC_G_BASE;
                offsets->hia_aic_r      = EIP197_HIA_AIC_R_BASE;
                offsets->hia_aic_xdr    = EIP197_HIA_AIC_xDR_BASE;
                offsets->hia_dfe        = EIP197_HIA_DFE_BASE;
                offsets->hia_dfe_thr    = EIP197_HIA_DFE_THR_BASE;
                offsets->hia_dse        = EIP197_HIA_DSE_BASE;
                offsets->hia_dse_thr    = EIP197_HIA_DSE_THR_BASE;
                offsets->hia_gen_cfg    = EIP197_HIA_GEN_CFG_BASE;
                offsets->pe             = EIP197_PE_BASE;
                break;
        case EIP97IES:
                offsets->hia_aic        = EIP97_HIA_AIC_BASE;
                offsets->hia_aic_g      = EIP97_HIA_AIC_G_BASE;
                offsets->hia_aic_r      = EIP97_HIA_AIC_R_BASE;
                offsets->hia_aic_xdr    = EIP97_HIA_AIC_xDR_BASE;
                offsets->hia_dfe        = EIP97_HIA_DFE_BASE;
                offsets->hia_dfe_thr    = EIP97_HIA_DFE_THR_BASE;
                offsets->hia_dse        = EIP97_HIA_DSE_BASE;
                offsets->hia_dse_thr    = EIP97_HIA_DSE_THR_BASE;
                offsets->hia_gen_cfg    = EIP97_HIA_GEN_CFG_BASE;
                offsets->pe             = EIP97_PE_BASE;
                break;
        }
}

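/*
 * Editor's summary of the probe sequence, as implemented below: map the
 * registers, grab the (optional) core and register clocks, set a 64-bit
 * DMA mask, create the context record DMA pool, read the engine
 * configuration, then set up each ring (descriptors, IRQs, per-ring
 * workqueue and request queue) before initializing the hardware and
 * registering the crypto algorithms.
 */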
static int safexcel_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct resource *res;
        struct safexcel_crypto_priv *priv;
        int i, ret;

        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = dev;
        priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

        if (priv->version == EIP197B || priv->version == EIP197D)
                priv->flags |= EIP197_TRC_CACHE;

        safexcel_init_register_offsets(priv);

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->base)) {
                dev_err(dev, "failed to get resource\n");
                return PTR_ERR(priv->base);
        }

        priv->clk = devm_clk_get(&pdev->dev, NULL);
        ret = PTR_ERR_OR_ZERO(priv->clk);
        /* The clock isn't mandatory */
        if (ret != -ENOENT) {
                if (ret)
                        return ret;

                ret = clk_prepare_enable(priv->clk);
                if (ret) {
                        dev_err(dev, "unable to enable clk (%d)\n", ret);
                        return ret;
                }
        }

        priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
        ret = PTR_ERR_OR_ZERO(priv->reg_clk);
        /* The clock isn't mandatory */
        if (ret != -ENOENT) {
                if (ret)
                        goto err_core_clk;

                ret = clk_prepare_enable(priv->reg_clk);
                if (ret) {
                        dev_err(dev, "unable to enable reg clk (%d)\n", ret);
                        goto err_core_clk;
                }
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret)
                goto err_reg_clk;

        priv->context_pool = dmam_pool_create("safexcel-context", dev,
                                              sizeof(struct safexcel_context_record),
                                              1, 0);
        if (!priv->context_pool) {
                ret = -ENOMEM;
                goto err_reg_clk;
        }

        safexcel_configure(priv);

        priv->ring = devm_kcalloc(dev, priv->config.rings,
                                  sizeof(*priv->ring),
                                  GFP_KERNEL);
        if (!priv->ring) {
                ret = -ENOMEM;
                goto err_reg_clk;
        }

        for (i = 0; i < priv->config.rings; i++) {
                char irq_name[6] = {0}; /* "ringX\0" */
                char wq_name[9] = {0}; /* "wq_ringX\0" */
                int irq;
                struct safexcel_ring_irq_data *ring_irq;

                ret = safexcel_init_ring_descriptors(priv,
                                                     &priv->ring[i].cdr,
                                                     &priv->ring[i].rdr);
                if (ret)
                        goto err_reg_clk;

                priv->ring[i].rdr_req = devm_kcalloc(dev,
                        EIP197_DEFAULT_RING_SIZE,
                        sizeof(*priv->ring[i].rdr_req),
                        GFP_KERNEL);
                if (!priv->ring[i].rdr_req) {
                        ret = -ENOMEM;
                        goto err_reg_clk;
                }

                ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
                if (!ring_irq) {
                        ret = -ENOMEM;
                        goto err_reg_clk;
                }

                ring_irq->priv = priv;
                ring_irq->ring = i;

                snprintf(irq_name, sizeof(irq_name), "ring%d", i);
                irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
                                                safexcel_irq_ring_thread,
                                                ring_irq);
                if (irq < 0) {
                        ret = irq;
                        goto err_reg_clk;
                }

                priv->ring[i].work_data.priv = priv;
                priv->ring[i].work_data.ring = i;
                INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

                snprintf(wq_name, sizeof(wq_name), "wq_ring%d", i);
                priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
                if (!priv->ring[i].workqueue) {
                        ret = -ENOMEM;
                        goto err_reg_clk;
                }

                priv->ring[i].requests = 0;
                priv->ring[i].busy = false;

                crypto_init_queue(&priv->ring[i].queue,
                                  EIP197_DEFAULT_RING_SIZE);

                spin_lock_init(&priv->ring[i].lock);
                spin_lock_init(&priv->ring[i].queue_lock);
        }

        platform_set_drvdata(pdev, priv);
        atomic_set(&priv->ring_used, 0);

        ret = safexcel_hw_init(priv);
        if (ret) {
                dev_err(dev, "EIP h/w init failed (%d)\n", ret);
                goto err_reg_clk;
        }

        ret = safexcel_register_algorithms(priv);
        if (ret) {
                dev_err(dev, "Failed to register algorithms (%d)\n", ret);
                goto err_reg_clk;
        }

        return 0;

err_reg_clk:
        clk_disable_unprepare(priv->reg_clk);
err_core_clk:
        clk_disable_unprepare(priv->clk);
        return ret;
}

static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
        int i;

        for (i = 0; i < priv->config.rings; i++) {
                /* clear any pending interrupt */
                writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
                writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

                /* Reset the CDR base address */
                writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

                /* Reset the RDR base address */
                writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
                writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
        }
}

static int safexcel_remove(struct platform_device *pdev)
{
        struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
        int i;

        safexcel_unregister_algorithms(priv);
        safexcel_hw_reset_rings(priv);

        /* Disable both clocks enabled in probe (the reg clock was leaked
         * by the original code).
         */
        clk_disable_unprepare(priv->reg_clk);
        clk_disable_unprepare(priv->clk);

        for (i = 0; i < priv->config.rings; i++)
                destroy_workqueue(priv->ring[i].workqueue);

        return 0;
}

static const struct of_device_id safexcel_of_match_table[] = {
        {
                .compatible = "inside-secure,safexcel-eip97ies",
                .data = (void *)EIP97IES,
        },
        {
                .compatible = "inside-secure,safexcel-eip197b",
                .data = (void *)EIP197B,
        },
        {
                .compatible = "inside-secure,safexcel-eip197d",
                .data = (void *)EIP197D,
        },
        {
                /* Deprecated. Kept for backward compatibility. */
                .compatible = "inside-secure,safexcel-eip97",
                .data = (void *)EIP97IES,
        },
        {
                /* Deprecated. Kept for backward compatibility. */
                .compatible = "inside-secure,safexcel-eip197",
                .data = (void *)EIP197B,
        },
        {},
};

static struct platform_driver crypto_safexcel = {
        .probe          = safexcel_probe,
        .remove         = safexcel_remove,
        .driver         = {
                .name   = "crypto-safexcel",
                .of_match_table = safexcel_of_match_table,
        },
};
module_platform_driver(crypto_safexcel);

MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");