linux/drivers/crypto/hisilicon/sec/sec_drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HiSilicon SEC units found on Hip06 and Hip07
 *
 * Copyright (c) 2016-2017 HiSilicon Limited.
 */
#include <linux/acpi.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "sec_drv.h"

#define SEC_QUEUE_AR_FORCE_ALLOC                        0
#define SEC_QUEUE_AR_FORCE_NOALLOC                      1
#define SEC_QUEUE_AR_FORCE_DIS                          2

#define SEC_QUEUE_AW_FORCE_ALLOC                        0
#define SEC_QUEUE_AW_FORCE_NOALLOC                      1
#define SEC_QUEUE_AW_FORCE_DIS                          2

/* SEC_ALGSUB registers */
#define SEC_ALGSUB_CLK_EN_REG                           0x03b8
#define SEC_ALGSUB_CLK_DIS_REG                          0x03bc
#define SEC_ALGSUB_CLK_ST_REG                           0x535c
#define SEC_ALGSUB_RST_REQ_REG                          0x0aa8
#define SEC_ALGSUB_RST_DREQ_REG                         0x0aac
#define SEC_ALGSUB_RST_ST_REG                           0x5a54
#define   SEC_ALGSUB_RST_ST_IS_RST                      BIT(0)

#define SEC_ALGSUB_BUILD_RST_REQ_REG                    0x0ab8
#define SEC_ALGSUB_BUILD_RST_DREQ_REG                   0x0abc
#define SEC_ALGSUB_BUILD_RST_ST_REG                     0x5a5c
#define   SEC_ALGSUB_BUILD_RST_ST_IS_RST                BIT(0)

#define SEC_SAA_BASE                                    0x00001000UL

/* SEC_SAA registers */
#define SEC_SAA_CTRL_REG(x)     ((x) * SEC_SAA_ADDR_SIZE)
#define   SEC_SAA_CTRL_GET_QM_EN                        BIT(0)

#define SEC_ST_INTMSK1_REG                              0x0200
#define SEC_ST_RINT1_REG                                0x0400
#define SEC_ST_INTSTS1_REG                              0x0600
#define SEC_BD_MNG_STAT_REG                             0x0800
#define SEC_PARSING_STAT_REG                            0x0804
#define SEC_LOAD_TIME_OUT_CNT_REG                       0x0808
#define SEC_CORE_WORK_TIME_OUT_CNT_REG                  0x080c
#define SEC_BACK_TIME_OUT_CNT_REG                       0x0810
#define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG             0x0814
#define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG             0x0818
#define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG             0x081c
#define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG             0x0820
#define SEC_SAA_ACC_REG                                 0x083c
#define SEC_BD_NUM_CNT_IN_SEC_REG                       0x0858
#define SEC_LOAD_WORK_TIME_CNT_REG                      0x0860
#define SEC_CORE_WORK_WORK_TIME_CNT_REG                 0x0864
#define SEC_BACK_WORK_TIME_CNT_REG                      0x0868
#define SEC_SAA_IDLE_TIME_CNT_REG                       0x086c
#define SEC_SAA_CLK_CNT_REG                             0x0870

/* SEC_COMMON registers */
#define SEC_CLK_EN_REG                                  0x0000
#define SEC_CTRL_REG                                    0x0004

#define SEC_COMMON_CNT_CLR_CE_REG                       0x0008
#define   SEC_COMMON_CNT_CLR_CE_CLEAR                   BIT(0)
#define   SEC_COMMON_CNT_CLR_CE_SNAP_EN                 BIT(1)

#define SEC_SECURE_CTRL_REG                             0x000c
#define SEC_AXI_CACHE_CFG_REG                           0x0010
#define SEC_AXI_QOS_CFG_REG                             0x0014
#define SEC_IPV4_MASK_TABLE_REG                         0x0020
#define SEC_IPV6_MASK_TABLE_X_REG(x)    (0x0024 + (x) * 4)
#define SEC_FSM_MAX_CNT_REG                             0x0064

#define SEC_CTRL2_REG                                   0x0068
#define   SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M              GENMASK(3, 0)
#define   SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S              0
#define   SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M              GENMASK(6, 4)
#define   SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S              4
#define   SEC_CTRL2_CLK_GATE_EN                         BIT(7)
#define   SEC_CTRL2_ENDIAN_BD                           BIT(8)
#define   SEC_CTRL2_ENDIAN_BD_TYPE                      BIT(9)

#define SEC_CNT_PRECISION_CFG_REG                       0x006c
#define SEC_DEBUG_BD_CFG_REG                            0x0070
#define   SEC_DEBUG_BD_CFG_WB_NORMAL                    BIT(0)
#define   SEC_DEBUG_BD_CFG_WB_EN                        BIT(1)

#define SEC_Q_SIGHT_SEL                                 0x0074
#define SEC_Q_SIGHT_HIS_CLR                             0x0078
#define SEC_Q_VMID_CFG_REG(q)           (0x0100 + (q) * 4)
#define SEC_Q_WEIGHT_CFG_REG(q)         (0x200 + (q) * 4)
#define SEC_STAT_CLR_REG                                0x0a00
#define SEC_SAA_IDLE_CNT_CLR_REG                        0x0a04
#define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG                  0x0b00
#define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG               0x0b04
#define SEC_QM_BD_DFX_CFG_REG                           0x0b08
#define SEC_QM_BD_DFX_RESULT_REG                        0x0b0c
#define SEC_QM_BDID_DFX_RESULT_REG                      0x0b10
#define SEC_QM_BD_DFIFO_STATUS_REG                      0x0b14
#define SEC_QM_BD_DFX_CFG2_REG                          0x0b1c
#define SEC_QM_BD_DFX_RESULT2_REG                       0x0b20
#define SEC_QM_BD_IDFIFO_STATUS_REG                     0x0b18
#define SEC_QM_BD_DFIFO_STATUS2_REG                     0x0b28
#define SEC_QM_BD_IDFIFO_STATUS2_REG                    0x0b2c

#define SEC_HASH_IPV4_MASK                              0xfff00000
#define SEC_MAX_SAA_NUM                                 0xa
#define SEC_SAA_ADDR_SIZE                               0x1000

#define SEC_Q_INIT_REG                                  0x0
#define   SEC_Q_INIT_WO_STAT_CLEAR                      0x2
#define   SEC_Q_INIT_AND_STAT_CLEAR                     0x3

#define SEC_Q_CFG_REG                                   0x8
#define   SEC_Q_CFG_REORDER                             BIT(0)

#define SEC_Q_PROC_NUM_CFG_REG                          0x10
#define SEC_QUEUE_ENB_REG                               0x18

#define SEC_Q_DEPTH_CFG_REG                             0x50
#define   SEC_Q_DEPTH_CFG_DEPTH_M                       GENMASK(11, 0)
#define   SEC_Q_DEPTH_CFG_DEPTH_S                       0

#define SEC_Q_BASE_HADDR_REG                            0x54
#define SEC_Q_BASE_LADDR_REG                            0x58
#define SEC_Q_WR_PTR_REG                                0x5c
#define SEC_Q_OUTORDER_BASE_HADDR_REG                   0x60
#define SEC_Q_OUTORDER_BASE_LADDR_REG                   0x64
#define SEC_Q_OUTORDER_RD_PTR_REG                       0x68
#define SEC_Q_OT_TH_REG                                 0x6c

#define SEC_Q_ARUSER_CFG_REG                            0x70
#define   SEC_Q_ARUSER_CFG_FA                           BIT(0)
#define   SEC_Q_ARUSER_CFG_FNA                          BIT(1)
#define   SEC_Q_ARUSER_CFG_RINVLD                       BIT(2)
#define   SEC_Q_ARUSER_CFG_PKG                          BIT(3)

#define SEC_Q_AWUSER_CFG_REG                            0x74
#define   SEC_Q_AWUSER_CFG_FA                           BIT(0)
#define   SEC_Q_AWUSER_CFG_FNA                          BIT(1)
#define   SEC_Q_AWUSER_CFG_PKG                          BIT(2)

#define SEC_Q_ERR_BASE_HADDR_REG                        0x7c
#define SEC_Q_ERR_BASE_LADDR_REG                        0x80
#define SEC_Q_CFG_VF_NUM_REG                            0x84
#define SEC_Q_SOFT_PROC_PTR_REG                         0x88
#define SEC_Q_FAIL_INT_MSK_REG                          0x300
#define SEC_Q_FLOW_INT_MKS_REG                          0x304
#define SEC_Q_FAIL_RINT_REG                             0x400
#define SEC_Q_FLOW_RINT_REG                             0x404
#define SEC_Q_FAIL_INT_STATUS_REG                       0x500
#define SEC_Q_FLOW_INT_STATUS_REG                       0x504
#define SEC_Q_STATUS_REG                                0x600
#define SEC_Q_RD_PTR_REG                                0x604
#define SEC_Q_PRO_PTR_REG                               0x608
#define SEC_Q_OUTORDER_WR_PTR_REG                       0x60c
#define SEC_Q_OT_CNT_STATUS_REG                         0x610
#define SEC_Q_INORDER_BD_NUM_ST_REG                     0x650
#define SEC_Q_INORDER_GET_FLAG_ST_REG                   0x654
#define SEC_Q_INORDER_ADD_FLAG_ST_REG                   0x658
#define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG          0x65c
#define SEC_Q_RD_DONE_PTR_REG                           0x660
#define SEC_Q_CPL_Q_BD_NUM_ST_REG                       0x700
#define SEC_Q_CPL_Q_PTR_ST_REG                          0x704
#define SEC_Q_CPL_Q_H_ADDR_ST_REG                       0x708
#define SEC_Q_CPL_Q_L_ADDR_ST_REG                       0x70c
#define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG              0x710
#define SEC_Q_WRR_ID_CHECK_REG                          0x714
#define SEC_Q_CPLQ_FULL_CHECK_REG                       0x718
#define SEC_Q_SUCCESS_BD_CNT_REG                        0x800
#define SEC_Q_FAIL_BD_CNT_REG                           0x804
#define SEC_Q_GET_BD_CNT_REG                            0x808
#define SEC_Q_IVLD_CNT_REG                              0x80c
#define SEC_Q_BD_PROC_GET_CNT_REG                       0x810
#define SEC_Q_BD_PROC_DONE_CNT_REG                      0x814
#define SEC_Q_LAT_CLR_REG                               0x850
#define SEC_Q_PKT_LAT_MAX_REG                           0x854
#define SEC_Q_PKT_LAT_AVG_REG                           0x858
#define SEC_Q_PKT_LAT_MIN_REG                           0x85c
#define SEC_Q_ID_CLR_CFG_REG                            0x900
#define SEC_Q_1ST_BD_ERR_ID_REG                         0x904
#define SEC_Q_1ST_AUTH_FAIL_ID_REG                      0x908
#define SEC_Q_1ST_RD_ERR_ID_REG                         0x90c
#define SEC_Q_1ST_ECC2_ERR_ID_REG                       0x910
#define SEC_Q_1ST_IVLD_ID_REG                           0x914
#define SEC_Q_1ST_BD_WR_ERR_ID_REG                      0x918
#define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG                  0x91c
#define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG                  0x920

struct sec_debug_bd_info {
#define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M      GENMASK(22, 0)
        u32 soft_err_check;
#define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M      GENMASK(9, 0)
        u32 hard_err_check;
        u32 icv_mac1st_word;
#define SEC_DEBUG_BD_INFO_GET_ID_M              GENMASK(19, 0)
        u32 sec_get_id;
        /* W4---W15 */
        u32 reserv_left[12];
};

struct sec_out_bd_info {
#define SEC_OUT_BD_INFO_Q_ID_M                  GENMASK(11, 0)
#define SEC_OUT_BD_INFO_ECC_2BIT_ERR            BIT(14)
        u16 data;
};

#define SEC_MAX_DEVICES                         8
static struct sec_dev_info *sec_devices[SEC_MAX_DEVICES];
static DEFINE_MUTEX(sec_id_lock);

static int sec_queue_map_io(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;
        struct resource *res;

        res = platform_get_resource(to_platform_device(dev),
                                    IORESOURCE_MEM,
                                    2 + queue->queue_id);
        if (!res) {
                dev_err(dev, "Failed to get queue %u memory resource\n",
                        queue->queue_id);
                return -ENOMEM;
        }
        queue->regs = ioremap(res->start, resource_size(res));
        if (!queue->regs)
                return -ENOMEM;

        return 0;
}

static void sec_queue_unmap_io(struct sec_queue *queue)
{
        iounmap(queue->regs);
}

static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg)
{
        void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (ar_pkg)
                regval |= SEC_Q_ARUSER_CFG_PKG;
        else
                regval &= ~SEC_Q_ARUSER_CFG_PKG;
        writel_relaxed(regval, addr);

        return 0;
}

static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg)
{
        void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (aw_pkg)
                regval |= SEC_Q_AWUSER_CFG_PKG;
        else
                regval &= ~SEC_Q_AWUSER_CFG_PKG;
        writel_relaxed(regval, addr);

        return 0;
}

static int sec_clk_en(struct sec_dev_info *info)
{
        void __iomem *base = info->regs[SEC_COMMON];
        u32 i = 0;

        writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG);
        do {
                usleep_range(1000, 10000);
                if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7)
                        return 0;
                i++;
        } while (i < 10);
        dev_err(info->dev, "sec clock enable failed!\n");

        return -EIO;
}

static int sec_clk_dis(struct sec_dev_info *info)
{
        void __iomem *base = info->regs[SEC_COMMON];
        u32 i = 0;

        writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG);
        do {
                usleep_range(1000, 10000);
                if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0)
                        return 0;
                i++;
        } while (i < 10);
        dev_err(info->dev, "sec clock disable failed!\n");

        return -EIO;
}

static int sec_reset_whole_module(struct sec_dev_info *info)
{
        void __iomem *base = info->regs[SEC_COMMON];
        bool is_reset, b_is_reset;
        u32 i = 0;

        writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG);
        writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG);
        while (1) {
                usleep_range(1000, 10000);
                is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
                        SEC_ALGSUB_RST_ST_IS_RST;
                b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
                        SEC_ALGSUB_BUILD_RST_ST_IS_RST;
                if (is_reset && b_is_reset)
                        break;
                i++;
                if (i > 10) {
                        dev_err(info->dev, "Reset req failed\n");
                        return -EIO;
                }
        }

        i = 0;
        writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG);
        writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG);
        while (1) {
                usleep_range(1000, 10000);
                is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) &
                        SEC_ALGSUB_RST_ST_IS_RST;
                b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) &
                        SEC_ALGSUB_BUILD_RST_ST_IS_RST;
                if (!is_reset && !b_is_reset)
                        break;

                i++;
                if (i > 10) {
                        dev_err(info->dev, "Reset dreq failed\n");
                        return -EIO;
                }
        }

        return 0;
}

static void sec_bd_endian_little(struct sec_dev_info *info)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE);
        writel_relaxed(regval, addr);
}

/*
 * sec_cache_config - configure optimum cache placement
 */
static void sec_cache_config(struct sec_dev_info *info)
{
        struct iommu_domain *domain;
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG;

        domain = iommu_get_domain_for_dev(info->dev);

        /* Check that translation is occurring */
        if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
                writel_relaxed(0x44cf9e, addr);
        else
                writel_relaxed(0x4cfd9, addr);
}

static void sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
        regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) &
                SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M;
        writel_relaxed(regval, addr);
}

static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
        regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) &
                SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M;
        writel_relaxed(regval, addr);
}

static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (clkgate)
                regval |= SEC_CTRL2_CLK_GATE_EN;
        else
                regval &= ~SEC_CTRL2_CLK_GATE_EN;
        writel_relaxed(regval, addr);
}

static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (clr_ce)
                regval |= SEC_COMMON_CNT_CLR_CE_CLEAR;
        else
                regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR;
        writel_relaxed(regval, addr);
}

static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (snap_en)
                regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN;
        else
                regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN;
        writel_relaxed(regval, addr);
}

static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[])
{
        void __iomem *base = info->regs[SEC_SAA];
        int i;

        for (i = 0; i < 10; i++)
                writel_relaxed(hash_mask[i],
                               base + SEC_IPV6_MASK_TABLE_X_REG(i));
}

static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask)
{
        if (hash_mask & SEC_HASH_IPV4_MASK) {
                dev_err(info->dev, "SEC IPv4 hash mask input error!\n");
                return -EINVAL;
        }

        writel_relaxed(hash_mask,
                       info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG);

        return 0;
}

static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        /* Always disable write back of normal bd */
        regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL;

        if (cfg)
                regval &= ~SEC_DEBUG_BD_CFG_WB_EN;
        else
                regval |= SEC_DEBUG_BD_CFG_WB_EN;

        writel_relaxed(regval, addr);
}

static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en)
{
        void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE +
                SEC_SAA_CTRL_REG(saa_indx);
        u32 regval;

        regval = readl_relaxed(addr);
        if (en)
                regval |= SEC_SAA_CTRL_GET_QM_EN;
        else
                regval &= ~SEC_SAA_CTRL_GET_QM_EN;
        writel_relaxed(regval, addr);
}

static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx,
                             u32 saa_int_mask)
{
        writel_relaxed(saa_int_mask,
                       info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG +
                       saa_indx * SEC_SAA_ADDR_SIZE);
}

static void sec_streamid(struct sec_dev_info *info, int i)
{
        #define SEC_SID 0x600
        #define SEC_VMID 0

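        /* VMID in bits [7:0], stream ID in bits [23:8] of the VMID_CFG register */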
        writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)),
                       info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i));
}

static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc)
{
        void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (alloc == SEC_QUEUE_AR_FORCE_ALLOC) {
                regval |= SEC_Q_ARUSER_CFG_FA;
                regval &= ~SEC_Q_ARUSER_CFG_FNA;
        } else {
                regval &= ~SEC_Q_ARUSER_CFG_FA;
                regval |= SEC_Q_ARUSER_CFG_FNA;
        }

        writel_relaxed(regval, addr);
}

static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc)
{
        void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        if (alloc == SEC_QUEUE_AW_FORCE_ALLOC) {
                regval |= SEC_Q_AWUSER_CFG_FA;
                regval &= ~SEC_Q_AWUSER_CFG_FNA;
        } else {
                regval &= ~SEC_Q_AWUSER_CFG_FA;
                regval |= SEC_Q_AWUSER_CFG_FNA;
        }

        writel_relaxed(regval, addr);
}

static void sec_queue_reorder(struct sec_queue *queue, bool reorder)
{
        void __iomem *base = queue->regs;
        u32 regval;

        regval = readl_relaxed(base + SEC_Q_CFG_REG);
        if (reorder)
                regval |= SEC_Q_CFG_REORDER;
        else
                regval &= ~SEC_Q_CFG_REORDER;
        writel_relaxed(regval, base + SEC_Q_CFG_REG);
}

static void sec_queue_depth(struct sec_queue *queue, u32 depth)
{
        void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG;
        u32 regval;

        regval = readl_relaxed(addr);
        regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M;
        regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M;

        writel_relaxed(regval, addr);
}

static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr)
{
        writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG);
        writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG);
}

static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr)
{
        writel_relaxed(upper_32_bits(addr),
                       queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG);
        writel_relaxed(lower_32_bits(addr),
                       queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG);
}

static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr)
{
        writel_relaxed(upper_32_bits(addr),
                       queue->regs + SEC_Q_ERR_BASE_HADDR_REG);
        writel_relaxed(lower_32_bits(addr),
                       queue->regs + SEC_Q_ERR_BASE_LADDR_REG);
}

static void sec_queue_irq_disable(struct sec_queue *queue)
{
        writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_irq_enable(struct sec_queue *queue)
{
        writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG);
}

static void sec_queue_abn_irq_disable(struct sec_queue *queue)
{
        writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG);
}

static void sec_queue_stop(struct sec_queue *queue)
{
        disable_irq(queue->task_irq);
        sec_queue_irq_disable(queue);
        writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG);
}

static void sec_queue_start(struct sec_queue *queue)
{
        sec_queue_irq_enable(queue);
        enable_irq(queue->task_irq);
        queue->expected = 0;
        writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
        writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG);
}

static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info)
{
        int i;

        mutex_lock(&info->dev_lock);

        /* Get the first idle queue in SEC device */
        for (i = 0; i < SEC_Q_NUM; i++)
                if (!info->queues[i].in_use) {
                        info->queues[i].in_use = true;
                        info->queues_in_use++;
                        mutex_unlock(&info->dev_lock);

                        return &info->queues[i];
                }
        mutex_unlock(&info->dev_lock);

        return ERR_PTR(-ENODEV);
}

static int sec_queue_free(struct sec_queue *queue)
{
        struct sec_dev_info *info = queue->dev_info;

        if (queue->queue_id >= SEC_Q_NUM) {
                dev_err(info->dev, "No queue %u\n", queue->queue_id);
                return -ENODEV;
        }

        if (!queue->in_use) {
                dev_err(info->dev, "Queue %u is idle\n", queue->queue_id);
                return -ENODEV;
        }

        mutex_lock(&info->dev_lock);
        queue->in_use = false;
        info->queues_in_use--;
        mutex_unlock(&info->dev_lock);

        return 0;
}

static irqreturn_t sec_isr_handle_th(int irq, void *q)
{
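        /* Mask the queue interrupt; the threaded handler re-enables it */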
        sec_queue_irq_disable(q);
        return IRQ_WAKE_THREAD;
}

static irqreturn_t sec_isr_handle(int irq, void *q)
{
        struct sec_queue *queue = q;
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
        struct sec_queue_ring_cq *cq_ring = &queue->ring_cq;
        struct sec_out_bd_info *outorder_msg;
        struct sec_bd_info *msg;
        u32 ooo_read, ooo_write;
        void __iomem *base = queue->regs;
        int q_id;

        ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG);
        ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
        outorder_msg = cq_ring->vaddr + ooo_read;
        q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
        msg = msg_ring->vaddr + q_id;

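        /*
         * Completions can land in the CQ out of submission order, so mark
         * each finished slot in 'unprocessed' and only run callbacks, in
         * submission order, starting from the 'expected' index.
         */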
        while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) {
                /*
                 * Must be before callback otherwise blocks adding other chained
                 * elements
                 */
                set_bit(q_id, queue->unprocessed);
                if (q_id == queue->expected)
                        while (test_bit(queue->expected, queue->unprocessed)) {
                                clear_bit(queue->expected, queue->unprocessed);
                                msg = msg_ring->vaddr + queue->expected;
                                msg->w0 &= ~SEC_BD_W0_DONE;
                                msg_ring->callback(msg,
                                                queue->shadow[queue->expected]);
                                queue->shadow[queue->expected] = NULL;
                                queue->expected = (queue->expected + 1) %
                                        SEC_QUEUE_LEN;
                                atomic_dec(&msg_ring->used);
                        }

                ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN;
                writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG);
                ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG);
                outorder_msg = cq_ring->vaddr + ooo_read;
                q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;
                msg = msg_ring->vaddr + q_id;
        }

        sec_queue_irq_enable(queue);

        return IRQ_HANDLED;
}

static int sec_queue_irq_init(struct sec_queue *queue)
{
        struct sec_dev_info *info = queue->dev_info;
        int irq = queue->task_irq;
        int ret;

        ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle,
                                   IRQF_TRIGGER_RISING, queue->name, queue);
        if (ret) {
                dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret);
                return ret;
        }
        disable_irq(irq);

        return 0;
}

static int sec_queue_irq_uninit(struct sec_queue *queue)
{
        free_irq(queue->task_irq, queue);

        return 0;
}

static struct sec_dev_info *sec_device_get(void)
{
        struct sec_dev_info *sec_dev = NULL;
        struct sec_dev_info *this_sec_dev;
        int least_busy_n = SEC_Q_NUM + 1;
        int i;

        /* Find which one is least busy and use that first */
        for (i = 0; i < SEC_MAX_DEVICES; i++) {
                this_sec_dev = sec_devices[i];
                if (this_sec_dev &&
                    this_sec_dev->queues_in_use < least_busy_n) {
                        least_busy_n = this_sec_dev->queues_in_use;
                        sec_dev = this_sec_dev;
                }
        }

        return sec_dev;
}

static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info)
{
        struct sec_queue *queue;

        queue = sec_alloc_queue(info);
        if (IS_ERR(queue)) {
                dev_err(info->dev, "alloc sec queue failed! %ld\n",
                        PTR_ERR(queue));
                return queue;
        }

        sec_queue_start(queue);

        return queue;
}

/**
 * sec_queue_alloc_start_safe - get a hw queue from an appropriate instance
 *
 * This function does extremely simplistic load balancing. It does not take
 * into account NUMA locality of the accelerator, or which CPU has requested
 * the queue.  Future work may focus on optimizing this in order to improve
 * full machine throughput.
 */
struct sec_queue *sec_queue_alloc_start_safe(void)
{
        struct sec_dev_info *info;
        struct sec_queue *queue = ERR_PTR(-ENODEV);

        mutex_lock(&sec_id_lock);
        info = sec_device_get();
        if (!info)
                goto unlock;

        queue = sec_queue_alloc_start(info);

unlock:
        mutex_unlock(&sec_id_lock);

        return queue;
}
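
/*
 * Minimal usage sketch (illustrative only, not part of this driver): a
 * consumer such as the sec_alg layer would pair this call with
 * sec_queue_stop_release() below, with bd and ctx owned by the caller:
 *
 *	struct sec_queue *q = sec_queue_alloc_start_safe();
 *
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 *	ret = sec_queue_send(q, &bd, ctx);	(may return -EAGAIN, see below)
 *	...
 *	sec_queue_stop_release(q);
 */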

/**
 * sec_queue_stop_release() - free up a hw queue for reuse
 * @queue: The queue we are done with.
 *
 * This will stop the current queue, terminating any transactions
 * that are in flight and returning it to the pool of available hw queues.
 */
int sec_queue_stop_release(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;
        int ret;

        sec_queue_stop(queue);

        ret = sec_queue_free(queue);
        if (ret)
                dev_err(dev, "Releasing queue failed %d\n", ret);

        return ret;
}

/**
 * sec_queue_empty() - Is this hardware queue currently empty?
 * @queue: The queue to test
 *
 * We need to know if a queue is empty for some of the chaining modes:
 * if it is not empty, we may need to hold the message in a software queue
 * until the hw queue has drained.
 */
bool sec_queue_empty(struct sec_queue *queue)
{
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

        return !atomic_read(&msg_ring->used);
}

/**
 * sec_queue_send() - queue up a single operation in the hw queue
 * @queue: The queue in which to put the message
 * @msg: The message
 * @ctx: Context to be put in the shadow array and passed back to cb on result.
 *
 * This function will return -EAGAIN if the queue is currently full.
 */
int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx)
{
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;
        void __iomem *base = queue->regs;
        u32 write, read;

        mutex_lock(&msg_ring->lock);
        read = readl(base + SEC_Q_RD_PTR_REG);
        write = readl(base + SEC_Q_WR_PTR_REG);
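        /* write == read means either empty or full; 'used' disambiguates */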
        if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) {
                mutex_unlock(&msg_ring->lock);
                return -EAGAIN;
        }
        memcpy(msg_ring->vaddr + write, msg, sizeof(*msg));
        queue->shadow[write] = ctx;
        write = (write + 1) % SEC_QUEUE_LEN;

        /* Ensure content updated before queue advance */
        wmb();
        writel(write, base + SEC_Q_WR_PTR_REG);

        atomic_inc(&msg_ring->used);
        mutex_unlock(&msg_ring->lock);

        return 0;
}
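
/*
 * Illustrative call pattern (a sketch only; assumes a caller such as the
 * sec_alg layer that owns queue, bd and ctx):
 *
 *	if (sec_queue_can_enqueue(queue, 1)) {
 *		ret = sec_queue_send(queue, &bd, ctx);
 *		if (ret == -EAGAIN)
 *			(the ring filled under us: back off and retry)
 *	}
 */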

bool sec_queue_can_enqueue(struct sec_queue *queue, int num)
{
        struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd;

        return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num;
}

static void sec_queue_hw_init(struct sec_queue *queue)
{
        sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FORCE_NOALLOC);
        sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FORCE_NOALLOC);
        sec_queue_ar_pkgattr(queue, 1);
        sec_queue_aw_pkgattr(queue, 1);

        /* Enable out of order queue */
        sec_queue_reorder(queue, true);

        /* Interrupt after a single complete element */
        writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG);

        sec_queue_depth(queue, SEC_QUEUE_LEN - 1);

        sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr);

        sec_queue_outorder_addr(queue, queue->ring_cq.paddr);

        sec_queue_errbase_addr(queue, queue->ring_db.paddr);

        writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG);

        sec_queue_abn_irq_disable(queue);
        sec_queue_irq_disable(queue);
        writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG);
}

static int sec_hw_init(struct sec_dev_info *info)
{
        struct iommu_domain *domain;
        u32 sec_ipv4_mask = 0;
        u32 sec_ipv6_mask[10] = {};
        u32 i;
        int ret;

        domain = iommu_get_domain_for_dev(info->dev);

        /*
         * Enable all available processing unit clocks.
         * Only the first cluster is usable with translations.
         */
        if (domain && (domain->type & __IOMMU_DOMAIN_PAGING))
                info->num_saas = 5;
        else
                info->num_saas = 10;

        writel_relaxed(GENMASK(info->num_saas - 1, 0),
                       info->regs[SEC_SAA] + SEC_CLK_EN_REG);

        /* 32 bit little endian */
        sec_bd_endian_little(info);

        sec_cache_config(info);

        /* Data axi port write and read outstanding config as per datasheet */
        sec_data_axiwr_otsd_cfg(info, 0x7);
        sec_data_axird_otsd_cfg(info, 0x7);

        /* Enable clock gating */
        sec_clk_gate_en(info, true);

        /* Set CNT_CYC register not read clear */
        sec_comm_cnt_cfg(info, false);

        /* Enable CNT_CYC */
        sec_commsnap_en(info, false);

        writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG);

        ret = sec_ipv4_hashmask(info, sec_ipv4_mask);
        if (ret) {
                dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret);
                return -EIO;
        }

        sec_ipv6_hashmask(info, sec_ipv6_mask);

        /* Do not use debug bd */
        sec_set_dbg_bd_cfg(info, 0);

        if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) {
                for (i = 0; i < SEC_Q_NUM; i++) {
                        sec_streamid(info, i);
                        /* Same QoS for all queues */
                        writel_relaxed(0x3f,
                                       info->regs[SEC_SAA] +
                                       SEC_Q_WEIGHT_CFG_REG(i));
                }
        }

        for (i = 0; i < info->num_saas; i++) {
                sec_saa_getqm_en(info, i, 1);
                sec_saa_int_mask(info, i, 0);
        }

        return 0;
}

static void sec_hw_exit(struct sec_dev_info *info)
{
        int i;

        for (i = 0; i < SEC_MAX_SAA_NUM; i++) {
                sec_saa_int_mask(info, i, (u32)~0);
                sec_saa_getqm_en(info, i, 0);
        }
}

static void sec_queue_base_init(struct sec_dev_info *info,
                                struct sec_queue *queue, int queue_id)
{
        queue->dev_info = info;
        queue->queue_id = queue_id;
        snprintf(queue->name, sizeof(queue->name),
                 "%s_%d", dev_name(info->dev), queue->queue_id);
}

static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev)
{
        struct resource *res;
        int i;

        for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);

                if (!res) {
                        dev_err(info->dev, "Memory resource %d not found\n", i);
                        return -EINVAL;
                }

                info->regs[i] = devm_ioremap(info->dev, res->start,
                                             resource_size(res));
                if (!info->regs[i]) {
                        dev_err(info->dev,
                                "Memory resource %d could not be remapped\n",
                                i);
                        return -EINVAL;
                }
        }

        return 0;
}

static int sec_base_init(struct sec_dev_info *info,
                         struct platform_device *pdev)
{
        int ret;

        ret = sec_map_io(info, pdev);
        if (ret)
                return ret;

        ret = sec_clk_en(info);
        if (ret)
                return ret;

        ret = sec_reset_whole_module(info);
        if (ret)
                goto sec_clk_disable;

        ret = sec_hw_init(info);
        if (ret)
                goto sec_clk_disable;

        return 0;

sec_clk_disable:
        sec_clk_dis(info);

        return ret;
}

static void sec_base_exit(struct sec_dev_info *info)
{
        sec_hw_exit(info);
        sec_clk_dis(info);
}

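/*
 * Ring sizes for the coherent DMA allocations below, each rounded up to a
 * whole number of pages.
 */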
#define SEC_Q_CMD_SIZE \
        round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE)
#define SEC_Q_CQ_SIZE \
        round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE)
#define SEC_Q_DB_SIZE \
        round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE)

static int sec_queue_res_cfg(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;
        struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd;
        struct sec_queue_ring_cq *ring_cq = &queue->ring_cq;
        struct sec_queue_ring_db *ring_db = &queue->ring_db;
        int ret;

        ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE,
                                             &ring_cmd->paddr, GFP_KERNEL);
        if (!ring_cmd->vaddr)
                return -ENOMEM;

        atomic_set(&ring_cmd->used, 0);
        mutex_init(&ring_cmd->lock);
        ring_cmd->callback = sec_alg_callback;

        ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE,
                                            &ring_cq->paddr, GFP_KERNEL);
        if (!ring_cq->vaddr) {
                ret = -ENOMEM;
                goto err_free_ring_cmd;
        }

        ring_db->vaddr = dma_alloc_coherent(dev, SEC_Q_DB_SIZE,
                                            &ring_db->paddr, GFP_KERNEL);
        if (!ring_db->vaddr) {
                ret = -ENOMEM;
                goto err_free_ring_cq;
        }

        queue->task_irq = platform_get_irq(to_platform_device(dev),
                                           queue->queue_id * 2 + 1);
        if (queue->task_irq <= 0) {
                ret = -EINVAL;
                goto err_free_ring_db;
        }

        return 0;

err_free_ring_db:
        dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
                          queue->ring_db.paddr);
err_free_ring_cq:
        dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
                          queue->ring_cq.paddr);
err_free_ring_cmd:
        dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
                          queue->ring_cmd.paddr);

        return ret;
}

static void sec_queue_free_ring_pages(struct sec_queue *queue)
{
        struct device *dev = queue->dev_info->dev;

        dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr,
                          queue->ring_db.paddr);
        dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr,
                          queue->ring_cq.paddr);
        dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr,
                          queue->ring_cmd.paddr);
}

static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue,
                            int queue_id)
{
        int ret;

        sec_queue_base_init(info, queue, queue_id);

        ret = sec_queue_res_cfg(queue);
        if (ret)
                return ret;

        ret = sec_queue_map_io(queue);
        if (ret) {
                dev_err(info->dev, "Queue map failed %d\n", ret);
                sec_queue_free_ring_pages(queue);
                return ret;
        }

        sec_queue_hw_init(queue);

        return 0;
}

static void sec_queue_unconfig(struct sec_dev_info *info,
                               struct sec_queue *queue)
{
        sec_queue_unmap_io(queue);
        sec_queue_free_ring_pages(queue);
}

static int sec_id_alloc(struct sec_dev_info *info)
{
        int ret = 0;
        int i;

        mutex_lock(&sec_id_lock);

        for (i = 0; i < SEC_MAX_DEVICES; i++)
                if (!sec_devices[i])
                        break;
        if (i == SEC_MAX_DEVICES) {
                ret = -ENOMEM;
                goto unlock;
        }
        info->sec_id = i;
        sec_devices[info->sec_id] = info;

unlock:
        mutex_unlock(&sec_id_lock);

        return ret;
}

static void sec_id_free(struct sec_dev_info *info)
{
        mutex_lock(&sec_id_lock);
        sec_devices[info->sec_id] = NULL;
        mutex_unlock(&sec_id_lock);
}

static int sec_probe(struct platform_device *pdev)
{
        struct sec_dev_info *info;
        struct device *dev = &pdev->dev;
        int i, j;
        int ret;

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                dev_err(dev, "Failed to set 64 bit dma mask %d\n", ret);
                return -ENODEV;
        }

        info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;

        info->dev = dev;
        mutex_init(&info->dev_lock);

        info->hw_sgl_pool = dmam_pool_create("sgl", dev,
                                             sizeof(struct sec_hw_sgl), 64, 0);
        if (!info->hw_sgl_pool) {
                dev_err(dev, "Failed to create sec sgl dma pool\n");
                return -ENOMEM;
        }

        ret = sec_base_init(info, pdev);
        if (ret) {
                dev_err(dev, "Base initialization failed! %d\n", ret);
                return ret;
        }

        for (i = 0; i < SEC_Q_NUM; i++) {
                ret = sec_queue_config(info, &info->queues[i], i);
                if (ret)
                        goto queues_unconfig;

                ret = sec_queue_irq_init(&info->queues[i]);
                if (ret) {
                        sec_queue_unconfig(info, &info->queues[i]);
                        goto queues_unconfig;
                }
        }

        ret = sec_algs_register();
        if (ret) {
                dev_err(dev, "Failed to register algorithms with crypto %d\n",
                        ret);
                goto queues_unconfig;
        }

        platform_set_drvdata(pdev, info);

        ret = sec_id_alloc(info);
        if (ret)
                goto algs_unregister;

        return 0;

algs_unregister:
        sec_algs_unregister();
queues_unconfig:
        for (j = i - 1; j >= 0; j--) {
                sec_queue_irq_uninit(&info->queues[j]);
                sec_queue_unconfig(info, &info->queues[j]);
        }
        sec_base_exit(info);

        return ret;
}

static int sec_remove(struct platform_device *pdev)
{
        struct sec_dev_info *info = platform_get_drvdata(pdev);
        int i;

        /* Unexpose as soon as possible, reuse during remove is fine */
        sec_id_free(info);

        sec_algs_unregister();

        for (i = 0; i < SEC_Q_NUM; i++) {
                sec_queue_irq_uninit(&info->queues[i]);
                sec_queue_unconfig(info, &info->queues[i]);
        }

        sec_base_exit(info);

        return 0;
}

static const __maybe_unused struct of_device_id sec_match[] = {
        { .compatible = "hisilicon,hip06-sec" },
        { .compatible = "hisilicon,hip07-sec" },
        {}
};
MODULE_DEVICE_TABLE(of, sec_match);

static const __maybe_unused struct acpi_device_id sec_acpi_match[] = {
        { "HISI02C1", 0 },
        { }
};
MODULE_DEVICE_TABLE(acpi, sec_acpi_match);

static struct platform_driver sec_driver = {
        .probe = sec_probe,
        .remove = sec_remove,
        .driver = {
                .name = "hisi_sec_platform_driver",
                .of_match_table = sec_match,
                .acpi_match_table = ACPI_PTR(sec_acpi_match),
        },
};
module_platform_driver(sec_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HiSilicon Security Accelerators");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Jonathan Cameron <jonathan.cameron@huawei.com>");