   1// SPDX-License-Identifier: GPL-2.0-only
   2/* Copyright (C) 2020 Marvell. */
   3
   4#include "otx2_cpt_common.h"
   5#include "otx2_cptlf.h"
   6#include "rvu_reg.h"
   7
   8#define CPT_TIMER_HOLD 0x03F
   9#define CPT_COUNT_HOLD 32
  10
  11static void cptlf_do_set_done_time_wait(struct otx2_cptlf_info *lf,
  12                                        int time_wait)
  13{
  14        union otx2_cptx_lf_done_wait done_wait;
  15
  16        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  17                                      OTX2_CPT_LF_DONE_WAIT);
  18        done_wait.s.time_wait = time_wait;
  19        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  20                         OTX2_CPT_LF_DONE_WAIT, done_wait.u);
  21}
  22
  23static void cptlf_do_set_done_num_wait(struct otx2_cptlf_info *lf, int num_wait)
  24{
  25        union otx2_cptx_lf_done_wait done_wait;
  26
  27        done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  28                                      OTX2_CPT_LF_DONE_WAIT);
  29        done_wait.s.num_wait = num_wait;
  30        otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
  31                         OTX2_CPT_LF_DONE_WAIT, done_wait.u);
  32}
  33
  34static void cptlf_set_done_time_wait(struct otx2_cptlfs_info *lfs,
  35                                     int time_wait)
  36{
  37        int slot;
  38
  39        for (slot = 0; slot < lfs->lfs_num; slot++)
  40                cptlf_do_set_done_time_wait(&lfs->lf[slot], time_wait);
  41}
  42
  43static void cptlf_set_done_num_wait(struct otx2_cptlfs_info *lfs, int num_wait)
  44{
  45        int slot;
  46
  47        for (slot = 0; slot < lfs->lfs_num; slot++)
  48                cptlf_do_set_done_num_wait(&lfs->lf[slot], num_wait);
  49}
  50
  51static int cptlf_set_pri(struct otx2_cptlf_info *lf, int pri)
  52{
  53        struct otx2_cptlfs_info *lfs = lf->lfs;
  54        union otx2_cptx_af_lf_ctrl lf_ctrl;
  55        int ret;
  56
  57        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
  58                                   CPT_AF_LFX_CTL(lf->slot),
  59                                   &lf_ctrl.u);
  60        if (ret)
  61                return ret;
  62
  63        lf_ctrl.s.pri = pri ? 1 : 0;
  64
  65        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
  66                                    CPT_AF_LFX_CTL(lf->slot),
  67                                    lf_ctrl.u);
  68        return ret;
  69}
  70
  71static int cptlf_set_eng_grps_mask(struct otx2_cptlf_info *lf,
  72                                   int eng_grps_mask)
  73{
  74        struct otx2_cptlfs_info *lfs = lf->lfs;
  75        union otx2_cptx_af_lf_ctrl lf_ctrl;
  76        int ret;
  77
  78        ret = otx2_cpt_read_af_reg(lfs->mbox, lfs->pdev,
  79                                   CPT_AF_LFX_CTL(lf->slot),
  80                                   &lf_ctrl.u);
  81        if (ret)
  82                return ret;
  83
  84        lf_ctrl.s.grp = eng_grps_mask;
  85
  86        ret = otx2_cpt_write_af_reg(lfs->mbox, lfs->pdev,
  87                                    CPT_AF_LFX_CTL(lf->slot),
  88                                    lf_ctrl.u);
  89        return ret;
  90}
  91
  92static int cptlf_set_grp_and_pri(struct otx2_cptlfs_info *lfs,
  93                                 int eng_grp_mask, int pri)
  94{
  95        int slot, ret = 0;
  96
  97        for (slot = 0; slot < lfs->lfs_num; slot++) {
  98                ret = cptlf_set_pri(&lfs->lf[slot], pri);
  99                if (ret)
 100                        return ret;
 101
 102                ret = cptlf_set_eng_grps_mask(&lfs->lf[slot], eng_grp_mask);
 103                if (ret)
 104                        return ret;
 105        }
 106        return ret;
 107}
 108
/*
 * Bring all attached LFs to a known hardware configuration.  The
 * instruction queues are disabled first, their base addresses and sizes
 * are programmed, done-interrupt coalescing (time and count thresholds)
 * is set, and only then are the queues enabled again.
 */
static void cptlf_hw_init(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);

	/* Set instruction queues base addresses */
	otx2_cptlf_set_iqueues_base_addr(lfs);

	/* Set instruction queues sizes */
	otx2_cptlf_set_iqueues_size(lfs);

	/* Set done interrupts time wait */
	cptlf_set_done_time_wait(lfs, CPT_TIMER_HOLD);

	/* Set done interrupts num wait */
	cptlf_set_done_num_wait(lfs, CPT_COUNT_HOLD);

	/* Enable instruction queues */
	otx2_cptlf_enable_iqueues(lfs);
}
 129
/* Quiesce the hardware: stop the instruction queues of all attached LFs. */
static void cptlf_hw_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Disable instruction queues */
	otx2_cptlf_disable_iqueues(lfs);
}
 135
 136static void cptlf_set_misc_intrs(struct otx2_cptlfs_info *lfs, u8 enable)
 137{
 138        union otx2_cptx_lf_misc_int_ena_w1s irq_misc = { .u = 0x0 };
 139        u64 reg = enable ? OTX2_CPT_LF_MISC_INT_ENA_W1S :
 140                           OTX2_CPT_LF_MISC_INT_ENA_W1C;
 141        int slot;
 142
 143        irq_misc.s.fault = 0x1;
 144        irq_misc.s.hwerr = 0x1;
 145        irq_misc.s.irde = 0x1;
 146        irq_misc.s.nqerr = 0x1;
 147        irq_misc.s.nwrp = 0x1;
 148
 149        for (slot = 0; slot < lfs->lfs_num; slot++)
 150                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot, reg,
 151                                 irq_misc.u);
 152}
 153
 154static void cptlf_enable_intrs(struct otx2_cptlfs_info *lfs)
 155{
 156        int slot;
 157
 158        /* Enable done interrupts */
 159        for (slot = 0; slot < lfs->lfs_num; slot++)
 160                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
 161                                 OTX2_CPT_LF_DONE_INT_ENA_W1S, 0x1);
 162        /* Enable Misc interrupts */
 163        cptlf_set_misc_intrs(lfs, true);
 164}
 165
 166static void cptlf_disable_intrs(struct otx2_cptlfs_info *lfs)
 167{
 168        int slot;
 169
 170        for (slot = 0; slot < lfs->lfs_num; slot++)
 171                otx2_cpt_write64(lfs->reg_base, BLKADDR_CPT0, slot,
 172                                 OTX2_CPT_LF_DONE_INT_ENA_W1C, 0x1);
 173        cptlf_set_misc_intrs(lfs, false);
 174}
 175
 176static inline int cptlf_read_done_cnt(struct otx2_cptlf_info *lf)
 177{
 178        union otx2_cptx_lf_done irq_cnt;
 179
 180        irq_cnt.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
 181                                    OTX2_CPT_LF_DONE);
 182        return irq_cnt.s.done;
 183}
 184
/*
 * Miscellaneous (error) interrupt handler for one LF.
 *
 * Reads the MISC_INT cause register, logs the first pending cause found
 * and acknowledges only that bit by writing it back to MISC_INT.  Note
 * the if/else-if chain services a single cause per invocation; if
 * several cause bits are set at once the others remain pending until
 * the interrupt fires again.  Returns IRQ_NONE when no known cause bit
 * is set.
 */
static irqreturn_t cptlf_misc_intr_handler(int __always_unused irq, void *arg)
{
	union otx2_cptx_lf_misc_int irq_misc, irq_misc_ack;
	struct otx2_cptlf_info *lf = arg;
	struct device *dev;

	dev = &lf->lfs->pdev->dev;
	/* Snapshot the pending miscellaneous interrupt causes */
	irq_misc.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				     OTX2_CPT_LF_MISC_INT);
	irq_misc_ack.u = 0x0;

	if (irq_misc.s.fault) {
		dev_err(dev, "Memory error detected while executing CPT_INST_S, LF %d.\n",
			lf->slot);
		irq_misc_ack.s.fault = 0x1;

	} else if (irq_misc.s.hwerr) {
		dev_err(dev, "HW error from an engine executing CPT_INST_S, LF %d.",
			lf->slot);
		irq_misc_ack.s.hwerr = 0x1;

	} else if (irq_misc.s.nwrp) {
		dev_err(dev, "SMMU fault while writing CPT_RES_S to CPT_INST_S[RES_ADDR], LF %d.\n",
			lf->slot);
		irq_misc_ack.s.nwrp = 0x1;

	} else if (irq_misc.s.irde) {
		dev_err(dev, "Memory error when accessing instruction memory queue CPT_LF_Q_BASE[ADDR].\n");
		irq_misc_ack.s.irde = 0x1;

	} else if (irq_misc.s.nqerr) {
		dev_err(dev, "Error enqueuing an instruction received at CPT_LF_NQ.\n");
		irq_misc_ack.s.nqerr = 0x1;

	} else {
		dev_err(dev, "Unhandled interrupt in CPT LF %d\n", lf->slot);
		return IRQ_NONE;
	}

	/* Acknowledge interrupts */
	otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
			 OTX2_CPT_LF_MISC_INT, irq_misc_ack.u);

	return IRQ_HANDLED;
}
 230
/*
 * Done interrupt handler for one LF: acknowledges the count of
 * completed instructions and schedules the tasklet that processes the
 * completions.
 */
static irqreturn_t cptlf_done_intr_handler(int irq, void *arg)
{
	union otx2_cptx_lf_done_wait done_wait;
	struct otx2_cptlf_info *lf = arg;
	int irq_cnt;

	/* Read the number of completed requests */
	irq_cnt = cptlf_read_done_cnt(lf);
	if (irq_cnt) {
		done_wait.u = otx2_cpt_read64(lf->lfs->reg_base, BLKADDR_CPT0,
					      lf->slot, OTX2_CPT_LF_DONE_WAIT);
		/* Acknowledge the number of completed requests */
		otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				 OTX2_CPT_LF_DONE_ACK, irq_cnt);

		/* NOTE(review): DONE_WAIT is read before the ack and written
		 * back unchanged afterwards — presumably to rearm the
		 * interrupt-coalescing timer; confirm against the CPT HRM.
		 */
		otx2_cpt_write64(lf->lfs->reg_base, BLKADDR_CPT0, lf->slot,
				 OTX2_CPT_LF_DONE_WAIT, done_wait.u);
		if (unlikely(!lf->wqe)) {
			dev_err(&lf->lfs->pdev->dev, "No work for LF %d\n",
				lf->slot);
			return IRQ_NONE;
		}

		/* Schedule processing of completed requests */
		tasklet_hi_schedule(&lf->wqe->work);
	}
	return IRQ_HANDLED;
}
 259
 260void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
 261{
 262        int i, offs, vector;
 263
 264        for (i = 0; i < lfs->lfs_num; i++) {
 265                for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
 266                        if (!lfs->lf[i].is_irq_reg[offs])
 267                                continue;
 268
 269                        vector = pci_irq_vector(lfs->pdev,
 270                                                lfs->lf[i].msix_offset + offs);
 271                        free_irq(vector, &lfs->lf[i]);
 272                        lfs->lf[i].is_irq_reg[offs] = false;
 273                }
 274        }
 275        cptlf_disable_intrs(lfs);
 276}
 277
 278static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
 279                                         int lf_num, int irq_offset,
 280                                         irq_handler_t handler)
 281{
 282        int ret, vector;
 283
 284        vector = pci_irq_vector(lfs->pdev, lfs->lf[lf_num].msix_offset +
 285                                irq_offset);
 286        ret = request_irq(vector, handler, 0,
 287                          lfs->lf[lf_num].irq_name[irq_offset],
 288                          &lfs->lf[lf_num]);
 289        if (ret)
 290                return ret;
 291
 292        lfs->lf[lf_num].is_irq_reg[irq_offset] = true;
 293
 294        return ret;
 295}
 296
/*
 * Request the MISC and DONE MSI-X vectors for every attached LF and
 * enable the LF interrupts.  On any failure, every vector registered so
 * far is released via otx2_cptlf_unregister_interrupts().
 *
 * Return: 0 on success, negative errno from request_irq() otherwise.
 */
int otx2_cptlf_register_interrupts(struct otx2_cptlfs_info *lfs)
{
	int irq_offs, ret, i;

	for (i = 0; i < lfs->lfs_num; i++) {
		irq_offs = OTX2_CPT_LF_INT_VEC_E_MISC;
		/* NOTE(review): name length hard-coded to 32 — assumed to
		 * match the irq_name entry size; confirm in otx2_cptlf.h.
		 */
		snprintf(lfs->lf[i].irq_name[irq_offs], 32, "CPTLF Misc%d", i);
		ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
						    cptlf_misc_intr_handler);
		if (ret)
			goto free_irq;

		irq_offs = OTX2_CPT_LF_INT_VEC_E_DONE;
		snprintf(lfs->lf[i].irq_name[irq_offs], 32, "OTX2_CPTLF Done%d",
			 i);
		ret = cptlf_do_register_interrrupts(lfs, i, irq_offs,
						    cptlf_done_intr_handler);
		if (ret)
			goto free_irq;
	}
	cptlf_enable_intrs(lfs);
	return 0;

free_irq:
	otx2_cptlf_unregister_interrupts(lfs);
	return ret;
}
 324
 325void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
 326{
 327        int slot, offs;
 328
 329        for (slot = 0; slot < lfs->lfs_num; slot++) {
 330                for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++)
 331                        irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
 332                                              lfs->lf[slot].msix_offset +
 333                                              offs), NULL);
 334                free_cpumask_var(lfs->lf[slot].affinity_mask);
 335        }
 336}
 337
/*
 * Spread the LFs' MSI-X vectors across CPUs local to the device's NUMA
 * node: one CPU per LF slot, applied as the affinity hint for both of
 * that LF's vectors.
 *
 * NOTE(review): the error path frees the cpumask of every slot,
 * including slots whose allocation never ran — safe only if
 * free_cpumask_var() tolerates an unallocated mask (it is a no-op
 * unless CONFIG_CPUMASK_OFFSTACK); confirm.
 *
 * Return: 0 on success, -ENOMEM on cpumask allocation failure, or the
 * irq_set_affinity_hint() error code.
 */
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
	struct otx2_cptlf_info *lf = lfs->lf;
	int slot, offs, ret;

	for (slot = 0; slot < lfs->lfs_num; slot++) {
		if (!zalloc_cpumask_var(&lf[slot].affinity_mask, GFP_KERNEL)) {
			dev_err(&lfs->pdev->dev,
				"cpumask allocation failed for LF %d", slot);
			ret = -ENOMEM;
			goto free_affinity_mask;
		}

		/* Pick the slot-th CPU nearest the device's NUMA node */
		cpumask_set_cpu(cpumask_local_spread(slot,
				dev_to_node(&lfs->pdev->dev)),
				lf[slot].affinity_mask);

		for (offs = 0; offs < OTX2_CPT_LF_MSIX_VECTORS; offs++) {
			ret = irq_set_affinity_hint(pci_irq_vector(lfs->pdev,
						lf[slot].msix_offset + offs),
						lf[slot].affinity_mask);
			if (ret)
				goto free_affinity_mask;
		}
	}
	return 0;

free_affinity_mask:
	otx2_cptlf_free_irqs_affinity(lfs);
	return ret;
}
 369
/*
 * Initialize @lfs_num CPT LFs: record per-slot bookkeeping (back
 * pointer, slot id, LMTLINE and NQ doorbell addresses), attach the LFs
 * through the AF mailbox, allocate and program the instruction queues,
 * and set every LF's engine-group mask and queue priority.
 *
 * On failure each completed step is unwound in reverse order and
 * lfs->lfs_num is reset to 0.
 *
 * Return: 0 on success, -EINVAL if @lfs is not set up with a PCI device
 * and register base, or a negative errno from the failing step.
 */
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
		    int lfs_num)
{
	int slot, ret;

	if (!lfs->pdev || !lfs->reg_base)
		return -EINVAL;

	lfs->lfs_num = lfs_num;
	for (slot = 0; slot < lfs->lfs_num; slot++) {
		lfs->lf[slot].lfs = lfs;
		lfs->lf[slot].slot = slot;
		/* Per-slot LMTLINE and NQ doorbell addresses in BAR space */
		lfs->lf[slot].lmtline = lfs->reg_base +
			OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_LMT, slot,
						 OTX2_CPT_LMT_LF_LMTLINEX(0));
		lfs->lf[slot].ioreg = lfs->reg_base +
			OTX2_CPT_RVU_FUNC_ADDR_S(BLKADDR_CPT0, slot,
						 OTX2_CPT_LF_NQX(0));
	}
	/* Send request to attach LFs */
	ret = otx2_cpt_attach_rscrs_msg(lfs);
	if (ret)
		goto clear_lfs_num;

	ret = otx2_cpt_alloc_instruction_queues(lfs);
	if (ret) {
		dev_err(&lfs->pdev->dev,
			"Allocating instruction queues failed\n");
		goto detach_rsrcs;
	}
	cptlf_hw_init(lfs);
	/*
	 * Allow each LF to execute requests destined to any of 8 engine
	 * groups and set queue priority of each LF to high
	 */
	ret = cptlf_set_grp_and_pri(lfs, eng_grp_mask, pri);
	if (ret)
		goto free_iq;

	return 0;

free_iq:
	otx2_cpt_free_instruction_queues(lfs);
	cptlf_hw_cleanup(lfs);
detach_rsrcs:
	otx2_cpt_detach_rsrcs_msg(lfs);
clear_lfs_num:
	lfs->lfs_num = 0;
	return ret;
}
 420
/*
 * Tear down all attached LFs: mark none as in use, stop the instruction
 * queues, then ask the AF to detach the LF resources.
 */
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
	lfs->lfs_num = 0;
	/* Cleanup LFs hardware side */
	cptlf_hw_cleanup(lfs);
	/* Send request to detach LFs */
	otx2_cpt_detach_rsrcs_msg(lfs);
}
 429