linux/drivers/crypto/caam/qi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * CAAM/SEC 4.x QI transport/backend driver
 * Queue Interface backend functionality
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2017, 2019 NXP
 */

#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <soc/fsl/qman.h>

#include "regs.h"
#include "qi.h"
#include "desc.h"
#include "intern.h"
#include "desc_constr.h"

#define PREHDR_RSLS_SHIFT       31
#define PREHDR_ABS              BIT(25)
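
/*
 * Preheader layout as used by caam_drv_ctx_init()/caam_drv_ctx_update()
 * below: word 0 carries the shared descriptor length (SDLEN) in its low
 * bits with the RSLS bit set, word 1 only sets the ABS bit.
 */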

/*
 * Use a reasonable backlog of frames (per CPU) as congestion threshold,
 * so that resources used by the in-flight buffers do not become a memory hog.
 */
#define MAX_RSP_FQ_BACKLOG_PER_CPU      256

#define CAAM_QI_ENQUEUE_RETRIES 10000

#define CAAM_NAPI_WEIGHT        63

/*
 * caam_napi - struct holding CAAM NAPI-related params
 * @irqtask: IRQ task for QI backend
 * @p: QMan portal
 */
struct caam_napi {
        struct napi_struct irqtask;
        struct qman_portal *p;
};

/*
 * caam_qi_pcpu_priv - percpu private data structure to maintain the list of
 *                     pending responses expected on each cpu.
 * @caam_napi: CAAM NAPI params
 * @net_dev: netdev used by NAPI
 * @rsp_fq: response FQ from CAAM
 */
struct caam_qi_pcpu_priv {
        struct caam_napi caam_napi;
        struct net_device net_dev;
        struct qman_fq *rsp_fq;
} ____cacheline_aligned;

static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv);
static DEFINE_PER_CPU(int, last_cpu);

/*
 * caam_qi_priv - CAAM QI backend private params
 * @cgr: QMan congestion group
 */
struct caam_qi_priv {
        struct qman_cgr cgr;
};

static struct caam_qi_priv qipriv ____cacheline_aligned;

/*
 * This is written by only one core - the one that initialized the CGR - and
 * read by multiple cores (all the others).
 */
bool caam_congested __read_mostly;
EXPORT_SYMBOL(caam_congested);

#ifdef CONFIG_DEBUG_FS
/*
 * This is a counter for the number of times the congestion group (where all
 * the request and response queues are) reached congestion. Incremented
 * each time the congestion callback is called with congested == true.
 */
static u64 times_congested;
#endif

/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
 * doing malloc on the hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa-ethernet driver.
 *       This would pose a problem for userspace application processing which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
 */
static struct kmem_cache *qi_cache;
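
/*
 * Illustrative sketch (assumed frontend usage): callers typically carve their
 * per-request extended descriptors out of this cache via the helpers exported
 * further down in this file, e.g.:
 *
 *      edesc = qi_cache_alloc(GFP_DMA | flags);
 *      ...
 *      qi_cache_free(edesc);
 *
 * The exact allocation flags and structure layout are up to the frontend.
 */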

static void *caam_iova_to_virt(struct iommu_domain *domain,
                               dma_addr_t iova_addr)
{
        phys_addr_t phys_addr;

        phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

        return phys_to_virt(phys_addr);
}

int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req)
{
        struct qm_fd fd;
        dma_addr_t addr;
        int ret;
        int num_retries = 0;

        qm_fd_clear_fd(&fd);
        qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1]));

        addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt),
                              DMA_BIDIRECTIONAL);
        if (dma_mapping_error(qidev, addr)) {
                dev_err(qidev, "DMA mapping error for QI enqueue request\n");
                return -EIO;
        }
        qm_fd_addr_set64(&fd, addr);

        do {
                ret = qman_enqueue(req->drv_ctx->req_fq, &fd);
                if (likely(!ret))
                        return 0;

                if (ret != -EBUSY)
                        break;
                num_retries++;
        } while (num_retries < CAAM_QI_ENQUEUE_RETRIES);

        dev_err(qidev, "qman_enqueue failed: %d\n", ret);

        return ret;
}
EXPORT_SYMBOL(caam_qi_enqueue);
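
/*
 * Illustrative sketch (assumed frontend usage, names hypothetical): a frontend
 * fills a struct caam_drv_req before calling caam_qi_enqueue():
 *
 *      drv_req->fd_sgt[0] = ...;       out entry of the compound frame
 *      drv_req->fd_sgt[1] = ...;       in entry of the compound frame
 *      drv_req->drv_ctx = drv_ctx;
 *      drv_req->cbk = my_done_cbk;     invoked from the response path
 *      drv_req->app_ctx = req;
 *
 *      ret = caam_qi_enqueue(qidev, drv_req);
 *
 * On enqueue rejection the callback is invoked with JRSTA_SSRC_QI from
 * caam_fq_ern_cb() below.
 */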

static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
                           const union qm_mr_entry *msg)
{
        const struct qm_fd *fd;
        struct caam_drv_req *drv_req;
        struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
        struct caam_drv_private *priv = dev_get_drvdata(qidev);

        fd = &msg->ern.fd;

        if (qm_fd_get_format(fd) != qm_fd_compound) {
                dev_err(qidev, "Non-compound FD from CAAM\n");
                return;
        }

        drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
        if (!drv_req) {
                dev_err(qidev,
                        "Can't find original request for CAAM response\n");
                return;
        }

        dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
                         sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

        if (fd->status)
                drv_req->cbk(drv_req, be32_to_cpu(fd->status));
        else
                drv_req->cbk(drv_req, JRSTA_SSRC_QI);
}

static struct qman_fq *create_caam_req_fq(struct device *qidev,
                                          struct qman_fq *rsp_fq,
                                          dma_addr_t hwdesc,
                                          int fq_sched_flag)
{
        int ret;
        struct qman_fq *req_fq;
        struct qm_mcc_initfq opts;

        req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC);
        if (!req_fq)
                return ERR_PTR(-ENOMEM);

        req_fq->cb.ern = caam_fq_ern_cb;
        req_fq->cb.fqs = NULL;

        ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
                                QMAN_FQ_FLAG_TO_DCPORTAL, req_fq);
        if (ret) {
                dev_err(qidev, "Failed to create session req FQ\n");
                goto create_req_fq_fail;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
                                   QM_INITFQ_WE_CONTEXTB |
                                   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
        opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
        qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2);
        opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq));
        qm_fqd_context_a_set64(&opts.fqd, hwdesc);
        opts.fqd.cgid = qipriv.cgr.cgrid;

        ret = qman_init_fq(req_fq, fq_sched_flag, &opts);
        if (ret) {
                dev_err(qidev, "Failed to init session req FQ\n");
                goto init_req_fq_fail;
        }

        dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid,
                smp_processor_id());
        return req_fq;

init_req_fq_fail:
        qman_destroy_fq(req_fq);
create_req_fq_fail:
        kfree(req_fq);
        return ERR_PTR(ret);
}

static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
{
        int ret;

        ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
                                    QMAN_VOLATILE_FLAG_FINISH,
                                    QM_VDQCR_PRECEDENCE_VDQCR |
                                    QM_VDQCR_NUMFRAMES_TILLEMPTY);
        if (ret) {
                dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
                return ret;
        }

        do {
                struct qman_portal *p;

                p = qman_get_affine_portal(smp_processor_id());
                qman_p_poll_dqrr(p, 16);
        } while (fq->flags & QMAN_FQ_STATE_NE);

        return 0;
}

static int kill_fq(struct device *qidev, struct qman_fq *fq)
{
        u32 flags;
        int ret;

        ret = qman_retire_fq(fq, &flags);
        if (ret < 0) {
                dev_err(qidev, "qman_retire_fq failed: %d\n", ret);
                return ret;
        }

        if (!ret)
                goto empty_fq;

        /* Async FQ retirement condition */
        if (ret == 1) {
                /* Retry till FQ gets in retired state */
                do {
                        msleep(20);
                } while (fq->state != qman_fq_state_retired);

                WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
                WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
        }

empty_fq:
        if (fq->flags & QMAN_FQ_STATE_NE) {
                ret = empty_retired_fq(qidev, fq);
                if (ret) {
                        dev_err(qidev, "empty_retired_fq fail for FQ: %u\n",
                                fq->fqid);
                        return ret;
                }
        }

        ret = qman_oos_fq(fq);
        if (ret)
                dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);

        qman_destroy_fq(fq);
        kfree(fq);

        return ret;
}

static int empty_caam_fq(struct qman_fq *fq)
{
        int ret;
        struct qm_mcr_queryfq_np np;

        /* Wait till the older CAAM FQ gets empty */
        do {
                ret = qman_query_fq_np(fq, &np);
                if (ret)
                        return ret;

                if (!qm_mcr_np_get(&np, frm_cnt))
                        break;

                msleep(20);
        } while (1);

        /*
         * Give extra time for pending jobs from this FQ in holding tanks
         * to get processed
         */
        msleep(20);
        return 0;
}

int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc)
{
        int ret;
        u32 num_words;
        struct qman_fq *new_fq, *old_fq;
        struct device *qidev = drv_ctx->qidev;

        num_words = desc_len(sh_desc);
        if (num_words > MAX_SDLEN) {
                dev_err(qidev, "Invalid descriptor len: %d words\n", num_words);
                return -EINVAL;
        }

        /* Note down older req FQ */
        old_fq = drv_ctx->req_fq;

        /* Create a new req FQ in parked state */
        new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq,
                                    drv_ctx->context_a, 0);
        if (IS_ERR(new_fq)) {
                dev_err(qidev, "FQ allocation for shdesc update failed\n");
                return PTR_ERR(new_fq);
        }

        /* Hook up new FQ to context so that new requests keep queuing */
        drv_ctx->req_fq = new_fq;

        /* Empty and remove the older FQ */
        ret = empty_caam_fq(old_fq);
        if (ret) {
                dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret);

                /* We can revert to the older FQ */
                drv_ctx->req_fq = old_fq;

                if (kill_fq(qidev, new_fq))
                        dev_warn(qidev, "New CAAM FQ kill failed\n");

                return ret;
        }

        /*
         * Re-initialise pre-header. Set RSLS and SDLEN.
         * Update the shared descriptor for driver context.
         */
        drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
                                           num_words);
        drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
        memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
        dma_sync_single_for_device(qidev, drv_ctx->context_a,
                                   sizeof(drv_ctx->sh_desc) +
                                   sizeof(drv_ctx->prehdr),
                                   DMA_BIDIRECTIONAL);

        /* Put the new FQ in scheduled state */
        ret = qman_schedule_fq(new_fq);
        if (ret) {
                dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret);

                /*
                 * We can kill the new FQ and revert to the old FQ.
                 * Since the descriptor is already modified, it is a success
                 * case.
                 */

                drv_ctx->req_fq = old_fq;

                if (kill_fq(qidev, new_fq))
                        dev_warn(qidev, "New CAAM FQ kill failed\n");
        } else if (kill_fq(qidev, old_fq)) {
                dev_warn(qidev, "Old CAAM FQ kill failed\n");
        }

        return 0;
}
EXPORT_SYMBOL(caam_drv_ctx_update);
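
/*
 * Illustrative sketch (assumed frontend usage, names hypothetical): frontends
 * call caam_drv_ctx_update() when the shared descriptor of a live session
 * changes, e.g. on a rekey after the initial caam_drv_ctx_init():
 *
 *      build_sh_desc(ctx->sh_desc, new_key);
 *      if (ctx->drv_ctx)
 *              ret = caam_drv_ctx_update(ctx->drv_ctx, ctx->sh_desc);
 *
 * The request FQ swap above lets jobs already enqueued on the old FQ finish
 * with the old descriptor before the in-place update takes effect.
 */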

struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev,
                                       int *cpu,
                                       u32 *sh_desc)
{
        size_t size;
        u32 num_words;
        dma_addr_t hwdesc;
        struct caam_drv_ctx *drv_ctx;
        const cpumask_t *cpus = qman_affine_cpus();

        num_words = desc_len(sh_desc);
        if (num_words > MAX_SDLEN) {
                dev_err(qidev, "Invalid descriptor len: %d words\n",
                        num_words);
                return ERR_PTR(-EINVAL);
        }

        drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC);
        if (!drv_ctx)
                return ERR_PTR(-ENOMEM);

        /*
         * Initialise pre-header - set RSLS and SDLEN - and shared descriptor
         * and dma-map them.
         */
        drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) |
                                           num_words);
        drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS);
        memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc));
        size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc);
        hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size,
                                DMA_BIDIRECTIONAL);
        if (dma_mapping_error(qidev, hwdesc)) {
                dev_err(qidev, "DMA map error for preheader + shdesc\n");
                kfree(drv_ctx);
                return ERR_PTR(-ENOMEM);
        }
        drv_ctx->context_a = hwdesc;

        /* If given CPU does not own the portal, choose another one that does */
        if (!cpumask_test_cpu(*cpu, cpus)) {
                int *pcpu = &get_cpu_var(last_cpu);

                *pcpu = cpumask_next(*pcpu, cpus);
                if (*pcpu >= nr_cpu_ids)
                        *pcpu = cpumask_first(cpus);
                *cpu = *pcpu;

                put_cpu_var(last_cpu);
        }
        drv_ctx->cpu = *cpu;

        /* Find response FQ hooked with this CPU */
        drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu);

        /* Attach request FQ */
        drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc,
                                             QMAN_INITFQ_FLAG_SCHED);
        if (IS_ERR(drv_ctx->req_fq)) {
                dev_err(qidev, "create_caam_req_fq failed\n");
                dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL);
                kfree(drv_ctx);
                return ERR_PTR(-ENOMEM);
        }

        drv_ctx->qidev = qidev;
        return drv_ctx;
}
EXPORT_SYMBOL(caam_drv_ctx_init);
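
/*
 * Illustrative sketch (assumed frontend usage, names hypothetical): a frontend
 * typically creates one driver context per session/direction and pairs it
 * with caam_drv_ctx_rel() at teardown:
 *
 *      int cpu = smp_processor_id();
 *
 *      drv_ctx = caam_drv_ctx_init(qidev, &cpu, sh_desc);
 *      if (IS_ERR(drv_ctx))
 *              return PTR_ERR(drv_ctx);
 *      ... submit requests via caam_qi_enqueue() ...
 *      caam_drv_ctx_rel(drv_ctx);
 *
 * Note that caam_drv_ctx_init() may rewrite *cpu if the requested CPU does
 * not own an affine portal.
 */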

void *qi_cache_alloc(gfp_t flags)
{
        return kmem_cache_alloc(qi_cache, flags);
}
EXPORT_SYMBOL(qi_cache_alloc);

void qi_cache_free(void *obj)
{
        kmem_cache_free(qi_cache, obj);
}
EXPORT_SYMBOL(qi_cache_free);

static int caam_qi_poll(struct napi_struct *napi, int budget)
{
        struct caam_napi *np = container_of(napi, struct caam_napi, irqtask);

        int cleaned = qman_p_poll_dqrr(np->p, budget);

        if (cleaned < budget) {
                napi_complete(napi);
                qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
        }

        return cleaned;
}

void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
{
        if (IS_ERR_OR_NULL(drv_ctx))
                return;

        /* Remove request FQ */
        if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq))
                dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n");

        dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a,
                         sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr),
                         DMA_BIDIRECTIONAL);
        kfree(drv_ctx);
}
EXPORT_SYMBOL(caam_drv_ctx_rel);

void caam_qi_shutdown(struct device *qidev)
{
        int i;
        struct caam_qi_priv *priv = &qipriv;
        const cpumask_t *cpus = qman_affine_cpus();

        for_each_cpu(i, cpus) {
                struct napi_struct *irqtask;

                irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask;
                napi_disable(irqtask);
                netif_napi_del(irqtask);

                if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
                        dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
        }

        qman_delete_cgr_safe(&priv->cgr);
        qman_release_cgrid(priv->cgr.cgrid);

        kmem_cache_destroy(qi_cache);
}

static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
{
        caam_congested = congested;

        if (congested) {
#ifdef CONFIG_DEBUG_FS
                times_congested++;
#endif
                pr_debug_ratelimited("CAAM entered congestion\n");

        } else {
                pr_debug_ratelimited("CAAM exited congestion\n");
        }
}

static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
{
        /*
         * In case of threaded ISR, for RT kernels in_irq() does not return
         * an appropriate value, so use in_serving_softirq() to distinguish
         * between softirq and irq contexts.
         */
        if (unlikely(in_irq() || !in_serving_softirq())) {
                /* Disable QMan IRQ source and invoke NAPI */
                qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
                np->p = p;
                napi_schedule(&np->irqtask);
                return 1;
        }
        return 0;
}

static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
                                                    struct qman_fq *rsp_fq,
                                                    const struct qm_dqrr_entry *dqrr)
{
        struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
        struct caam_drv_req *drv_req;
        const struct qm_fd *fd;
        struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
        struct caam_drv_private *priv = dev_get_drvdata(qidev);
        u32 status;

        if (caam_qi_napi_schedule(p, caam_napi))
                return qman_cb_dqrr_stop;

        fd = &dqrr->fd;
        status = be32_to_cpu(fd->status);
        if (unlikely(status)) {
                u32 ssrc = status & JRSTA_SSRC_MASK;
                u8 err_id = status & JRSTA_CCBERR_ERRID_MASK;

                if (ssrc != JRSTA_SSRC_CCB_ERROR ||
                    err_id != JRSTA_CCBERR_ERRID_ICVCHK)
                        dev_err_ratelimited(qidev,
                                            "Error: %#x in CAAM response FD\n",
                                            status);
        }

        if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) {
                dev_err(qidev, "Non-compound FD from CAAM\n");
                return qman_cb_dqrr_consume;
        }

        drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd));
        if (unlikely(!drv_req)) {
                dev_err(qidev,
                        "Can't find original request for caam response\n");
                return qman_cb_dqrr_consume;
        }

        dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd),
                         sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL);

        drv_req->cbk(drv_req, status);
        return qman_cb_dqrr_consume;
}

static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
{
        struct qm_mcc_initfq opts;
        struct qman_fq *fq;
        int ret;

        fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
        if (!fq)
                return -ENOMEM;

        fq->cb.dqrr = caam_rsp_fq_dqrr_cb;

        ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE |
                             QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
        if (ret) {
                dev_err(qidev, "Rsp FQ create failed\n");
                kfree(fq);
                return -ENODEV;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ |
                                   QM_INITFQ_WE_CONTEXTB |
                                   QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID);
        opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING |
                                       QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE);
        qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3);
        opts.fqd.cgid = qipriv.cgr.cgrid;
        opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX |
                                                QM_STASHING_EXCL_DATA;
        qm_fqd_set_stashing(&opts.fqd, 0, 1, 1);

        ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
        if (ret) {
                dev_err(qidev, "Rsp FQ init failed\n");
                kfree(fq);
                return -ENODEV;
        }

        per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;

        dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
        return 0;
}

static int init_cgr(struct device *qidev)
{
        int ret;
        struct qm_mcc_initcgr opts;
        const u64 val = (u64)cpumask_weight(qman_affine_cpus()) *
                        MAX_RSP_FQ_BACKLOG_PER_CPU;

        ret = qman_alloc_cgrid(&qipriv.cgr.cgrid);
        if (ret) {
                dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret);
                return ret;
        }

        qipriv.cgr.cb = cgr_cb;
        memset(&opts, 0, sizeof(opts));
        opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES |
                                   QM_CGR_WE_MODE);
        opts.cgr.cscn_en = QM_CGR_EN;
        opts.cgr.mode = QMAN_CGR_MODE_FRAME;
        qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1);

        ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
        if (ret) {
                dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret,
                        qipriv.cgr.cgrid);
                return ret;
        }

        dev_dbg(qidev, "Congestion threshold set to %llu\n", val);
        return 0;
}

static int alloc_rsp_fqs(struct device *qidev)
{
        int ret, i;
        const cpumask_t *cpus = qman_affine_cpus();

        /* Now create response FQs */
        for_each_cpu(i, cpus) {
                ret = alloc_rsp_fq_cpu(qidev, i);
                if (ret) {
                        dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i);
                        return ret;
                }
        }

        return 0;
}

static void free_rsp_fqs(void)
{
        int i;
        const cpumask_t *cpus = qman_affine_cpus();

        for_each_cpu(i, cpus)
                kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
}

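/*
 * Lifecycle note: caam_qi_init() is expected to be called by the CAAM
 * controller driver at probe time, once the affine QMan portals are up, and
 * caam_qi_shutdown() above is its counterpart at remove time. A hypothetical
 * probe-side pairing:
 *
 *      err = caam_qi_init(caam_pdev);
 *      if (err)
 *              goto fail;
 */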
int caam_qi_init(struct platform_device *caam_pdev)
{
        int err, i;
        struct device *ctrldev = &caam_pdev->dev, *qidev;
        struct caam_drv_private *ctrlpriv;
        const cpumask_t *cpus = qman_affine_cpus();

        ctrlpriv = dev_get_drvdata(ctrldev);
        qidev = ctrldev;

        /* Initialize the congestion detection */
        err = init_cgr(qidev);
        if (err) {
                dev_err(qidev, "CGR initialization failed: %d\n", err);
                return err;
        }

        /* Initialise response FQs */
        err = alloc_rsp_fqs(qidev);
        if (err) {
                dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err);
                free_rsp_fqs();
                return err;
        }

        /* Enable the NAPI contexts on each core that has an affine portal */
        for_each_cpu(i, cpus) {
                struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
                struct caam_napi *caam_napi = &priv->caam_napi;
                struct napi_struct *irqtask = &caam_napi->irqtask;
                struct net_device *net_dev = &priv->net_dev;

                net_dev->dev = *qidev;
                INIT_LIST_HEAD(&net_dev->napi_list);

                netif_napi_add(net_dev, irqtask, caam_qi_poll,
                               CAAM_NAPI_WEIGHT);

                napi_enable(irqtask);
        }

        qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
                                     SLAB_CACHE_DMA, NULL);
        if (!qi_cache) {
                dev_err(qidev, "Can't allocate CAAM cache\n");
                free_rsp_fqs();
                return -ENOMEM;
        }

#ifdef CONFIG_DEBUG_FS
        debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
                            &times_congested, &caam_fops_u64_ro);
#endif

        ctrlpriv->qi_init = 1;
        dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
        return 0;
}