linux/drivers/crypto/ccree/cc_request_mgr.c
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
   3
   4#include <linux/kernel.h>
   5#include "cc_driver.h"
   6#include "cc_buffer_mgr.h"
   7#include "cc_request_mgr.h"
   8#include "cc_ivgen.h"
   9#include "cc_pm.h"
  10
  11#define CC_MAX_POLL_ITER        10
  12/* The highest descriptor count in use */
  13#define CC_MAX_DESC_SEQ_LEN     23
  14
  15struct cc_req_mgr_handle {
  16        /* Request manager resources */
  17        unsigned int hw_queue_size; /* HW capability */
  18        unsigned int min_free_hw_slots;
  19        unsigned int max_used_sw_slots;
  20        struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE];
  21        u32 req_queue_head;
  22        u32 req_queue_tail;
  23        u32 axi_completed;
  24        u32 q_free_slots;
  25        /* This lock protects access to the HW registers, which must be
  26         * accessed by a single request at a time
  27         */
  28        spinlock_t hw_lock;
  29        struct cc_hw_desc compl_desc;
  30        u8 *dummy_comp_buff;
  31        dma_addr_t dummy_comp_buff_dma;
  32
  33        /* backlog queue */
  34        struct list_head backlog;
  35        unsigned int bl_len;
  36        spinlock_t bl_lock; /* protect backlog queue */
  37
  38#ifdef COMP_IN_WQ
  39        struct workqueue_struct *workq;
  40        struct delayed_work compwork;
  41#else
  42        struct tasklet_struct comptask;
  43#endif
  44        bool is_runtime_suspended;
  45};
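
/*
 * Illustrative sketch, not part of the driver build: req_queue above is a
 * power-of-two ring buffer indexed by req_queue_head and req_queue_tail.
 * The helpers below (hypothetical names) spell out the masking arithmetic
 * that cc_queues_status() and cc_do_send_request() rely on; they assume
 * MAX_REQUEST_QUEUE_SIZE is a power of two.
 */
#if 0
static inline u32 sw_queue_used(u32 head, u32 tail)
{
	/* Occupancy; wrap-around safe because the size is a power of two */
	return (head - tail) & (MAX_REQUEST_QUEUE_SIZE - 1);
}

static inline bool sw_queue_full(u32 head, u32 tail)
{
	/* One slot is left unused so that "full" can be told from "empty" */
	return ((head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == tail;
}
#endif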
  46
  47struct cc_bl_item {
  48        struct cc_crypto_req creq;
  49        struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
  50        unsigned int len;
  51        struct list_head list;
  52        bool notif;
  53};
  54
  55static void comp_handler(unsigned long devarg);
  56#ifdef COMP_IN_WQ
  57static void comp_work_handler(struct work_struct *work);
  58#endif
  59
  60void cc_req_mgr_fini(struct cc_drvdata *drvdata)
  61{
  62        struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
  63        struct device *dev = drvdata_to_dev(drvdata);
  64
  65        if (!req_mgr_h)
  66                return; /* Not allocated */
  67
  68        if (req_mgr_h->dummy_comp_buff_dma) {
  69                dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff,
  70                                  req_mgr_h->dummy_comp_buff_dma);
  71        }
  72
  73        dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size -
  74                                                req_mgr_h->min_free_hw_slots));
  75        dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots);
  76
  77#ifdef COMP_IN_WQ
  78        flush_workqueue(req_mgr_h->workq);
  79        destroy_workqueue(req_mgr_h->workq);
  80#else
  81        /* Kill tasklet */
  82        tasklet_kill(&req_mgr_h->comptask);
  83#endif
  84        kzfree(req_mgr_h);
  85        drvdata->request_mgr_handle = NULL;
  86}
  87
  88int cc_req_mgr_init(struct cc_drvdata *drvdata)
  89{
  90        struct cc_req_mgr_handle *req_mgr_h;
  91        struct device *dev = drvdata_to_dev(drvdata);
  92        int rc = 0;
  93
  94        req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL);
  95        if (!req_mgr_h) {
  96                rc = -ENOMEM;
  97                goto req_mgr_init_err;
  98        }
  99
 100        drvdata->request_mgr_handle = req_mgr_h;
 101
 102        spin_lock_init(&req_mgr_h->hw_lock);
 103        spin_lock_init(&req_mgr_h->bl_lock);
 104        INIT_LIST_HEAD(&req_mgr_h->backlog);
 105
 106#ifdef COMP_IN_WQ
 107        dev_dbg(dev, "Initializing completion workqueue\n");
 108        req_mgr_h->workq = create_singlethread_workqueue("ccree");
 109        if (!req_mgr_h->workq) {
 110                dev_err(dev, "Failed creating work queue\n");
 111                rc = -ENOMEM;
 112                goto req_mgr_init_err;
 113        }
 114        INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler);
 115#else
 116        dev_dbg(dev, "Initializing completion tasklet\n");
 117        tasklet_init(&req_mgr_h->comptask, comp_handler,
 118                     (unsigned long)drvdata);
 119#endif
 120        req_mgr_h->hw_queue_size = cc_ioread(drvdata,
 121                                             CC_REG(DSCRPTR_QUEUE_SRAM_SIZE));
 122        dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size);
 123        if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) {
 124                dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n",
 125                        req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE);
 126                rc = -ENOMEM;
 127                goto req_mgr_init_err;
 128        }
 129        req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size;
 130        req_mgr_h->max_used_sw_slots = 0;
 131
 132        /* Allocate DMA word for "dummy" completion descriptor use */
 133        req_mgr_h->dummy_comp_buff =
 134                dma_alloc_coherent(dev, sizeof(u32),
 135                                   &req_mgr_h->dummy_comp_buff_dma,
 136                                   GFP_KERNEL);
 137        if (!req_mgr_h->dummy_comp_buff) {
 138                dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n",
 139                        sizeof(u32));
 140                rc = -ENOMEM;
 141                goto req_mgr_init_err;
 142        }
 143
 144        /* Init. "dummy" completion descriptor */
 145        hw_desc_init(&req_mgr_h->compl_desc);
 146        set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32));
 147        set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma,
 148                      sizeof(u32), NS_BIT, 1);
 149        set_flow_mode(&req_mgr_h->compl_desc, BYPASS);
 150        set_queue_last_ind(drvdata, &req_mgr_h->compl_desc);
 151
 152        return 0;
 153
 154req_mgr_init_err:
 155        cc_req_mgr_fini(drvdata);
 156        return rc;
 157}
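
/*
 * Illustrative sketch, not compiled here: how the init/fini pair above is
 * typically wired into driver bring-up. The function names are hypothetical
 * (the real call sites live in the main driver file). cc_req_mgr_fini()
 * tolerates a partially initialized handle, which is why cc_req_mgr_init()
 * can simply call it on every error path.
 */
#if 0
static int example_setup_request_mgr(struct cc_drvdata *drvdata)
{
	int rc;

	rc = cc_req_mgr_init(drvdata);
	if (rc)
		return rc;	/* cc_req_mgr_init() has already cleaned up */

	/* ... bring up the remaining subsystems ... */
	return 0;
}

static void example_teardown_request_mgr(struct cc_drvdata *drvdata)
{
	cc_req_mgr_fini(drvdata);	/* safe even after a failed init */
}
#endif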
 158
 159static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[],
 160                        unsigned int seq_len)
 161{
 162        int i, w;
 163        void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0);
 164        struct device *dev = drvdata_to_dev(drvdata);
 165
 166        /*
 167         * We do indeed write all 6 command words to the same
 168         * register. The HW supports this.
 169         */
 170
 171        for (i = 0; i < seq_len; i++) {
 172                for (w = 0; w <= 5; w++)
 173                        writel_relaxed(seq[i].word[w], reg);
 174
 175                if (cc_dump_desc)
 176                        dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n",
 177                                i, seq[i].word[0], seq[i].word[1],
 178                                seq[i].word[2], seq[i].word[3],
 179                                seq[i].word[4], seq[i].word[5]);
 180        }
 181}
 182
 183/*!
 184 * Completion will take place if and only if the user requested completion
 185 * by cc_send_sync_request().
 186 *
 187 * \param dev
 188 * \param dx_compl_h The completion event to signal
 189 */
 190static void request_mgr_complete(struct device *dev, void *dx_compl_h,
 191                                 int dummy)
 192{
 193        struct completion *this_compl = dx_compl_h;
 194
 195        complete(this_compl);
 196}
 197
 198static int cc_queues_status(struct cc_drvdata *drvdata,
 199                            struct cc_req_mgr_handle *req_mgr_h,
 200                            unsigned int total_seq_len)
 201{
 202        unsigned long poll_queue;
 203        struct device *dev = drvdata_to_dev(drvdata);
 204
 205        /* The SW queue is checked only once, as it will not
 206         * change during the poll because the spinlock_bh
 207         * is held by the thread
 208         */
 209        if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
 210            req_mgr_h->req_queue_tail) {
 211                dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n",
 212                        req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE);
 213                return -ENOSPC;
 214        }
 215
 216        if (req_mgr_h->q_free_slots >= total_seq_len)
 217                return 0;
 218
 219        /* Wait for space in HW queue. Poll constant num of iterations. */
 220        for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) {
 221                req_mgr_h->q_free_slots =
 222                        cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
 223                if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots)
 224                        req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots;
 225
 226                if (req_mgr_h->q_free_slots >= total_seq_len) {
 227                        /* If there is enough room, return */
 228                        return 0;
 229                }
 230
 231                dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n",
 232                        req_mgr_h->q_free_slots, total_seq_len);
 233        }
 234        /* No room in the HW queue; try again later */
 235        dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n",
 236                req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE,
 237                req_mgr_h->q_free_slots, total_seq_len);
 238        return -ENOSPC;
 239}
 240
 241/*!
 242 * Enqueue the caller's request to the crypto hardware.
 243 * Must be called with the HW lock held and PM running
 244 *
 245 * \param drvdata
 246 * \param cc_req The request to enqueue
 247 * \param desc The crypto sequence
 248 * \param len The crypto sequence length
 249 * \param add_comp If "true": add an artificial dout DMA to mark completion
 250 *
 251 * \return int Returns -EINPROGRESS or error code
 252 */
 253static int cc_do_send_request(struct cc_drvdata *drvdata,
 254                              struct cc_crypto_req *cc_req,
 255                              struct cc_hw_desc *desc, unsigned int len,
 256                                bool add_comp, bool ivgen)
 257{
 258        struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 259        unsigned int used_sw_slots;
 260        unsigned int iv_seq_len = 0;
 261        unsigned int total_seq_len = len; /*initial sequence length*/
 262        struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
 263        struct device *dev = drvdata_to_dev(drvdata);
 264        int rc;
 265
 266        if (ivgen) {
 267                dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
 268                        cc_req->ivgen_dma_addr_len,
 269                        &cc_req->ivgen_dma_addr[0],
 270                        &cc_req->ivgen_dma_addr[1],
 271                        &cc_req->ivgen_dma_addr[2],
 272                        cc_req->ivgen_size);
 273
 274                /* Acquire IV from pool */
 275                rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
 276                               cc_req->ivgen_dma_addr_len,
 277                               cc_req->ivgen_size, iv_seq, &iv_seq_len);
 278
 279                if (rc) {
 280                        dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
 281                        return rc;
 282                }
 283
 284                total_seq_len += iv_seq_len;
 285        }
 286
 287        used_sw_slots = ((req_mgr_h->req_queue_head -
 288                          req_mgr_h->req_queue_tail) &
 289                         (MAX_REQUEST_QUEUE_SIZE - 1));
 290        if (used_sw_slots > req_mgr_h->max_used_sw_slots)
 291                req_mgr_h->max_used_sw_slots = used_sw_slots;
 292
 293        /* Enqueue request - must be locked with HW lock*/
 294        req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
 295        req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
 296                                    (MAX_REQUEST_QUEUE_SIZE - 1);
 297        /* TODO: Use circ_buf.h ? */
 298
 299        dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
 300
 301        /*
 302         * We are about to push a command to the HW via the command registers
 303         * that may reference host memory. We need to issue a memory barrier
 304         * to make sure there are no outstanding memory writes
 305         */
 306        wmb();
 307
 308        /* STAT_PHASE_4: Push sequence */
 309        if (ivgen)
 310                enqueue_seq(drvdata, iv_seq, iv_seq_len);
 311
 312        enqueue_seq(drvdata, desc, len);
 313
 314        if (add_comp) {
 315                enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1);
 316                total_seq_len++;
 317        }
 318
 319        if (req_mgr_h->q_free_slots < total_seq_len) {
 320                /* This situation should never occur. It may indicate a problem
 321                 * with resuming power. Set the free slot count to 0 and hope
 322                 * for the best.
 323                 */
 324                dev_err(dev, "HW free slot count mismatch.");
 325                req_mgr_h->q_free_slots = 0;
 326        } else {
 327                /* Update the free slots in HW queue */
 328                req_mgr_h->q_free_slots -= total_seq_len;
 329        }
 330
 331        /* Operation still in process */
 332        return -EINPROGRESS;
 333}
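
/*
 * Illustrative sketch, not compiled here: the calling convention stated
 * above, i.e. cc_do_send_request() runs under hw_lock, with the device
 * already powered (cc_pm_get() is assumed to have been called), and only
 * after cc_queues_status() reported free space. The function name is
 * hypothetical; cc_send_request() and cc_send_sync_request() below are the
 * real callers.
 */
#if 0
static int example_locked_submit(struct cc_drvdata *drvdata,
				 struct cc_crypto_req *cc_req,
				 struct cc_hw_desc *desc, unsigned int len)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
	int rc;

	spin_lock_bh(&mgr->hw_lock);

	rc = cc_queues_status(drvdata, mgr, len);
	if (!rc)
		/* Pushes the sequence; returns -EINPROGRESS on success */
		rc = cc_do_send_request(drvdata, cc_req, desc, len,
					false, false);

	spin_unlock_bh(&mgr->hw_lock);

	return rc;
}
#endif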
 334
 335static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
 336                               struct cc_bl_item *bli)
 337{
 338        struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 339
 340        spin_lock_bh(&mgr->bl_lock);
 341        list_add_tail(&bli->list, &mgr->backlog);
 342        ++mgr->bl_len;
 343        spin_unlock_bh(&mgr->bl_lock);
 344        tasklet_schedule(&mgr->comptask);
 345}
 346
 347static void cc_proc_backlog(struct cc_drvdata *drvdata)
 348{
 349        struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 350        struct cc_bl_item *bli;
 351        struct cc_crypto_req *creq;
 352        struct crypto_async_request *req;
 353        bool ivgen;
 354        unsigned int total_len;
 355        struct device *dev = drvdata_to_dev(drvdata);
 356        int rc;
 357
 358        spin_lock(&mgr->bl_lock);
 359
 360        while (mgr->bl_len) {
 361                bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
 362                spin_unlock(&mgr->bl_lock);
 363
 364                creq = &bli->creq;
 365                req = (struct crypto_async_request *)creq->user_arg;
 366
 367                /*
 368                 * Notify the request we're moving out of the backlog
 369                 * but only if we haven't done so already.
 370                 */
 371                if (!bli->notif) {
 372                        req->complete(req, -EINPROGRESS);
 373                        bli->notif = true;
 374                }
 375
 376                ivgen = !!creq->ivgen_dma_addr_len;
 377                total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
 378
 379                spin_lock(&mgr->hw_lock);
 380
 381                rc = cc_queues_status(drvdata, mgr, total_len);
 382                if (rc) {
 383                        /*
 384                         * There is still no room in the FIFO for
 385                         * this request. Bail out. We'll return here
 386                         * on the next completion irq.
 387                         */
 388                        spin_unlock(&mgr->hw_lock);
 389                        return;
 390                }
 391
 392                rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
 393                                        bli->len, false, ivgen);
 394
 395                spin_unlock(&mgr->hw_lock);
 396
 397                if (rc != -EINPROGRESS) {
 398                        cc_pm_put_suspend(dev);
 399                        creq->user_cb(dev, req, rc);
 400                }
 401
 402                /* Remove ourselves from the backlog list */
 403                spin_lock(&mgr->bl_lock);
 404                list_del(&bli->list);
 405                --mgr->bl_len;
 406        }
 407
 408        spin_unlock(&mgr->bl_lock);
 409}
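
/*
 * Illustrative sketch, not compiled here: the crypto API backlog contract
 * that cc_proc_backlog() implements. A caller's completion callback (the
 * name below is hypothetical) first sees -EINPROGRESS when the request
 * leaves the backlog (the req->complete(req, -EINPROGRESS) call above) and
 * only later the final status delivered by proc_completions().
 */
#if 0
static void example_user_complete(struct crypto_async_request *req, int err)
{
	if (err == -EINPROGRESS) {
		/* Request moved from the SW backlog into the HW queue */
		return;
	}

	/* Final completion: err is 0 on success or a real error code */
}
#endif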
 410
 411int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req,
 412                    struct cc_hw_desc *desc, unsigned int len,
 413                    struct crypto_async_request *req)
 414{
 415        int rc;
 416        struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 417        bool ivgen = !!cc_req->ivgen_dma_addr_len;
 418        unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
 419        struct device *dev = drvdata_to_dev(drvdata);
 420        bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
 421        gfp_t flags = cc_gfp_flags(req);
 422        struct cc_bl_item *bli;
 423
 424        rc = cc_pm_get(dev);
 425        if (rc) {
 426                dev_err(dev, "cc_pm_get returned %x\n", rc);
 427                return rc;
 428        }
 429
 430        spin_lock_bh(&mgr->hw_lock);
 431        rc = cc_queues_status(drvdata, mgr, total_len);
 432
 433#ifdef CC_DEBUG_FORCE_BACKLOG
 434        if (backlog_ok)
 435                rc = -ENOSPC;
 436#endif /* CC_DEBUG_FORCE_BACKLOG */
 437
 438        if (rc == -ENOSPC && backlog_ok) {
 439                spin_unlock_bh(&mgr->hw_lock);
 440
 441                bli = kmalloc(sizeof(*bli), flags);
 442                if (!bli) {
 443                        cc_pm_put_suspend(dev);
 444                        return -ENOMEM;
 445                }
 446
 447                memcpy(&bli->creq, cc_req, sizeof(*cc_req));
 448                memcpy(&bli->desc, desc, len * sizeof(*desc));
 449                bli->len = len;
 450                bli->notif = false;
 451                cc_enqueue_backlog(drvdata, bli);
 452                return -EBUSY;
 453        }
 454
 455        if (!rc)
 456                rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
 457                                        ivgen);
 458
 459        spin_unlock_bh(&mgr->hw_lock);
 460        return rc;
 461}
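
/*
 * Illustrative sketch, not compiled here: how an algorithm front-end is
 * expected to drive cc_send_request(). The function and callback names are
 * hypothetical; the real users are the cipher/aead/hash front-ends.
 * -EINPROGRESS means the request was pushed to the HW queue, -EBUSY means
 * it was placed on the backlog (only possible when the caller set
 * CRYPTO_TFM_REQ_MAY_BACKLOG).
 */
#if 0
static void example_cipher_complete(struct device *dev, void *cc_req, int err)
{
	/* Unmap DMA buffers, then complete the crypto_async_request with err */
}

static int example_cipher_process(struct cc_drvdata *drvdata,
				  struct crypto_async_request *req)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len = 0;

	cc_req.user_cb = example_cipher_complete;
	cc_req.user_arg = req;

	/* ... build the descriptor sequence into desc[] and set len ... */

	return cc_send_request(drvdata, &cc_req, desc, len, req);
}
#endif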
 462
 463int cc_send_sync_request(struct cc_drvdata *drvdata,
 464                         struct cc_crypto_req *cc_req, struct cc_hw_desc *desc,
 465                         unsigned int len)
 466{
 467        int rc;
 468        struct device *dev = drvdata_to_dev(drvdata);
 469        struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 470
 471        init_completion(&cc_req->seq_compl);
 472        cc_req->user_cb = request_mgr_complete;
 473        cc_req->user_arg = &cc_req->seq_compl;
 474
 475        rc = cc_pm_get(dev);
 476        if (rc) {
 477                dev_err(dev, "cc_pm_get returned %x\n", rc);
 478                return rc;
 479        }
 480
 481        while (true) {
 482                spin_lock_bh(&mgr->hw_lock);
 483                rc = cc_queues_status(drvdata, mgr, len + 1);
 484
 485                if (!rc)
 486                        break;
 487
 488                spin_unlock_bh(&mgr->hw_lock);
 489                if (rc != -EAGAIN) {
 490                        cc_pm_put_suspend(dev);
 491                        return rc;
 492                }
 493                wait_for_completion_interruptible(&drvdata->hw_queue_avail);
 494                reinit_completion(&drvdata->hw_queue_avail);
 495        }
 496
 497        rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
 498        spin_unlock_bh(&mgr->hw_lock);
 499
 500        if (rc != -EINPROGRESS) {
 501                cc_pm_put_suspend(dev);
 502                return rc;
 503        }
 504
 505        wait_for_completion(&cc_req->seq_compl);
 506        return 0;
 507}
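
/*
 * Illustrative sketch, not compiled here: the synchronous path above is
 * meant for setup-style operations that may sleep. The caller only builds
 * the descriptor sequence; cc_send_sync_request() installs its own
 * completion callback and blocks until the sequence has executed. The
 * function name below is hypothetical.
 */
#if 0
static int example_sync_operation(struct cc_drvdata *drvdata)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN];
	unsigned int len = 0;

	/* ... build the descriptor sequence into desc[] and set len ... */

	/* Returns 0 once the HW signalled completion, or an error code */
	return cc_send_sync_request(drvdata, &cc_req, desc, len);
}
#endif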
 508
 509/*!
 510 * Enqueue the caller's request to the crypto hardware during the init process.
 511 * Assumes this function is not called in the middle of a flow,
 512 * since we set the QUEUE_LAST_IND flag in the last descriptor.
 513 *
 514 * \param drvdata
 515 * \param desc The crypto sequence
 516 * \param len The crypto sequence length
 517 *
 518 * \return int Returns "0" upon success
 519 */
 520int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
 521                      unsigned int len)
 522{
 523        struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 524        unsigned int total_seq_len = len; /*initial sequence length*/
 525        int rc = 0;
 526
 527        /* Wait for space in the HW and SW queues. Polls the HW queue for up
 528         * to CC_MAX_POLL_ITER iterations. */
 529        rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len);
 530        if (rc)
 531                return rc;
 532
 533        set_queue_last_ind(drvdata, &desc[(len - 1)]);
 534
 535        /*
 536         * We are about to push a command to the HW via the command registers
 537         * that may reference host memory. We need to issue a memory barrier
 538         * to make sure there are no outstanding memory writes
 539         */
 540        wmb();
 541        enqueue_seq(drvdata, desc, len);
 542
 543        /* Update the free slots in HW queue */
 544        req_mgr_h->q_free_slots =
 545                cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT));
 546
 547        return 0;
 548}
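
/*
 * Illustrative sketch, not compiled here: send_request_init() is the
 * variant used from the driver init flow. It sets QUEUE_LAST_IND on the
 * last descriptor itself and registers no completion, so it must not be
 * called in the middle of another flow, as the comment above notes. The
 * descriptor content below is a placeholder.
 */
#if 0
static int example_init_time_sequence(struct cc_drvdata *drvdata)
{
	struct cc_hw_desc desc[1];

	hw_desc_init(&desc[0]);
	/* ... fill in an init-time descriptor, e.g. an SRAM setup step ... */

	return send_request_init(drvdata, desc, 1);
}
#endif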
 549
 550void complete_request(struct cc_drvdata *drvdata)
 551{
 552        struct cc_req_mgr_handle *request_mgr_handle =
 553                                                drvdata->request_mgr_handle;
 554
 555        complete(&drvdata->hw_queue_avail);
 556#ifdef COMP_IN_WQ
 557        queue_delayed_work(request_mgr_handle->workq,
 558                           &request_mgr_handle->compwork, 0);
 559#else
 560        tasklet_schedule(&request_mgr_handle->comptask);
 561#endif
 562}
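
/*
 * Illustrative sketch, not compiled here: complete_request() is the hook
 * the interrupt handler uses to defer completion processing out of hard-IRQ
 * context. The handler below is hypothetical (the real ISR lives in the
 * main driver file); the point is only the hard-IRQ/tasklet split that ends
 * up in comp_handler().
 */
#if 0
static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct cc_drvdata *drvdata = dev_id;

	/* Read and cache the IRR, mask the source, then defer the work */
	complete_request(drvdata);

	return IRQ_HANDLED;
}
#endif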
 563
 564#ifdef COMP_IN_WQ
 565static void comp_work_handler(struct work_struct *work)
 566{
 567        struct cc_drvdata *drvdata =
 568                container_of(work, struct cc_drvdata, compwork.work);
 569
 570        comp_handler((unsigned long)drvdata);
 571}
 572#endif
 573
 574static void proc_completions(struct cc_drvdata *drvdata)
 575{
 576        struct cc_crypto_req *cc_req;
 577        struct device *dev = drvdata_to_dev(drvdata);
 578        struct cc_req_mgr_handle *request_mgr_handle =
 579                                                drvdata->request_mgr_handle;
 580        unsigned int *tail = &request_mgr_handle->req_queue_tail;
 581        unsigned int *head = &request_mgr_handle->req_queue_head;
 582
 583        while (request_mgr_handle->axi_completed) {
 584                request_mgr_handle->axi_completed--;
 585
 586                /* Dequeue request */
 587                if (*head == *tail) {
 588                        /* We are supposed to handle a completion but our
 589                         * queue is empty. This is not normal. Return and
 590                         * hope for the best.
 591                         */
 592                        dev_err(dev, "Request queue is empty head == tail %u\n",
 593                                *head);
 594                        break;
 595                }
 596
 597                cc_req = &request_mgr_handle->req_queue[*tail];
 598
 599                if (cc_req->user_cb)
 600                        cc_req->user_cb(dev, cc_req->user_arg, 0);
 601                *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
 602                dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
 603                dev_dbg(dev, "Request completed. axi_completed=%d\n",
 604                        request_mgr_handle->axi_completed);
 605                cc_pm_put_suspend(dev);
 606        }
 607}
 608
 609static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata)
 610{
 611        return FIELD_GET(AXIM_MON_COMP_VALUE,
 612                         cc_ioread(drvdata, drvdata->axim_mon_offset));
 613}
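
/*
 * Illustrative sketch, not compiled here: FIELD_GET() (linux/bitfield.h)
 * extracts the field selected by a constant mask and shifts it down, so
 * cc_axi_comp_count() returns just the AXI completion counter from the
 * monitor register. The mask and register values below are made up purely
 * for illustration.
 */
#if 0
static u32 example_field_get(void)
{
	/* If the mask were 0x0000FF00 and the register read back 0x00001200,
	 * FIELD_GET() would return 0x12, i.e. 18 completions.
	 */
	return FIELD_GET(0x0000FF00, 0x00001200);
}
#endif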
 614
 615/* Deferred service handler, run as interrupt-fired tasklet */
 616static void comp_handler(unsigned long devarg)
 617{
 618        struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
 619        struct cc_req_mgr_handle *request_mgr_handle =
 620                                                drvdata->request_mgr_handle;
 621
 622        u32 irq;
 623
 624        irq = (drvdata->irq & CC_COMP_IRQ_MASK);
 625
 626        if (irq & CC_COMP_IRQ_MASK) {
 627                /* To prevent the interrupt from firing as soon as we unmask
 628                 * it, we clear it now
 629                 */
 630                cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
 631
 632                /* Avoid race with above clear: Test completion counter
 633                 * once more
 634                 */
 635                request_mgr_handle->axi_completed +=
 636                                cc_axi_comp_count(drvdata);
 637
 638                while (request_mgr_handle->axi_completed) {
 639                        do {
 640                                proc_completions(drvdata);
 641                                /* At this point (after proc_completions()),
 642                                 * request_mgr_handle->axi_completed is 0.
 643                                 */
 644                                request_mgr_handle->axi_completed =
 645                                                cc_axi_comp_count(drvdata);
 646                        } while (request_mgr_handle->axi_completed > 0);
 647
 648                        cc_iowrite(drvdata, CC_REG(HOST_ICR),
 649                                   CC_COMP_IRQ_MASK);
 650
 651                        request_mgr_handle->axi_completed +=
 652                                        cc_axi_comp_count(drvdata);
 653                }
 654        }
 655        /* After verifying that there is nothing to do,
 656         * unmask AXI completion interrupt
 657         */
 658        cc_iowrite(drvdata, CC_REG(HOST_IMR),
 659                   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
 660
 661        cc_proc_backlog(drvdata);
 662}
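
/*
 * Illustrative sketch, not compiled here: the ordering argument behind
 * comp_handler() above, reduced to its core. Clearing the interrupt cause
 * before (re-)reading the completion counter means a completion that races
 * with the clear re-asserts the cause and is picked up either by the next
 * read or by a new interrupt once the source is unmasked, so no event is
 * lost. The function name is hypothetical and omits the extra re-check the
 * real handler performs.
 */
#if 0
static void example_irq_drain_pattern(struct cc_drvdata *drvdata)
{
	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;

	/* 1. Acknowledge the cause before counting */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
	mgr->axi_completed += cc_axi_comp_count(drvdata);

	/* 2. Drain everything that is pending */
	while (mgr->axi_completed) {
		proc_completions(drvdata);
		mgr->axi_completed = cc_axi_comp_count(drvdata);
	}

	/* 3. Only now unmask the completion interrupt (HOST_IMR) */
}
#endif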
 663
 664/*
 665 * Resume the queue configuration - no need for the caller to take the lock,
 666 * as it is taken here under the spin lock protection
 667 */
 668#if defined(CONFIG_PM)
 669int cc_resume_req_queue(struct cc_drvdata *drvdata)
 670{
 671        struct cc_req_mgr_handle *request_mgr_handle =
 672                drvdata->request_mgr_handle;
 673
 674        spin_lock_bh(&request_mgr_handle->hw_lock);
 675        request_mgr_handle->is_runtime_suspended = false;
 676        spin_unlock_bh(&request_mgr_handle->hw_lock);
 677
 678        return 0;
 679}
 680
 681/*
 682 * Suspend the queue configuration. Since it is used for runtime suspend
 683 * only, just verify that the queue can be suspended.
 684 */
 685int cc_suspend_req_queue(struct cc_drvdata *drvdata)
 686{
 687        struct cc_req_mgr_handle *request_mgr_handle =
 688                                                drvdata->request_mgr_handle;
 689
 690        /* lock the send_request */
 691        spin_lock_bh(&request_mgr_handle->hw_lock);
 692        if (request_mgr_handle->req_queue_head !=
 693            request_mgr_handle->req_queue_tail) {
 694                spin_unlock_bh(&request_mgr_handle->hw_lock);
 695                return -EBUSY;
 696        }
 697        request_mgr_handle->is_runtime_suspended = true;
 698        spin_unlock_bh(&request_mgr_handle->hw_lock);
 699
 700        return 0;
 701}
 702
 703bool cc_req_queue_suspended(struct cc_drvdata *drvdata)
 704{
 705        struct cc_req_mgr_handle *request_mgr_handle =
 706                                                drvdata->request_mgr_handle;
 707
 708        return  request_mgr_handle->is_runtime_suspended;
 709}
 710
 711#endif
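
/*
 * Illustrative sketch, not compiled here: how the three CONFIG_PM helpers
 * above are intended to be used by the runtime-PM callbacks (the real ones
 * live in cc_pm.c). The callback names below are hypothetical.
 */
#if 0
static int example_runtime_suspend(struct cc_drvdata *drvdata)
{
	int rc;

	/* Refuse to suspend while requests are still queued */
	rc = cc_suspend_req_queue(drvdata);
	if (rc)
		return rc;	/* -EBUSY: the queue is not empty yet */

	/* ... save state, gate the clock ... */
	return 0;
}

static int example_runtime_resume(struct cc_drvdata *drvdata)
{
	/* ... ungate the clock, restore state ... */
	return cc_resume_req_queue(drvdata);
}
#endif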
 712