linux/drivers/crypto/qat/qat_common/adf_transport.c
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

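/*
 * Ring sizes are powers of two, so "data % ring_size" reduces to
 * clearing the bits above the size: data - ((data >> shift) << shift).
 */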
static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
{
        uint32_t div = data >> shift;
        uint32_t mult = div << shift;

        return data - mult;
}

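/*
 * The ring's DMA base address must be naturally aligned to the ring
 * size, i.e. none of the low (size - 1) address bits may be set.
 */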
static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
{
        if (((size - 1) & addr) != 0)
                return -EFAULT;
        return 0;
}

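/*
 * Map the requested msg_size * msg_num byte count to one of the
 * supported ring size encodings, or fall back to the default size.
 */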
static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
{
        int i = ADF_MIN_RING_SIZE;

        for (; i <= ADF_MAX_RING_SIZE; i++)
                if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
                        return i;

        return ADF_DEFAULT_RING_SIZE;
}

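/* Claim a ring slot in the bank's ring bitmask, under the bank lock. */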
static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        if (bank->ring_mask & (1 << ring)) {
                spin_unlock(&bank->lock);
                return -EFAULT;
        }
        bank->ring_mask |= (1 << ring);
        spin_unlock(&bank->lock);
        return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock(&bank->lock);
        bank->ring_mask &= ~(1 << ring);
        spin_unlock(&bank->lock);
}

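/*
 * Enable interrupts for a ring by adding it to the bank's IRQ mask and
 * reprogramming the interrupt coalescing enable and control CSRs.
 */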
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask |= (1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
        WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
                              bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
{
        spin_lock_bh(&bank->lock);
        bank->irq_mask &= ~(1 << ring);
        spin_unlock_bh(&bank->lock);
        WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

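/*
 * Copy one request message onto the ring and advance the tail CSR.
 * Returns -EAGAIN if the ring is full, i.e. accepting the message would
 * push the in-flight count past the ring's capacity.
 */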
int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
{
        if (atomic_add_return(1, ring->inflights) >
            ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
                atomic_dec(ring->inflights);
                return -EAGAIN;
        }
        spin_lock_bh(&ring->lock);
        memcpy(ring->base_addr + ring->tail, msg,
               ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

        ring->tail = adf_modulo(ring->tail +
                                ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                ADF_RING_SIZE_MODULO(ring->ring_size));
        WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring->tail);
        spin_unlock_bh(&ring->lock);
        return 0;
}

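/*
 * Drain all pending responses from a ring: run the ring's callback for
 * each message, restore the empty-message signature, then update the
 * head CSR and in-flight counter once for the whole batch.
 */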
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
        uint32_t msg_counter = 0;
        uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);

        while (*msg != ADF_RING_EMPTY_SIG) {
                ring->callback((uint32_t *)msg);
                *msg = ADF_RING_EMPTY_SIG;
                ring->head = adf_modulo(ring->head +
                                        ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
                                        ADF_RING_SIZE_MODULO(ring->ring_size));
                msg_counter++;
                msg = (uint32_t *)(ring->base_addr + ring->head);
        }
        if (msg_counter > 0) {
                WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
                                    ring->bank->bank_number,
                                    ring->ring_number, ring->head);
                atomic_sub(msg_counter, ring->inflights);
        }
        return 0;
}

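/*
 * TX rings get a plain ring config; RX (response) rings additionally
 * carry near-full and near-empty watermark settings.
 */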
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_config =
                        BUILD_RESP_RING_CONFIG(ring->ring_size,
                                               ADF_RING_NEAR_WATERMARK_512,
                                               ADF_RING_NEAR_WATERMARK_0);

        WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
                              ring->ring_number, ring_config);
}

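/*
 * Allocate the DMA-coherent ring buffer, verify its alignment, and
 * program the ring's config and base CSRs.
 */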
 179static int adf_init_ring(struct adf_etr_ring_data *ring)
 180{
 181        struct adf_etr_bank_data *bank = ring->bank;
 182        struct adf_accel_dev *accel_dev = bank->accel_dev;
 183        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 184        uint64_t ring_base;
 185        uint32_t ring_size_bytes =
 186                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
 187
 188        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
 189        ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
 190                                             ring_size_bytes, &ring->dma_addr,
 191                                             GFP_KERNEL);
 192        if (!ring->base_addr)
 193                return -ENOMEM;
 194
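        /*
         * Filling with 0x7F bytes makes every 32-bit slot read back as
         * the empty-message signature (ADF_RING_EMPTY_SIG).
         */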
        memset(ring->base_addr, 0x7F, ring_size_bytes);
        /* The base_addr has to be aligned to the size of the buffer */
        if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
                dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
                dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
                                  ring->base_addr, ring->dma_addr);
                return -EFAULT;
        }

        if (hw_data->tx_rings_mask & (1 << ring->ring_number))
                adf_configure_tx_ring(ring);
        else
                adf_configure_rx_ring(ring);

        ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
        WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
                            ring->ring_number, ring_base);
        spin_lock_init(&ring->lock);
        return 0;
}

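/* Wipe the ring buffer and free it, if one was allocated. */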
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
        uint32_t ring_size_bytes =
                        ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
        ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

        if (ring->base_addr) {
                memset(ring->base_addr, 0x7F, ring_size_bytes);
                dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
                                  ring_size_bytes, ring->base_addr,
                                  ring->dma_addr);
        }
}

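/*
 * Look up the ring number assigned to ring_name in the device
 * configuration, reserve and initialize the ring, enable HW arbitration
 * for it and, unless poll_mode is set, its interrupt. On success
 * *ring_ptr points to the new ring.
 */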
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
                    uint32_t bank_num, uint32_t num_msgs,
                    uint32_t msg_size, const char *ring_name,
                    adf_callback_fn callback, int poll_mode,
                    struct adf_etr_ring_data **ring_ptr)
{
        struct adf_etr_data *transport_data = accel_dev->transport;
        struct adf_etr_bank_data *bank;
        struct adf_etr_ring_data *ring;
        char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        uint32_t ring_num;
        int ret;

        if (bank_num >= GET_MAX_BANKS(accel_dev)) {
                dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
                return -EFAULT;
        }
        if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
                return -EFAULT;
        }
        if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
                              ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
                dev_err(&GET_DEV(accel_dev),
                        "Invalid ring size for given msg size\n");
                return -EFAULT;
        }
        if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
                dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
                        section, ring_name);
                return -EFAULT;
        }
        if (kstrtouint(val, 10, &ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
                return -EFAULT;
        }

        bank = &transport_data->banks[bank_num];
        if (adf_reserve_ring(bank, ring_num)) {
                dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
                        ring_num, ring_name);
                return -EFAULT;
        }
        ring = &bank->rings[ring_num];
        ring->ring_number = ring_num;
        ring->bank = bank;
        ring->callback = callback;
        ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
        ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
        ring->head = 0;
        ring->tail = 0;
        atomic_set(ring->inflights, 0);
        ret = adf_init_ring(ring);
        if (ret)
                goto err;

        /* Enable HW arbitration for the given ring */
        accel_dev->hw_device->hw_arb_ring_enable(ring);

        if (adf_ring_debugfs_add(ring, ring_name)) {
                dev_err(&GET_DEV(accel_dev),
                        "Couldn't add ring debugfs entry\n");
                ret = -EFAULT;
                goto err;
        }

        /* Enable interrupts if needed */
        if (callback && (!poll_mode))
                adf_enable_ring_irq(bank, ring->ring_number);
        *ring_ptr = ring;
        return 0;
err:
        adf_cleanup_ring(ring);
        adf_unreserve_ring(bank, ring_num);
        accel_dev->hw_device->hw_arb_ring_disable(ring);
        return ret;
}

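/* Tear down a ring previously created by adf_create_ring(). */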
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
        struct adf_etr_bank_data *bank = ring->bank;
        struct adf_accel_dev *accel_dev = bank->accel_dev;

        /* Disable interrupts for the given ring */
        adf_disable_ring_irq(bank, ring->ring_number);

        /* Clear the ring's config and base CSRs */
        WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
                              ring->ring_number, 0);
        WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
                            ring->ring_number, 0);
        adf_ring_debugfs_rm(ring);
        adf_unreserve_ring(bank, ring->ring_number);
        /* Disable HW arbitration for the given ring */
        accel_dev->hw_device->hw_arb_ring_disable(ring);
        adf_cleanup_ring(ring);
}

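/*
 * Service every ring in the bank that has its interrupt enabled and is
 * not flagged empty in the empty-ring status CSR.
 */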
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
        uint32_t empty_rings, i;

        empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
        empty_rings = ~empty_rings & bank->irq_mask;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
                if (empty_rings & (1 << i))
                        adf_handle_response(&bank->rings[i]);
        }
}

/**
 * adf_response_handler() - Bottom half response handler
 * @bank_addr:  Address of the ring bank for which the BH was scheduled.
 *
 * Function is the bottom half handler for responses from the acceleration
 * device. There is one handler for every ring bank, and it checks all
 * communication rings in the bank.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_response_handler(unsigned long bank_addr)
{
        struct adf_etr_bank_data *bank = (void *)bank_addr;

        /* Handle all the responses and re-enable IRQs */
        adf_ring_response_handler(bank);
        WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
                                   bank->irq_mask);
}
EXPORT_SYMBOL_GPL(adf_response_handler);

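/*
 * Read a decimal configuration value whose key name is built from the
 * given printf-style format and key number.
 */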
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
                                  const char *section, const char *format,
                                  uint32_t key, uint32_t *value)
{
        char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

        if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
                return -EFAULT;

        if (kstrtouint(val_buf, 10, value))
                return -EFAULT;
        return 0;
}

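/*
 * Fetch the per-bank IRQ coalescing timer from the configuration; fall
 * back to the default when the value is missing or out of range.
 */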
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
                                  const char *section,
                                  uint32_t bank_num_in_accel)
{
        if (adf_get_cfg_int(bank->accel_dev, section,
                            ADF_ETRMGR_COALESCE_TIMER_FORMAT,
                            bank_num_in_accel, &bank->irq_coalesc_timer))
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

        if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
            ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
                bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

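/*
 * Initialize a ring bank: reset all ring CSRs, set up IRQ coalescing,
 * and share each TX ring's in-flight counter with the RX ring sitting
 * tx_rx_gap slots above it.
 */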
static int adf_init_bank(struct adf_accel_dev *accel_dev,
                         struct adf_etr_bank_data *bank,
                         uint32_t bank_num, void __iomem *csr_addr)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_etr_ring_data *ring;
        struct adf_etr_ring_data *tx_ring;
        uint32_t i, coalesc_enabled = 0;

        memset(bank, 0, sizeof(*bank));
        bank->bank_number = bank_num;
        bank->csr_addr = csr_addr;
        bank->accel_dev = accel_dev;
        spin_lock_init(&bank->lock);

        /*
         * Always enable IRQ coalescing: this allows use of the optimised
         * flag and coalescing register. If coalescing is disabled in the
         * config file, just use the minimum time value.
         */
        if ((adf_get_cfg_int(accel_dev, "Accelerator0",
                             ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
                             &coalesc_enabled) == 0) && coalesc_enabled)
                adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
        else
                bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
                WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i)) {
                        ring->inflights =
                                kzalloc_node(sizeof(atomic_t),
                                             GFP_KERNEL,
                                             dev_to_node(&GET_DEV(accel_dev)));
                        if (!ring->inflights)
                                goto err;
                } else {
                        if (i < hw_data->tx_rx_gap) {
                                dev_err(&GET_DEV(accel_dev),
                                        "Invalid tx rings mask config\n");
                                goto err;
                        }
                        tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
                        ring->inflights = tx_ring->inflights;
                }
        }
        if (adf_bank_debugfs_add(bank)) {
                dev_err(&GET_DEV(accel_dev),
                        "Failed to add bank debugfs entry\n");
                goto err;
        }

        WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
        return 0;
err:
        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                ring = &bank->rings[i];
                if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
                        kfree(ring->inflights);
        }
        return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communication channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        void __iomem *csr_addr;
        uint32_t size;
        uint32_t num_banks = 0;
        int i, ret;

        etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
                                dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data)
                return -ENOMEM;

        num_banks = GET_MAX_BANKS(accel_dev);
        size = num_banks * sizeof(struct adf_etr_bank_data);
        etr_data->banks = kzalloc_node(size, GFP_KERNEL,
                                       dev_to_node(&GET_DEV(accel_dev)));
        if (!etr_data->banks) {
                ret = -ENOMEM;
                goto err_bank;
        }

        accel_dev->transport = etr_data;
        i = hw_data->get_etr_bar_id(hw_data);
        csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

        /* accel_dev->debugfs_dir should always be non-NULL here */
        etr_data->debug = debugfs_create_dir("transport",
                                             accel_dev->debugfs_dir);
        if (!etr_data->debug) {
                dev_err(&GET_DEV(accel_dev),
                        "Unable to create transport debugfs entry\n");
                ret = -ENOENT;
                goto err_bank_debug;
        }

        for (i = 0; i < num_banks; i++) {
                ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
                                    csr_addr);
                if (ret)
                        goto err_bank_all;
        }

        return 0;

err_bank_all:
        debugfs_remove(etr_data->debug);
err_bank_debug:
        kfree(etr_data->banks);
err_bank:
        kfree(etr_data);
        accel_dev->transport = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

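/* Release all rings in a bank and remove its debugfs entries. */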
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
        uint32_t i;

        for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
                struct adf_accel_dev *accel_dev = bank->accel_dev;
                struct adf_hw_device_data *hw_data = accel_dev->hw_device;
                struct adf_etr_ring_data *ring = &bank->rings[i];

                if (bank->ring_mask & (1 << i))
                        adf_cleanup_ring(ring);

                if (hw_data->tx_rings_mask & (1 << i))
                        kfree(ring->inflights);
        }
        adf_bank_debugfs_rm(bank);
        memset(bank, 0, sizeof(*bank));
}

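/* Clean up every bank of the device's transport data. */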
static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;
        uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);

        for (i = 0; i < num_banks; i++)
                cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communication channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
        struct adf_etr_data *etr_data = accel_dev->transport;

        if (etr_data) {
                adf_cleanup_etr_handles(accel_dev);
                debugfs_remove(etr_data->debug);
                kfree(etr_data->banks);
                kfree(etr_data);
                accel_dev->transport = NULL;
        }
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);