linux/drivers/crypto/qce/dma.c
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>

#include "dma.h"

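/*
 * Request the "tx" and "rx" DMA slave channels for @dev and allocate a
 * single buffer holding the result region followed by the ignore region.
 */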
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
        int ret;

        dma->txchan = dma_request_slave_channel_reason(dev, "tx");
        if (IS_ERR(dma->txchan))
                return PTR_ERR(dma->txchan);

        dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
        if (IS_ERR(dma->rxchan)) {
                ret = PTR_ERR(dma->rxchan);
                goto error_rx;
        }

        dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
                                  GFP_KERNEL);
        if (!dma->result_buf) {
                ret = -ENOMEM;
                goto error_nomem;
        }

        dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

        return 0;
error_nomem:
        dma_release_channel(dma->rxchan);
error_rx:
        dma_release_channel(dma->txchan);
        return ret;
}

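/* Release both channels and free the result/ignore buffer. */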
void qce_dma_release(struct qce_dma_data *dma)
{
        dma_release_channel(dma->txchan);
        dma_release_channel(dma->rxchan);
        kfree(dma->result_buf);
}

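/*
 * Fill the unused entries at the end of @sgt with the pages of @new_sgl.
 * Returns the last entry written, or ERR_PTR(-EINVAL) if @sgt has no
 * unused entries left.
 */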
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
        struct scatterlist *sg = sgt->sgl, *sg_last = NULL;

        while (sg) {
                if (!sg_page(sg))
                        break;
                sg = sg_next(sg);
        }

        if (!sg)
                return ERR_PTR(-EINVAL);

        while (new_sgl && sg) {
                sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
                            new_sgl->offset);
                sg_last = sg;
                sg = sg_next(sg);
                new_sgl = sg_next(new_sgl);
        }

        return sg_last;
}

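/*
 * Prepare and submit a slave scatter-gather descriptor on @chan.  The
 * transfer does not start until qce_dma_issue_pending() is called.
 */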
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
                           int nents, unsigned long flags,
                           enum dma_transfer_direction dir,
                           dma_async_tx_callback cb, void *cb_param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        if (!sg || !nents)
                return -EINVAL;

        desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
        if (!desc)
                return -EINVAL;

        desc->callback = cb;
        desc->callback_param = cb_param;
        cookie = dmaengine_submit(desc);

        return dma_submit_error(cookie);
}

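/*
 * Queue both halves of a request: the rx channel pushes @rx_sg to the
 * crypto engine (DMA_MEM_TO_DEV) and the tx channel collects the results
 * into @tx_sg (DMA_DEV_TO_MEM).  The completion callback fires on the
 * tx side, once the results have landed in memory.
 */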
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
                     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
                     dma_async_tx_callback cb, void *cb_param)
{
        struct dma_chan *rxchan = dma->rxchan;
        struct dma_chan *txchan = dma->txchan;
        unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
        int ret;

        ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
                              NULL, NULL);
        if (ret)
                return ret;

        return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
                               cb, cb_param);
}

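/* Start the descriptors queued on both channels. */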
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
        dma_async_issue_pending(dma->rxchan);
        dma_async_issue_pending(dma->txchan);
}

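/*
 * Abort outstanding transfers on both channels.  The rx channel is
 * terminated first; its error, if any, takes precedence in the return
 * value.
 */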
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
        int ret;

        ret = dmaengine_terminate_all(dma->rxchan);
        return ret ?: dmaengine_terminate_all(dma->txchan);
}