1
2
3
4
5
6
7
8
9
10
11
12
13
14#ifndef _DMA_H_
15#define _DMA_H_
16
17#include <linux/dmaengine.h>
18
19
20#define QCE_BAM_BURST_SIZE 64
21
22#define QCE_AUTHIV_REGS_CNT 16
23#define QCE_AUTH_BYTECOUNT_REGS_CNT 4
24#define QCE_CNTRIV_REGS_CNT 4
25
/*
 * struct qce_result_dump - result area written back after an operation.
 *
 * Field meanings are inferred from names and the *_REGS_CNT macros above:
 * authentication IV registers, authentication byte-count registers,
 * encryption counter/IV registers, and two status words. The layout is
 * presumably fixed by the hardware dump format — do not reorder or resize
 * fields (TODO(review): confirm against the QCE register documentation).
 */
struct qce_result_dump {
	u32 auth_iv[QCE_AUTHIV_REGS_CNT];
	u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
	u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
	u32 status;
	u32 status2;
};
33
/* Scratch buffer sized at two DMA bursts; presumably used to absorb data
 * that the driver discards (TODO(review): confirm usage in dma.c). */
#define QCE_IGNORE_BUF_SZ (2 * QCE_BAM_BURST_SIZE)
/* Result buffer size: the dump structure rounded up to a whole number of
 * BAM bursts, so DMA transfers into it stay burst-aligned. */
#define QCE_RESULT_BUF_SZ \
	ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)
37
/*
 * struct qce_dma_data - per-instance DMA state for the QCE driver.
 * @txchan:     dmaengine channel, by name the transmit direction
 *              (TODO(review): confirm direction mapping in qce_dma_request())
 * @rxchan:     dmaengine channel, by name the receive direction
 * @result_buf: buffer receiving the engine's result dump
 *              (see QCE_RESULT_BUF_SZ for its expected size)
 * @ignore_buf: scratch buffer (see QCE_IGNORE_BUF_SZ)
 *
 * Allocation/ownership of the buffers is handled outside this header,
 * presumably in qce_dma_request()/qce_dma_release().
 */
struct qce_dma_data {
	struct dma_chan *txchan;
	struct dma_chan *rxchan;
	struct qce_result_dump *result_buf;
	void *ignore_buf;
};
44
/* Acquire DMA resources for @dma on behalf of @dev; pairs with
 * qce_dma_release(). Returns 0 or, per kernel convention, a negative
 * errno (TODO(review): confirm in dma.c — implementations not visible here). */
int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
void qce_dma_release(struct qce_dma_data *dma);
/* Prepare dmaengine descriptors for the given input/output scatterlists;
 * @cb/@cb_param follow the dma_async_tx_callback completion convention. */
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
		     int in_ents, struct scatterlist *sg_out, int out_ents,
		     dma_async_tx_callback cb, void *cb_param);
/* Kick previously prepared descriptors on both channels. */
void qce_dma_issue_pending(struct qce_dma_data *dma);
/* Abort all in-flight transfers; returns a dmaengine status code. */
int qce_dma_terminate_all(struct qce_dma_data *dma);
/* Count scatterlist entries covering @nbytes; *@chained is an out-flag,
 * presumably set when the list is chained (confirm against the .c file). */
int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
/* Map/unmap helpers wrapping dma_map_sg()-style operations with explicit
 * handling for chained scatterlists. */
void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir, bool chained);
int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
	      enum dma_data_direction dir, bool chained);
/* Append @sg_add to the table @sgt; returns the resulting tail entry
 * (exact semantics defined in the implementation file). */
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
59
60#endif
61