// SPDX-License-Identifier: GPL-2.0
/*
 * QCOM BAM DMA engine driver
 *
 * BAM (Bus Access Manager) DMA blocks sit in front of a number of
 * peripherals on Qualcomm SoCs; each BAM exposes a set of pipes that this
 * driver presents as dmaengine slave channels.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

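/*
 * struct bam_desc_hw - native BAM hardware descriptor, written directly
 * into the channel's descriptor FIFO and consumed by the BAM engine
 * @addr: DMA address of the transfer buffer (little endian)
 * @size: transfer length in bytes (little endian)
 * @flags: DESC_FLAG_* control bits
 */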
struct bam_desc_hw {
	__le32 addr;
	__le16 size;
	__le16 flags;
};

#define BAM_DMA_AUTOSUSPEND_DELAY 100

#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)

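/*
 * struct bam_async_desc - one dmaengine transaction, backed by a list of
 * hardware descriptors that may be programmed in several chunks
 * @vd: virt-dma descriptor bookkeeping
 * @num_desc: number of hw descriptors not yet processed
 * @xfer_len: number of hw descriptors programmed in the current chunk
 * @flags: DESC_FLAG_* bits applied to the transaction's last descriptor
 * @curr_desc: first hw descriptor of the current chunk
 * @dir: DMA transfer direction
 * @length: total transaction length in bytes, used for residue reporting
 * @desc: array of hw descriptors backing the transaction
 */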
struct bam_async_desc {
	struct virt_dma_desc vd;

	u32 num_desc;
	u32 xfer_len;

	u16 flags;

	struct bam_desc_hw *curr_desc;

	enum dma_transfer_direction dir;
	size_t length;
	struct bam_desc_hw desc[];
};

enum bam_reg {
	BAM_CTRL,
	BAM_REVISION,
	BAM_NUM_PIPES,
	BAM_DESC_CNT_TRSHLD,
	BAM_IRQ_SRCS,
	BAM_IRQ_SRCS_MSK,
	BAM_IRQ_SRCS_UNMASKED,
	BAM_IRQ_STTS,
	BAM_IRQ_CLR,
	BAM_IRQ_EN,
	BAM_CNFG_BITS,
	BAM_IRQ_SRCS_EE,
	BAM_IRQ_SRCS_MSK_EE,
	BAM_P_CTRL,
	BAM_P_RST,
	BAM_P_HALT,
	BAM_P_IRQ_STTS,
	BAM_P_IRQ_CLR,
	BAM_P_IRQ_EN,
	BAM_P_EVNT_DEST_ADDR,
	BAM_P_EVNT_REG,
	BAM_P_SW_OFSTS,
	BAM_P_DATA_FIFO_ADDR,
	BAM_P_DESC_FIFO_ADDR,
	BAM_P_EVNT_GEN_TRSHLD,
	BAM_P_FIFO_SIZES,
};

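/*
 * struct reg_offset_data - per-version register layout, resolved by
 * bam_addr()
 * @base_offset: offset of the register block
 * @pipe_mult: stride applied per pipe for pipe-control registers
 * @evnt_mult: stride applied per pipe for event registers
 * @ee_mult: stride applied per execution environment
 */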
struct reg_offset_data {
	u32 base_offset;
	unsigned int pipe_mult, evnt_mult, ee_mult;
};

static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 },
};

static const struct reg_offset_data bam_v1_4_reg_info[] = {
	[BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 },
	[BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
};

static const struct reg_offset_data bam_v1_7_reg_info[] = {
	[BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 },
	[BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 },
	[BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 },
	[BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 },
};

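/* BAM_CTRL */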
#define BAM_SW_RST BIT(0)
#define BAM_EN BIT(1)
#define BAM_EN_ACCUM BIT(4)
#define BAM_TESTBUS_SEL_SHIFT 5
#define BAM_TESTBUS_SEL_MASK 0x3F
#define BAM_DESC_CACHE_SEL_SHIFT 13
#define BAM_DESC_CACHE_SEL_MASK 0x3
#define BAM_CACHED_DESC_STORE BIT(15)
#define IBC_DISABLE BIT(16)

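/* BAM_REVISION */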
#define REVISION_SHIFT 0
#define REVISION_MASK 0xFF
#define NUM_EES_SHIFT 8
#define NUM_EES_MASK 0xF
#define CE_BUFFER_SIZE BIT(13)
#define AXI_ACTIVE BIT(14)
#define USE_VMIDMT BIT(15)
#define SECURED BIT(16)
#define BAM_HAS_NO_BYPASS BIT(17)
#define HIGH_FREQUENCY_BAM BIT(18)
#define INACTIV_TMRS_EXST BIT(19)
#define NUM_INACTIV_TMRS BIT(20)
#define DESC_CACHE_DEPTH_SHIFT 21
#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN BIT(23)
#define INACTIV_TMR_BASE_SHIFT 24
#define INACTIV_TMR_BASE_MASK 0xFF

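/* BAM_NUM_PIPES */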
#define BAM_NUM_PIPES_SHIFT 0
#define BAM_NUM_PIPES_MASK 0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT 16
#define PERIPH_NON_PIP_GRP_MASK 0xFF
#define BAM_NON_PIPE_GRP_SHIFT 24
#define BAM_NON_PIPE_GRP_MASK 0xFF

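/* BAM_CNFG_BITS */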
#define BAM_PIPE_CNFG BIT(2)
#define BAM_FULL_PIPE BIT(11)
#define BAM_NO_EXT_P_RST BIT(12)
#define BAM_IBC_DISABLE BIT(13)
#define BAM_SB_CLK_REQ BIT(14)
#define BAM_PSM_CSW_REQ BIT(15)
#define BAM_PSM_P_RES BIT(16)
#define BAM_AU_P_RES BIT(17)
#define BAM_SI_P_RES BIT(18)
#define BAM_WB_P_RES BIT(19)
#define BAM_WB_BLK_CSW BIT(20)
#define BAM_WB_CSW_ACK_IDL BIT(21)
#define BAM_WB_RETR_SVPNT BIT(22)
#define BAM_WB_DSC_AVL_P_RST BIT(23)
#define BAM_REG_P_EN BIT(24)
#define BAM_PSM_P_HD_DATA BIT(25)
#define BAM_AU_ACCUMED BIT(26)
#define BAM_CMD_ENABLE BIT(27)

#define BAM_CNFG_BITS_DEFAULT (BAM_PIPE_CNFG |		\
			       BAM_NO_EXT_P_RST |	\
			       BAM_IBC_DISABLE |	\
			       BAM_SB_CLK_REQ |		\
			       BAM_PSM_CSW_REQ |	\
			       BAM_PSM_P_RES |		\
			       BAM_AU_P_RES |		\
			       BAM_SI_P_RES |		\
			       BAM_WB_P_RES |		\
			       BAM_WB_BLK_CSW |		\
			       BAM_WB_CSW_ACK_IDL |	\
			       BAM_WB_RETR_SVPNT |	\
			       BAM_WB_DSC_AVL_P_RST |	\
			       BAM_REG_P_EN |		\
			       BAM_PSM_P_HD_DATA |	\
			       BAM_AU_ACCUMED |		\
			       BAM_CMD_ENABLE)

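/* BAM_P_CTRL */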
#define P_EN BIT(1)
#define P_DIRECTION BIT(3)
#define P_SYS_STRM BIT(4)
#define P_SYS_MODE BIT(5)
#define P_AUTO_EOB BIT(6)
#define P_AUTO_EOB_SEL_SHIFT 7
#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT 9
#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD BIT(11)
#define P_LOCK_GROUP_SHIFT 16
#define P_LOCK_GROUP_MASK 0x1F

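/* BAM_DESC_CNT_TRSHLD */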
#define CNT_TRSHLD 0xffff
#define DEFAULT_CNT_THRSHLD 0x4

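/* BAM_IRQ_SRCS */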
#define BAM_IRQ BIT(31)
#define P_IRQ 0x7fffffff

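/* BAM_IRQ_SRCS_MSK */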
#define BAM_IRQ_MSK BAM_IRQ
#define P_IRQ_MSK P_IRQ

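/* BAM_IRQ_STTS */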
#define BAM_TIMER_IRQ BIT(4)
#define BAM_EMPTY_IRQ BIT(3)
#define BAM_ERROR_IRQ BIT(2)
#define BAM_HRESP_ERR_IRQ BIT(1)

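/* BAM_IRQ_CLR */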
#define BAM_TIMER_CLR BIT(4)
#define BAM_EMPTY_CLR BIT(3)
#define BAM_ERROR_CLR BIT(2)
#define BAM_HRESP_ERR_CLR BIT(1)

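/* BAM_IRQ_EN */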
#define BAM_TIMER_EN BIT(4)
#define BAM_EMPTY_EN BIT(3)
#define BAM_ERROR_EN BIT(2)
#define BAM_HRESP_ERR_EN BIT(1)

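/* BAM_P_IRQ_EN */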
#define P_PRCSD_DESC_EN BIT(0)
#define P_TIMER_EN BIT(1)
#define P_WAKE_EN BIT(2)
#define P_OUT_OF_DESC_EN BIT(3)
#define P_ERR_EN BIT(4)
#define P_TRNSFR_END_EN BIT(5)
#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

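/* BAM_P_SW_OFSTS */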
#define P_SW_OFSTS_MASK 0xffff

#define BAM_DESC_FIFO_SIZE SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE (SZ_32K - 8)

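/*
 * struct bam_chan - per-pipe channel state
 * @vc: virt-dma channel, embeds the struct dma_chan handed to clients
 * @bdev: parent BAM device
 * @id: pipe index within the BAM
 * @curr_txd: transaction currently programmed into the hardware
 * @slave: most recent dma_slave_config from the client
 * @fifo_virt: CPU address of the descriptor FIFO shared with the hardware
 * @fifo_phys: DMA address of the descriptor FIFO
 * @head: oldest hw descriptor slot still owned by the hardware
 * @tail: next free slot in the circular descriptor FIFO
 * @initialized: pipe hardware has been set up for the current direction
 * @paused: transfers are halted via device_pause
 * @reconfigure: a new slave config must be applied before the next transfer
 * @node: list node
 */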
struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;

	u32 id;

	struct bam_async_desc *curr_txd;

	struct dma_slave_config slave;

	struct bam_desc_hw *fifo_virt;
	dma_addr_t fifo_phys;

	unsigned short head;
	unsigned short tail;

	unsigned int initialized;
	unsigned int paused;
	unsigned int reconfigure;

	struct list_head node;
};

static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}

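/*
 * struct bam_device - BAM controller state
 * @regs: mapped controller registers
 * @dev: device handle
 * @common: dmaengine device exported to clients
 * @dma_parms: DMA limits (max segment size)
 * @channels: array of num_channels pipe channels
 * @num_channels: pipe count reported by BAM_NUM_PIPES
 * @ee: local execution environment index, from the "qcom,ee" property
 * @controlled_remotely: true if another EE owns global BAM setup
 * @layout: register layout for this BAM revision
 * @bamclk: interface clock
 * @irq: controller interrupt
 * @task: tasklet that starts queued transactions outside irq context
 */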
struct bam_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;
	u32 num_channels;

	u32 ee;
	bool controlled_remotely;

	const struct reg_offset_data *layout;

	struct clk *bamclk;
	int irq;

	struct tasklet_struct task;
};

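/**
 * bam_addr - returns BAM register address
 * @bdev: bam device
 * @pipe: pipe instance (ignored where the register is not per-pipe)
 * @reg: register enum
 *
 * Resolves a register enum to an MMIO address using the per-version layout
 * table, applying the pipe, event, and execution-environment strides.
 */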
static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
		enum bam_reg reg)
{
	const struct reg_offset_data r = bdev->layout[reg];

	return bdev->regs + r.base_offset +
		r.pipe_mult * pipe +
		r.evnt_mult * pipe +
		r.ee_mult * bdev->ee;
}

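/**
 * bam_reset_channel - Reset individual channel
 * @bchan: bam channel
 *
 * This function resets a specific channel by toggling its reset bit. The
 * write barrier afterwards keeps later register writes from being
 * reordered before the reset.
 */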
static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));

	wmb();

	bchan->initialized = 0;
}

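/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * @dir: DMA transfer direction
 *
 * Resets the channel, programs the descriptor FIFO address and size,
 * unmasks this pipe's interrupt for the local execution environment, and
 * finally enables the pipe in system mode with the requested direction.
 */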
static void bam_chan_init_hw(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	bam_reset_channel(bchan);

	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
	writel_relaxed(BAM_FIFO_SIZE,
			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

	writel_relaxed(P_DEFAULT_IRQS_EN,
			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val |= BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	wmb();

	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));

	bchan->initialized = 1;

	bchan->head = 0;
	bchan->tail = 0;
}

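/**
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 *
 * This function allocates the FIFO descriptor memory shared with the BAM
 * hardware.
 */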
static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
					&bchan->fifo_phys, GFP_KERNEL);

	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	return 0;
}

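/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Frees the allocated FIFO descriptor memory, resets the channel, and
 * masks its interrupts. A channel with a transaction still in flight is
 * left untouched apart from an error message.
 */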
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;
	int ret;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		goto err;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
		    bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

err:
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

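/**
 * bam_slave_config - set slave configuration for channel
 * @chan: dma channel
 * @cfg: slave configuration
 *
 * Stores the slave configuration; it is applied lazily, the next time a
 * transaction is started on the channel.
 */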
static int bam_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *cfg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	return 0;
}

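/**
 * bam_prep_slave_sg - Prep slave sg transaction
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 *
 * Each scatterlist entry is split into as many hardware descriptors as
 * needed to respect the BAM_FIFO_SIZE limit of a single descriptor.
 */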
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	u32 i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");
		return NULL;
	}

	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);

	async_desc = kzalloc(sizeof(*async_desc) +
			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);

	if (!async_desc)
		goto err_out;

	if (flags & DMA_PREP_FENCE)
		async_desc->flags |= DESC_FLAG_NWD;

	if (flags & DMA_PREP_INTERRUPT)
		async_desc->flags |= DESC_FLAG_EOT;
	else
		async_desc->flags |= DESC_FLAG_INT;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;

	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			if (flags & DMA_PREP_CMD)
				desc->flags |= cpu_to_le16(DESC_FLAG_CMD);

			desc->addr = cpu_to_le32(sg_dma_address(sg) +
						 curr_offset);

			if (remainder > BAM_FIFO_SIZE) {
				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
				remainder -= BAM_FIFO_SIZE;
				curr_offset += BAM_FIFO_SIZE;
			} else {
				desc->size = cpu_to_le16(remainder);
				remainder = 0;
			}

			async_desc->length += le16_to_cpu(desc->size);
			desc++;
		} while (remainder > 0);
	}

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);

err_out:
	kfree(async_desc);
	return NULL;
}

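/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @chan: bam dma channel
 *
 * Dequeues and frees all transactions. A transaction still in flight is
 * moved back onto the issued list first so it is freed with the rest.
 */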
static int bam_dma_terminate_all(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flag;
	LIST_HEAD(head);

	spin_lock_irqsave(&bchan->vc.lock, flag);
	if (bchan->curr_txd) {
		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
		bchan->curr_txd = NULL;
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);

	return 0;
}

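/**
 * bam_pause - Pause DMA channel
 * @chan: dma channel
 *
 * Halts the pipe via BAM_P_HALT.
 */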
static int bam_pause(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;
	int ret;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return 0;
}

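/**
 * bam_resume - Resume DMA channel operations
 * @chan: dma channel
 *
 * Clears the pipe halt set by bam_pause().
 */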
static int bam_resume(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	unsigned long flag;
	int ret;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 0;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return 0;
}

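/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * Retires the hardware descriptors of the chunk that just completed on
 * each interrupting pipe. A fully processed transaction is completed; a
 * partially processed one is requeued on the issued list so the tasklet
 * programs its next chunk.
 *
 * Returns the raw BAM_IRQ_SRCS_EE value so the caller can also check the
 * controller-level BAM_IRQ bit.
 */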
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts;
	unsigned long flags;
	struct bam_async_desc *async_desc;

	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

		spin_lock_irqsave(&bchan->vc.lock, flags);
		async_desc = bchan->curr_txd;

		if (async_desc) {
			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			bchan->curr_txd = NULL;

			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;

			if (!async_desc->num_desc)
				vchan_cookie_complete(&async_desc->vd);
			else
				list_add(&async_desc->vd.node,
					 &bchan->vc.desc_issued);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}

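/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * Processes per-pipe interrupts, schedules the tasklet to start the next
 * transfers, and acknowledges controller-level interrupts. The full
 * barrier keeps the status read and the clear write from being reordered.
 */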
static irqreturn_t bam_dma_irq(int irq, void *data)
{
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;
	int ret;

	srcs |= process_channel_irqs(bdev);

	if (srcs & P_IRQ)
		tasklet_schedule(&bdev->task);

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return IRQ_NONE;

	if (srcs & BAM_IRQ) {
		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

		mb();

		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
	}

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

	return IRQ_HANDLED;
}

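/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Returns the status of the transaction and, when txstate is provided,
 * the residue of the transaction identified by the cookie.
 */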
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += le16_to_cpu(bchan->curr_txd->curr_desc[i].size);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}

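/**
 * bam_apply_new_config - apply a deferred slave configuration
 * @bchan: bam dma channel
 * @dir: DMA transfer direction, selects src or dst maxburst
 */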
static void bam_apply_new_config(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (dir == DMA_DEV_TO_MEM)
		maxburst = bchan->slave.src_maxburst;
	else
		maxburst = bchan->slave.dst_maxburst;

	writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	bchan->reconfigure = 0;
}

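/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 *
 * Copies up to MAX_DESCRIPTORS hardware descriptors into the circular
 * descriptor FIFO, wrapping at the end, then advances the pipe's event
 * register to hand them to the hardware. The write barrier orders the
 * FIFO writes before the doorbell write. Must be called with the channel
 * lock held.
 */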
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));
	int ret;

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	async_desc = container_of(vd, struct bam_async_desc, vd);
	bchan->curr_txd = async_desc;

	ret = pm_runtime_get_sync(bdev->dev);
	if (ret < 0)
		return;

	if (!bchan->initialized)
		bam_chan_init_hw(bchan, async_desc->dir);

	if (bchan->reconfigure)
		bam_apply_new_config(bchan, async_desc->dir);

	desc = bchan->curr_txd->curr_desc;

	if (async_desc->num_desc > MAX_DESCRIPTORS)
		async_desc->xfer_len = MAX_DESCRIPTORS;
	else
		async_desc->xfer_len = async_desc->num_desc;

	if (async_desc->num_desc == async_desc->xfer_len)
		desc[async_desc->xfer_len - 1].flags |=
					cpu_to_le16(async_desc->flags);
	else
		desc[async_desc->xfer_len - 1].flags |=
					cpu_to_le16(DESC_FLAG_INT);

	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
		       partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
		       sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
		       async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;

	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
}

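/**
 * dma_tasklet - DMA IRQ tasklet
 * @data: tasklet argument (bam controller structure)
 *
 * Starts the next queued transaction on every idle channel that has
 * issued descriptors.
 */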
static void dma_tasklet(unsigned long data)
{
	struct bam_device *bdev = (struct bam_device *)data;
	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}
}

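/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Moves submitted descriptors to the issued list and starts the next one
 * immediately if the channel is idle.
 */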
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}

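/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 */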
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
}

static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *of)
{
	struct bam_device *bdev = container_of(of->of_dma_data,
					struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}

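/**
 * bam_init - initializes the BAM hardware
 * @bdev: bam device
 *
 * Validates the local execution environment against the number of EEs the
 * hardware reports, reads the pipe count, and, unless the BAM is
 * controlled remotely, soft-resets and enables the controller with
 * default configuration and error interrupts.
 */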
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
	val &= NUM_EES_MASK;

	if (bdev->ee >= val)
		return -EINVAL;

	val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
	bdev->num_channels = val & BAM_NUM_PIPES_MASK;

	if (bdev->controlled_remotely)
		return 0;

	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
	val |= BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	wmb();

	val |= BAM_EN;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	writel_relaxed(DEFAULT_CNT_THRSHLD,
			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));

	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bam_addr(bdev, 0, BAM_IRQ_EN));

	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	return 0;
}

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
	u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;
}

static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
	{ .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
	{}
};

MODULE_DEVICE_TABLE(of, bam_of_match);
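
/*
 * Illustrative device-tree consumer of this driver. This is only a sketch:
 * the unit address, register size, interrupt specifier, and clock phandle
 * below are made-up placeholders that must match the actual SoC. The
 * properties themselves ("bam_clk", "qcom,ee", and one dma cell carrying
 * the pipe index, per bam_dma_xlate()) are the ones this driver parses.
 *
 *	dma-controller@f9984000 {
 *		compatible = "qcom,bam-v1.4.0";
 *		reg = <0xf9984000 0x19000>;
 *		interrupts = <0 94 4>;
 *		clocks = <&gcc 123>;
 *		clock-names = "bam_clk";
 *		#dma-cells = <1>;
 *		qcom,ee = <0>;
 *	};
 */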

static int bam_dma_probe(struct platform_device *pdev)
{
	struct bam_device *bdev;
	const struct of_device_id *match;
	struct resource *iores;
	int ret, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->dev = &pdev->dev;

	match = of_match_node(bam_of_match, pdev->dev.of_node);
	if (!match) {
		dev_err(&pdev->dev, "Unsupported BAM module\n");
		return -ENODEV;
	}

	bdev->layout = match->data;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);
	if (bdev->irq < 0)
		return bdev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
	if (ret) {
		dev_err(bdev->dev, "Execution environment unspecified\n");
		return ret;
	}

	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
						"qcom,controlled-remotely");

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {
		dev_err(bdev->dev, "failed to prepare/enable clock\n");
		return ret;
	}

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				sizeof(*bdev->channels), GFP_KERNEL);

	if (!bdev->channels) {
		ret = -ENOMEM;
		goto err_tasklet_kill;
	}

	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_bam_channel_exit;

	bdev->common.dev = bdev->dev;
	bdev->common.dev->dma_parms = &bdev->dma_parms;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
	if (ret) {
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_bam_channel_exit;
	}

	platform_set_drvdata(pdev, bdev);

	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_config = bam_slave_config;
	bdev->common.device_pause = bam_pause;
	bdev->common.device_resume = bam_resume;
	bdev->common.device_terminate_all = bam_dma_terminate_all;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;

	ret = dma_async_device_register(&bdev->common);
	if (ret) {
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_bam_channel_exit;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					&bdev->common);
	if (ret)
		goto err_unregister_dma;

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_bam_channel_exit:
	for (i = 0; i < bdev->num_channels; i++)
		tasklet_kill(&bdev->channels[i].vc.task);
err_tasklet_kill:
	tasklet_kill(&bdev->task);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);

	return ret;
}

static int bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	pm_runtime_force_suspend(&pdev->dev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
		tasklet_kill(&bdev->channels[i].vc.task);

		if (!bdev->channels[i].fifo_virt)
			continue;

		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
			    bdev->channels[i].fifo_virt,
			    bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);

	clk_disable(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_enable(bdev->bamclk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static int __maybe_unused bam_dma_suspend(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);

	pm_runtime_force_suspend(dev);

	clk_unprepare(bdev->bamclk);

	return 0;
}

static int __maybe_unused bam_dma_resume(struct device *dev)
{
	struct bam_device *bdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare(bdev->bamclk);
	if (ret)
		return ret;

	pm_runtime_force_resume(dev);

	return 0;
}

static const struct dev_pm_ops bam_dma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
			   NULL)
};

static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.pm = &bam_dma_pm_ops,
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");