// SPDX-License-Identifier: GPL-2.0
/*
 * Marvell XOR v2 engine driver.
 *
 * Copyright (C) 2015-2016 Marvell International Ltd.
 */
7#include <linux/clk.h>
8#include <linux/dma-mapping.h>
9#include <linux/interrupt.h>
10#include <linux/io.h>
11#include <linux/module.h>
12#include <linux/msi.h>
13#include <linux/of.h>
14#include <linux/of_irq.h>
15#include <linux/platform_device.h>
16#include <linux/spinlock.h>
17
18#include "dmaengine.h"
19
20
21#define MV_XOR_V2_DMA_DESQ_BALR_OFF 0x000
22#define MV_XOR_V2_DMA_DESQ_BAHR_OFF 0x004
23#define MV_XOR_V2_DMA_DESQ_SIZE_OFF 0x008
24#define MV_XOR_V2_DMA_DESQ_DONE_OFF 0x00C
25#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK 0x7FFF
26#define MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT 0
27#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK 0x1FFF
28#define MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT 16
29#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF 0x010
30#define MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK 0x3F3F
31#define MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE 0x202
32#define MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE 0x3C3C
33#define MV_XOR_V2_DMA_IMSG_CDAT_OFF 0x014
34#define MV_XOR_V2_DMA_IMSG_THRD_OFF 0x018
35#define MV_XOR_V2_DMA_IMSG_THRD_MASK 0x7FFF
36#define MV_XOR_V2_DMA_IMSG_THRD_SHIFT 0x0
37#define MV_XOR_V2_DMA_IMSG_TIMER_EN BIT(18)
38#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF 0x01C
39
40#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF 0x04C
41#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK 0xFFFF
42#define MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT 16
43#define MV_XOR_V2_DMA_IMSG_BALR_OFF 0x050
44#define MV_XOR_V2_DMA_IMSG_BAHR_OFF 0x054
45#define MV_XOR_V2_DMA_DESQ_CTRL_OFF 0x100
46#define MV_XOR_V2_DMA_DESQ_CTRL_32B 1
47#define MV_XOR_V2_DMA_DESQ_CTRL_128B 7
48#define MV_XOR_V2_DMA_DESQ_STOP_OFF 0x800
49#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF 0x804
50#define MV_XOR_V2_DMA_DESQ_ADD_OFF 0x808
51#define MV_XOR_V2_DMA_IMSG_TMOT 0x810
52#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK 0x1FFF
53#define MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT 0
54
55
56#define MV_XOR_V2_GLOB_BW_CTRL 0x4
57#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT 0
58#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL 64
59#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT 8
60#define MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL 8
61#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT 12
62#define MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL 4
63#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT 16
64#define MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL 4
65#define MV_XOR_V2_GLOB_PAUSE 0x014
66#define MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL 0x8
67#define MV_XOR_V2_GLOB_SYS_INT_CAUSE 0x200
68#define MV_XOR_V2_GLOB_SYS_INT_MASK 0x204
69#define MV_XOR_V2_GLOB_MEM_INT_CAUSE 0x220
70#define MV_XOR_V2_GLOB_MEM_INT_MASK 0x224
71
72#define MV_XOR_V2_MIN_DESC_SIZE 32
73#define MV_XOR_V2_EXT_DESC_SIZE 128
74
75#define MV_XOR_V2_DESC_RESERVED_SIZE 12
76#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE 12
77
78#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF 8
79
80
81
82
83
84
85
86
87#define MV_XOR_V2_DESC_NUM 1024
88
89
90
91
92
93#define MV_XOR_V2_DONE_IMSG_THRD 0x14
94#define MV_XOR_V2_TIMER_THRD 0xB0
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109struct mv_xor_v2_descriptor {
110 u16 desc_id;
111 u16 flags;
112 u32 crc32_result;
113 u32 desc_ctrl;
114
115
116#define DESC_NUM_ACTIVE_D_BUF_SHIFT 22
117#define DESC_OP_MODE_SHIFT 28
118#define DESC_OP_MODE_NOP 0
119#define DESC_OP_MODE_MEMCPY 1
120#define DESC_OP_MODE_MEMSET 2
121#define DESC_OP_MODE_MEMINIT 3
122#define DESC_OP_MODE_MEM_COMPARE 4
123#define DESC_OP_MODE_CRC32 5
124#define DESC_OP_MODE_XOR 6
125#define DESC_OP_MODE_RAID6 7
126#define DESC_OP_MODE_RAID6_REC 8
127#define DESC_Q_BUFFER_ENABLE BIT(16)
128#define DESC_P_BUFFER_ENABLE BIT(17)
129#define DESC_IOD BIT(27)
130
131 u32 buff_size;
132 u32 fill_pattern_src_addr[4];
133 u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
134 u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
135};
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153struct mv_xor_v2_device {
154 spinlock_t lock;
155 void __iomem *dma_base;
156 void __iomem *glob_base;
157 struct clk *clk;
158 struct clk *reg_clk;
159 struct tasklet_struct irq_tasklet;
160 struct list_head free_sw_desc;
161 struct dma_device dmadev;
162 struct dma_chan dmachan;
163 dma_addr_t hw_desq;
164 struct mv_xor_v2_descriptor *hw_desq_virt;
165 struct mv_xor_v2_sw_desc *sw_desq;
166 int desc_size;
167 unsigned int npendings;
168 unsigned int hw_queue_idx;
169 struct msi_desc *msi_desc;
170};
171
172
173
174
175
176
177
178
179struct mv_xor_v2_sw_desc {
180 int idx;
181 struct dma_async_tx_descriptor async_tx;
182 struct mv_xor_v2_descriptor hw_desc;
183 struct list_head free_list;
184};
185
186
187
188
189static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
190 struct mv_xor_v2_descriptor *desc,
191 dma_addr_t src, int index)
192{
193 int arr_index = ((index >> 1) * 3);
194
195
196
197
198
199
200
201
202
203
204
205
206 if ((index & 0x1) == 0) {
207 desc->data_buff_addr[arr_index] = lower_32_bits(src);
208
209 desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
210 desc->data_buff_addr[arr_index + 2] |=
211 upper_32_bits(src) & 0xFFFF;
212 } else {
213 desc->data_buff_addr[arr_index + 1] =
214 lower_32_bits(src);
215
216 desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
217 desc->data_buff_addr[arr_index + 2] |=
218 (upper_32_bits(src) & 0xFFFF) << 16;
219 }
220}
221
222
223
224
225static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
226 int num_of_desc)
227{
228
229 writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
230}
231
232
233
234
235static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
236 int num_of_desc)
237{
238
239 writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
240}
241
242
243
244
245
246static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
247{
248 writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
249 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);
250
251 return MV_XOR_V2_EXT_DESC_SIZE;
252}
253
254
255
256
257static inline
258void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
259{
260 u32 reg;
261
262
263 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
264 reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
265 reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
266 reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
267 writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
268
269
270 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
271 reg &= (~MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
272 MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
273 reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
274 writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
275}
276
277static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
278{
279 struct mv_xor_v2_device *xor_dev = data;
280 unsigned int ndescs;
281 u32 reg;
282
283 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
284
285 ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
286 MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
287
288
289 if (!ndescs)
290 return IRQ_NONE;
291
292
293 tasklet_schedule(&xor_dev->irq_tasklet);
294
295 return IRQ_HANDLED;
296}
297
298
299
300
301static dma_cookie_t
302mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
303{
304 void *dest_hw_desc;
305 dma_cookie_t cookie;
306 struct mv_xor_v2_sw_desc *sw_desc =
307 container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
308 struct mv_xor_v2_device *xor_dev =
309 container_of(tx->chan, struct mv_xor_v2_device, dmachan);
310
311 dev_dbg(xor_dev->dmadev.dev,
312 "%s sw_desc %p: async_tx %p\n",
313 __func__, sw_desc, &sw_desc->async_tx);
314
315
316 spin_lock_bh(&xor_dev->lock);
317 cookie = dma_cookie_assign(tx);
318
319
320 dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
321
322 memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
323
324 xor_dev->npendings++;
325 xor_dev->hw_queue_idx++;
326 if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
327 xor_dev->hw_queue_idx = 0;
328
329 spin_unlock_bh(&xor_dev->lock);
330
331 return cookie;
332}
333
334
335
336
337static struct mv_xor_v2_sw_desc *
338mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
339{
340 struct mv_xor_v2_sw_desc *sw_desc;
341 bool found = false;
342
343
344 spin_lock_bh(&xor_dev->lock);
345
346 if (list_empty(&xor_dev->free_sw_desc)) {
347 spin_unlock_bh(&xor_dev->lock);
348
349 tasklet_schedule(&xor_dev->irq_tasklet);
350 return NULL;
351 }
352
353 list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
354 if (async_tx_test_ack(&sw_desc->async_tx)) {
355 found = true;
356 break;
357 }
358 }
359
360 if (!found) {
361 spin_unlock_bh(&xor_dev->lock);
362 return NULL;
363 }
364
365 list_del(&sw_desc->free_list);
366
367
368 spin_unlock_bh(&xor_dev->lock);
369
370 return sw_desc;
371}
372
373
374
375
376static struct dma_async_tx_descriptor *
377mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
378 dma_addr_t src, size_t len, unsigned long flags)
379{
380 struct mv_xor_v2_sw_desc *sw_desc;
381 struct mv_xor_v2_descriptor *hw_descriptor;
382 struct mv_xor_v2_device *xor_dev;
383
384 xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);
385
386 dev_dbg(xor_dev->dmadev.dev,
387 "%s len: %zu src %pad dest %pad flags: %ld\n",
388 __func__, len, &src, &dest, flags);
389
390 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
391 if (!sw_desc)
392 return NULL;
393
394 sw_desc->async_tx.flags = flags;
395
396
397 hw_descriptor = &sw_desc->hw_desc;
398
399
400 hw_descriptor->desc_id = sw_desc->idx;
401
402
403 hw_descriptor->desc_ctrl =
404 DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;
405
406 if (flags & DMA_PREP_INTERRUPT)
407 hw_descriptor->desc_ctrl |= DESC_IOD;
408
409
410 hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
411 hw_descriptor->fill_pattern_src_addr[1] =
412 upper_32_bits(src) & 0xFFFF;
413
414
415 hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
416 hw_descriptor->fill_pattern_src_addr[3] =
417 upper_32_bits(dest) & 0xFFFF;
418
419
420 hw_descriptor->buff_size = len;
421
422
423 return &sw_desc->async_tx;
424}
425
426
427
428
429static struct dma_async_tx_descriptor *
430mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
431 unsigned int src_cnt, size_t len, unsigned long flags)
432{
433 struct mv_xor_v2_sw_desc *sw_desc;
434 struct mv_xor_v2_descriptor *hw_descriptor;
435 struct mv_xor_v2_device *xor_dev =
436 container_of(chan, struct mv_xor_v2_device, dmachan);
437 int i;
438
439 if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
440 return NULL;
441
442 dev_dbg(xor_dev->dmadev.dev,
443 "%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
444 __func__, src_cnt, len, &dest, flags);
445
446 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
447 if (!sw_desc)
448 return NULL;
449
450 sw_desc->async_tx.flags = flags;
451
452
453 hw_descriptor = &sw_desc->hw_desc;
454
455
456 hw_descriptor->desc_id = sw_desc->idx;
457
458
459 hw_descriptor->desc_ctrl =
460 DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
461 hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;
462
463 if (flags & DMA_PREP_INTERRUPT)
464 hw_descriptor->desc_ctrl |= DESC_IOD;
465
466
467 for (i = 0; i < src_cnt; i++)
468 mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);
469
470 hw_descriptor->desc_ctrl |=
471 src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;
472
473
474 hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
475 hw_descriptor->fill_pattern_src_addr[3] =
476 upper_32_bits(dest) & 0xFFFF;
477
478
479 hw_descriptor->buff_size = len;
480
481
482 return &sw_desc->async_tx;
483}
484
485
486
487
488static struct dma_async_tx_descriptor *
489mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
490{
491 struct mv_xor_v2_sw_desc *sw_desc;
492 struct mv_xor_v2_descriptor *hw_descriptor;
493 struct mv_xor_v2_device *xor_dev =
494 container_of(chan, struct mv_xor_v2_device, dmachan);
495
496 sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
497 if (!sw_desc)
498 return NULL;
499
500
501 hw_descriptor = &sw_desc->hw_desc;
502
503
504 hw_descriptor->desc_id = sw_desc->idx;
505
506
507 hw_descriptor->desc_ctrl =
508 DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
509 hw_descriptor->desc_ctrl |= DESC_IOD;
510
511
512 return &sw_desc->async_tx;
513}
514
515
516
517
518static void mv_xor_v2_issue_pending(struct dma_chan *chan)
519{
520 struct mv_xor_v2_device *xor_dev =
521 container_of(chan, struct mv_xor_v2_device, dmachan);
522
523 spin_lock_bh(&xor_dev->lock);
524
525
526
527
528
529 mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
530 xor_dev->npendings = 0;
531
532 spin_unlock_bh(&xor_dev->lock);
533}
534
535static inline
536int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
537 int *pending_ptr)
538{
539 u32 reg;
540
541 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
542
543
544 *pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
545 MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);
546
547
548 return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
549 MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
550}
551
552
553
554
555static void mv_xor_v2_tasklet(unsigned long data)
556{
557 struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
558 int pending_ptr, num_of_pending, i;
559 struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
560
561 dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
562
563
564 num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
565
566
567 for (i = 0; i < num_of_pending; i++) {
568 struct mv_xor_v2_descriptor *next_pending_hw_desc =
569 xor_dev->hw_desq_virt + pending_ptr;
570
571
572 next_pending_sw_desc =
573 &xor_dev->sw_desq[next_pending_hw_desc->desc_id];
574
575
576 if (next_pending_sw_desc->async_tx.cookie > 0) {
577
578
579
580
581
582 dma_cookie_complete(&next_pending_sw_desc->async_tx);
583
584 dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
585 dmaengine_desc_get_callback_invoke(
586 &next_pending_sw_desc->async_tx, NULL);
587 }
588
589 dma_run_dependencies(&next_pending_sw_desc->async_tx);
590
591
592 spin_lock_bh(&xor_dev->lock);
593
594
595 list_add(&next_pending_sw_desc->free_list,
596 &xor_dev->free_sw_desc);
597
598
599 spin_unlock_bh(&xor_dev->lock);
600
601
602 pending_ptr++;
603 if (pending_ptr >= MV_XOR_V2_DESC_NUM)
604 pending_ptr = 0;
605 }
606
607 if (num_of_pending != 0) {
608
609 mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
610 }
611}
612
613
614
615
616static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
617{
618 struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);
619
620 writel(msg->address_lo,
621 xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
622 writel(msg->address_hi & 0xFFFF,
623 xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
624 writel(msg->data,
625 xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
626}
627
628static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
629{
630 u32 reg;
631
632
633 writel(MV_XOR_V2_DESC_NUM,
634 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
635
636
637 writel(lower_32_bits(xor_dev->hw_desq),
638 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
639 writel(upper_32_bits(xor_dev->hw_desq),
640 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
641
642
643
644
645
646
647
648
649
650
651 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
652 reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
653 reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
654 MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
655 writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
656
657 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
658 reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
659 reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
660 MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
661 writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
662
663
664
665
666
667
668
669
670 reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
671 MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
672 (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
673 MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
674 (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
675 MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
676 (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
677 MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
678 writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);
679
680
681 reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
682 reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
683 writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
684
685
686 writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
687
688 return 0;
689}
690
691static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
692{
693 struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
694
695
696 writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
697
698 return 0;
699}
700
701static int mv_xor_v2_resume(struct platform_device *dev)
702{
703 struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);
704
705 mv_xor_v2_set_desc_size(xor_dev);
706 mv_xor_v2_enable_imsg_thrd(xor_dev);
707 mv_xor_v2_descq_init(xor_dev);
708
709 return 0;
710}
711
712static int mv_xor_v2_probe(struct platform_device *pdev)
713{
714 struct mv_xor_v2_device *xor_dev;
715 struct resource *res;
716 int i, ret = 0;
717 struct dma_device *dma_dev;
718 struct mv_xor_v2_sw_desc *sw_desc;
719 struct msi_desc *msi_desc;
720
721 BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
722 MV_XOR_V2_EXT_DESC_SIZE);
723
724 xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
725 if (!xor_dev)
726 return -ENOMEM;
727
728 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
729 xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
730 if (IS_ERR(xor_dev->dma_base))
731 return PTR_ERR(xor_dev->dma_base);
732
733 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
734 xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
735 if (IS_ERR(xor_dev->glob_base))
736 return PTR_ERR(xor_dev->glob_base);
737
738 platform_set_drvdata(pdev, xor_dev);
739
740 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
741 if (ret)
742 return ret;
743
744 xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
745 if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
746 if (!IS_ERR(xor_dev->reg_clk)) {
747 ret = clk_prepare_enable(xor_dev->reg_clk);
748 if (ret)
749 return ret;
750 } else {
751 return PTR_ERR(xor_dev->reg_clk);
752 }
753 }
754
755 xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
756 if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
757 ret = EPROBE_DEFER;
758 goto disable_reg_clk;
759 }
760 if (!IS_ERR(xor_dev->clk)) {
761 ret = clk_prepare_enable(xor_dev->clk);
762 if (ret)
763 goto disable_reg_clk;
764 }
765
766 ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
767 mv_xor_v2_set_msi_msg);
768 if (ret)
769 goto disable_clk;
770
771 msi_desc = first_msi_entry(&pdev->dev);
772 if (!msi_desc)
773 goto free_msi_irqs;
774 xor_dev->msi_desc = msi_desc;
775
776 ret = devm_request_irq(&pdev->dev, msi_desc->irq,
777 mv_xor_v2_interrupt_handler, 0,
778 dev_name(&pdev->dev), xor_dev);
779 if (ret)
780 goto free_msi_irqs;
781
782 tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
783 (unsigned long) xor_dev);
784
785 xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);
786
787 dma_cookie_init(&xor_dev->dmachan);
788
789
790
791
792
793
794 xor_dev->hw_desq_virt =
795 dma_alloc_coherent(&pdev->dev,
796 xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
797 &xor_dev->hw_desq, GFP_KERNEL);
798 if (!xor_dev->hw_desq_virt) {
799 ret = -ENOMEM;
800 goto free_msi_irqs;
801 }
802
803
804 xor_dev->sw_desq = devm_kcalloc(&pdev->dev,
805 MV_XOR_V2_DESC_NUM, sizeof(*sw_desc),
806 GFP_KERNEL);
807 if (!xor_dev->sw_desq) {
808 ret = -ENOMEM;
809 goto free_hw_desq;
810 }
811
812 spin_lock_init(&xor_dev->lock);
813
814
815 INIT_LIST_HEAD(&xor_dev->free_sw_desc);
816
817
818 for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
819 struct mv_xor_v2_sw_desc *sw_desc =
820 xor_dev->sw_desq + i;
821 sw_desc->idx = i;
822 dma_async_tx_descriptor_init(&sw_desc->async_tx,
823 &xor_dev->dmachan);
824 sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
825 async_tx_ack(&sw_desc->async_tx);
826
827 list_add(&sw_desc->free_list,
828 &xor_dev->free_sw_desc);
829 }
830
831 dma_dev = &xor_dev->dmadev;
832
833
834 dma_cap_zero(dma_dev->cap_mask);
835 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
836 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
837 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
838
839
840 INIT_LIST_HEAD(&dma_dev->channels);
841
842
843 dma_dev->device_tx_status = dma_cookie_status;
844 dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
845 dma_dev->dev = &pdev->dev;
846
847 dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
848 dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
849 dma_dev->max_xor = 8;
850 dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;
851
852 xor_dev->dmachan.device = dma_dev;
853
854 list_add_tail(&xor_dev->dmachan.device_node,
855 &dma_dev->channels);
856
857 mv_xor_v2_enable_imsg_thrd(xor_dev);
858
859 mv_xor_v2_descq_init(xor_dev);
860
861 ret = dma_async_device_register(dma_dev);
862 if (ret)
863 goto free_hw_desq;
864
865 dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");
866
867 return 0;
868
869free_hw_desq:
870 dma_free_coherent(&pdev->dev,
871 xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
872 xor_dev->hw_desq_virt, xor_dev->hw_desq);
873free_msi_irqs:
874 platform_msi_domain_free_irqs(&pdev->dev);
875disable_clk:
876 clk_disable_unprepare(xor_dev->clk);
877disable_reg_clk:
878 clk_disable_unprepare(xor_dev->reg_clk);
879 return ret;
880}
881
882static int mv_xor_v2_remove(struct platform_device *pdev)
883{
884 struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);
885
886 dma_async_device_unregister(&xor_dev->dmadev);
887
888 dma_free_coherent(&pdev->dev,
889 xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
890 xor_dev->hw_desq_virt, xor_dev->hw_desq);
891
892 devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
893
894 platform_msi_domain_free_irqs(&pdev->dev);
895
896 tasklet_kill(&xor_dev->irq_tasklet);
897
898 clk_disable_unprepare(xor_dev->clk);
899
900 return 0;
901}
902
903#ifdef CONFIG_OF
904static const struct of_device_id mv_xor_v2_dt_ids[] = {
905 { .compatible = "marvell,xor-v2", },
906 {},
907};
908MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
909#endif
910
911static struct platform_driver mv_xor_v2_driver = {
912 .probe = mv_xor_v2_probe,
913 .suspend = mv_xor_v2_suspend,
914 .resume = mv_xor_v2_resume,
915 .remove = mv_xor_v2_remove,
916 .driver = {
917 .name = "mv_xor_v2",
918 .of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
919 },
920};
921
922module_platform_driver(mv_xor_v2_driver);
923
924MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
925MODULE_LICENSE("GPL");
926