/*
 * Xilinx PS PCIe DMA Engine platform driver
 * (platform front end for the PS PCIe end point DMA and root DMA engines)
 */

#include "xilinx_ps_pcie.h"
#include "../dmaengine.h"
17
18#define PLATFORM_DRIVER_NAME "ps_pcie_pform_dma"
19#define MAX_BARS 6
20
21#define DMA_BAR_NUMBER 0
22
23#define MIN_SW_INTR_TRANSACTIONS 2
24
25#define CHANNEL_PROPERTY_LENGTH 50
26#define WORKQ_NAME_SIZE 100
27#define INTR_HANDLR_NAME_SIZE 100
28
29#define PS_PCIE_DMA_IRQ_NOSHARE 0
30
31#define MAX_COALESCE_COUNT 255
32
33#define DMA_CHANNEL_REGS_SIZE 0x80
34
35#define DMA_SRCQPTRLO_REG_OFFSET (0x00)
36#define DMA_SRCQPTRHI_REG_OFFSET (0x04)
37#define DMA_SRCQSZ_REG_OFFSET (0x08)
38#define DMA_SRCQLMT_REG_OFFSET (0x0C)
39#define DMA_DSTQPTRLO_REG_OFFSET (0x10)
40#define DMA_DSTQPTRHI_REG_OFFSET (0x14)
41#define DMA_DSTQSZ_REG_OFFSET (0x18)
42#define DMA_DSTQLMT_REG_OFFSET (0x1C)
43#define DMA_SSTAQPTRLO_REG_OFFSET (0x20)
44#define DMA_SSTAQPTRHI_REG_OFFSET (0x24)
45#define DMA_SSTAQSZ_REG_OFFSET (0x28)
46#define DMA_SSTAQLMT_REG_OFFSET (0x2C)
47#define DMA_DSTAQPTRLO_REG_OFFSET (0x30)
48#define DMA_DSTAQPTRHI_REG_OFFSET (0x34)
49#define DMA_DSTAQSZ_REG_OFFSET (0x38)
50#define DMA_DSTAQLMT_REG_OFFSET (0x3C)
51#define DMA_SRCQNXT_REG_OFFSET (0x40)
52#define DMA_DSTQNXT_REG_OFFSET (0x44)
53#define DMA_SSTAQNXT_REG_OFFSET (0x48)
54#define DMA_DSTAQNXT_REG_OFFSET (0x4C)
55#define DMA_SCRATCH0_REG_OFFSET (0x50)
56
57#define DMA_PCIE_INTR_CNTRL_REG_OFFSET (0x60)
58#define DMA_PCIE_INTR_STATUS_REG_OFFSET (0x64)
59#define DMA_AXI_INTR_CNTRL_REG_OFFSET (0x68)
60#define DMA_AXI_INTR_STATUS_REG_OFFSET (0x6C)
61#define DMA_PCIE_INTR_ASSRT_REG_OFFSET (0x70)
62#define DMA_AXI_INTR_ASSRT_REG_OFFSET (0x74)
63#define DMA_CNTRL_REG_OFFSET (0x78)
64#define DMA_STATUS_REG_OFFSET (0x7C)
65
66#define DMA_CNTRL_RST_BIT BIT(1)
67#define DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT BIT(2)
68#define DMA_CNTRL_ENABL_BIT BIT(0)
69#define DMA_STATUS_DMA_PRES_BIT BIT(15)
70#define DMA_STATUS_DMA_RUNNING_BIT BIT(0)
71#define DMA_QPTRLO_QLOCAXI_BIT BIT(0)
72#define DMA_QPTRLO_Q_ENABLE_BIT BIT(1)
73#define DMA_INTSTATUS_DMAERR_BIT BIT(1)
74#define DMA_INTSTATUS_SGLINTR_BIT BIT(2)
75#define DMA_INTSTATUS_SWINTR_BIT BIT(3)
76#define DMA_INTCNTRL_ENABLINTR_BIT BIT(0)
77#define DMA_INTCNTRL_DMAERRINTR_BIT BIT(1)
78#define DMA_INTCNTRL_DMASGINTR_BIT BIT(2)
79#define DMA_SW_INTR_ASSRT_BIT BIT(3)
80
81#define SOURCE_CONTROL_BD_BYTE_COUNT_MASK GENMASK(23, 0)
82#define SOURCE_CONTROL_BD_LOC_AXI BIT(24)
83#define SOURCE_CONTROL_BD_EOP_BIT BIT(25)
84#define SOURCE_CONTROL_BD_INTR_BIT BIT(26)
85#define SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT BIT(25)
86#define SOURCE_CONTROL_ATTRIBUTES_MASK GENMASK(31, 28)
87#define SRC_CTL_ATTRIB_BIT_SHIFT (29)
88
89#define STA_BD_COMPLETED_BIT BIT(0)
90#define STA_BD_SOURCE_ERROR_BIT BIT(1)
91#define STA_BD_DESTINATION_ERROR_BIT BIT(2)
92#define STA_BD_INTERNAL_ERROR_BIT BIT(3)
93#define STA_BD_UPPER_STATUS_NONZERO_BIT BIT(31)
94#define STA_BD_BYTE_COUNT_MASK GENMASK(30, 4)
95
96#define STA_BD_BYTE_COUNT_SHIFT 4
97
98#define DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT (16)
99
100#define DMA_SRC_Q_LOW_BIT_SHIFT GENMASK(5, 0)
101
102#define MAX_TRANSFER_LENGTH 0x1000000
103
104#define AXI_ATTRIBUTE 0x3
105#define PCI_ATTRIBUTE 0x2
106
107#define ROOTDMA_Q_READ_ATTRIBUTE 0x8

/*
 * user_id value written into every source descriptor; the driver does
 * not interpret it further.
 */
#define DEFAULT_UID 1

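/**
 * struct DMA_ENGINE_REGISTERS - per-channel hardware register block
 *
 * Mirrors the 0x80-byte register window of one DMA channel (see the
 * DMA_*_REG_OFFSET definitions above) so queue pointers, sizes, limits
 * and interrupt registers can be accessed as structure members through
 * chan_base.
 */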
117struct DMA_ENGINE_REGISTERS {
118 u32 src_q_low;
119 u32 src_q_high;
120 u32 src_q_size;
121 u32 src_q_limit;
122 u32 dst_q_low;
123 u32 dst_q_high;
124 u32 dst_q_size;
125 u32 dst_q_limit;
126 u32 stas_q_low;
127 u32 stas_q_high;
128 u32 stas_q_size;
129 u32 stas_q_limit;
130 u32 stad_q_low;
131 u32 stad_q_high;
132 u32 stad_q_size;
133 u32 stad_q_limit;
134 u32 src_q_next;
135 u32 dst_q_next;
136 u32 stas_q_next;
137 u32 stad_q_next;
	u32 scratch0;
	u32 scratch1;
	u32 scratch2;
	u32 scratch3;
142 u32 pcie_intr_cntrl;
143 u32 pcie_intr_status;
144 u32 axi_intr_cntrl;
145 u32 axi_intr_status;
146 u32 pcie_intr_assert;
147 u32 axi_intr_assert;
148 u32 dma_channel_ctrl;
149 u32 dma_channel_status;
150} __attribute__((__packed__));
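/**
 * struct SOURCE_DMA_DESCRIPTOR - source scatter gather queue element
 * @system_address: bus address the DMA engine reads from
 * @control_byte_count: byte count plus SOURCE_CONTROL_* attribute flags
 * @user_handle: index of the driver packet context for this transfer
 * @user_id: opaque tag (DEFAULT_UID) carried through to the status queue
 */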
159struct SOURCE_DMA_DESCRIPTOR {
160 u64 system_address;
161 u32 control_byte_count;
162 u16 user_handle;
163 u16 user_id;
164} __attribute__((__packed__));
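/**
 * struct DEST_DMA_DESCRIPTOR - destination scatter gather queue element
 * @system_address: bus address the DMA engine writes to
 * @control_byte_count: byte count plus write attribute flags
 * @user_handle: index of the driver packet context for this transfer
 * @reserved: unused
 */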
173struct DEST_DMA_DESCRIPTOR {
174 u64 system_address;
175 u32 control_byte_count;
176 u16 user_handle;
177 u16 reserved;
178} __attribute__((__packed__));
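/**
 * struct STATUS_DMA_DESCRIPTOR - status queue element written by hardware
 * @status_flag_byte_count: STA_BD_* completion/error flags and byte count
 * @user_handle: packet context index copied from the data descriptor
 * @user_id: tag copied from the source descriptor
 */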
186struct STATUS_DMA_DESCRIPTOR {
187 u32 status_flag_byte_count;
188 u16 user_handle;
189 u16 user_id;
190} __attribute__((__packed__));
191
192enum PACKET_CONTEXT_AVAILABILITY {
193 FREE = 0,
194 IN_USE
195};
196
197struct ps_pcie_transfer_elements {
198 struct list_head node;
199 dma_addr_t src_pa;
200 dma_addr_t dst_pa;
201 u32 transfer_bytes;
202};
203
204struct ps_pcie_tx_segment {
205 struct list_head node;
206 struct dma_async_tx_descriptor async_tx;
207 struct list_head transfer_nodes;
208 u32 src_elements;
209 u32 dst_elements;
210 u32 total_transfer_bytes;
211};
212
213struct ps_pcie_intr_segment {
214 struct list_head node;
215 struct dma_async_tx_descriptor async_intr_tx;
216};
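/**
 * struct PACKET_TRANSFER_PARAMS - per-packet bookkeeping context
 * @availability_status: FREE or IN_USE
 * @idx_sop: index of the first descriptor used by this packet
 * @idx_eop: index of the last descriptor used by this packet
 * @seg: transaction segment to complete when the packet finishes
 */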
226struct PACKET_TRANSFER_PARAMS {
227 enum PACKET_CONTEXT_AVAILABILITY availability_status;
228 u16 idx_sop;
229 u16 idx_eop;
230 struct ps_pcie_tx_segment *seg;
231};
232
233enum CHANNEL_STATE {
234 CHANNEL_RESOURCE_UNALLOCATED = 0,
235 CHANNEL_UNAVIALBLE,
236 CHANNEL_AVAILABLE,
237 CHANNEL_ERROR
238};
239
240enum BUFFER_LOCATION {
241 BUFFER_LOC_PCI = 0,
242 BUFFER_LOC_AXI,
243 BUFFER_LOC_INVALID
244};
245
246enum dev_channel_properties {
247 DMA_CHANNEL_DIRECTION = 0,
248 NUM_DESCRIPTORS,
249 NUM_QUEUES,
	COALESCE_COUNT,
251 POLL_TIMER_FREQUENCY
252};
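/**
 * struct ps_pcie_dma_chan - per-channel driver state
 *
 * Wraps one hardware DMA channel behind a dmaengine channel (@common):
 * the channel register window (@chan_base), the source/destination
 * scatter gather and status rings with their bookkeeping indices, the
 * packet contexts, the memory pools for transactions and transfer
 * elements, and the work queues used for channel programming,
 * descriptor clean-up, software interrupts and maintenance.
 */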
334struct ps_pcie_dma_chan {
335 struct xlnx_pcie_dma_device *xdev;
336 struct device *dev;
337
338 struct dma_chan common;
339
340 struct DMA_ENGINE_REGISTERS *chan_base;
341 u16 channel_number;
342
343 u32 num_queues;
344 enum dma_data_direction direction;
345 enum BUFFER_LOCATION srcq_buffer_location;
346 enum BUFFER_LOCATION dstq_buffer_location;
347
348 u32 total_descriptors;
349
350 enum CHANNEL_STATE state;
351 spinlock_t channel_lock;
352
353 spinlock_t cookie_lock;
354
355 u32 coalesce_count;
356 u32 poll_timer_freq;
357
358 struct timer_list poll_timer;
359
360 u32 src_avail_descriptors;
361 spinlock_t src_desc_lock;
362
363 u32 dst_avail_descriptors;
364 spinlock_t dst_desc_lock;
365
366 dma_addr_t src_sgl_bd_pa;
367 struct SOURCE_DMA_DESCRIPTOR *psrc_sgl_bd;
368 u32 src_sgl_freeidx;
369
370 dma_addr_t dst_sgl_bd_pa;
371 struct DEST_DMA_DESCRIPTOR *pdst_sgl_bd;
372 u32 dst_sgl_freeidx;
373
374 dma_addr_t src_sta_bd_pa;
375 struct STATUS_DMA_DESCRIPTOR *psrc_sta_bd;
376 u32 src_staprobe_idx;
377 u32 src_sta_hw_probe_idx;
378
379 dma_addr_t dst_sta_bd_pa;
380 struct STATUS_DMA_DESCRIPTOR *pdst_sta_bd;
381 u32 dst_staprobe_idx;
382 u32 dst_sta_hw_probe_idx;
383
384 u32 read_attribute;
385 u32 write_attribute;
386
387 u32 intr_status_offset;
388 u32 intr_control_offset;
389
390 struct PACKET_TRANSFER_PARAMS *ppkt_ctx_srcq;
391 u16 idx_ctx_srcq_head;
392 u16 idx_ctx_srcq_tail;
393
394 struct PACKET_TRANSFER_PARAMS *ppkt_ctx_dstq;
395 u16 idx_ctx_dstq_head;
396 u16 idx_ctx_dstq_tail;
397
398 spinlock_t pending_list_lock;
399 struct list_head pending_list;
400 spinlock_t active_list_lock;
401 struct list_head active_list;
402
403 spinlock_t pending_interrupts_lock;
404 struct list_head pending_interrupts_list;
405 spinlock_t active_interrupts_lock;
406 struct list_head active_interrupts_list;
407
408 mempool_t *transactions_pool;
409 mempool_t *tx_elements_pool;
410 mempool_t *intr_transactions_pool;
411
412 struct workqueue_struct *sw_intrs_wrkq;
413 struct work_struct handle_sw_intrs;
414
415 struct workqueue_struct *maintenance_workq;
416 struct work_struct handle_chan_reset;
417 struct work_struct handle_chan_shutdown;
418 struct work_struct handle_chan_terminate;
419
420 struct completion chan_shutdown_complt;
421 struct completion chan_terminate_complete;
422
423 struct workqueue_struct *primary_desc_cleanup;
424 struct work_struct handle_primary_desc_cleanup;
425
426 struct workqueue_struct *chan_programming;
427 struct work_struct handle_chan_programming;
428
429 struct workqueue_struct *srcq_desc_cleanup;
430 struct work_struct handle_srcq_desc_cleanup;
431 struct completion srcq_work_complete;
432
433 struct workqueue_struct *dstq_desc_cleanup;
434 struct work_struct handle_dstq_desc_cleanup;
435 struct completion dstq_work_complete;
436};
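/**
 * struct xlnx_pcie_dma_device - driver state for one DMA device instance
 *
 * Holds the register base, BAR information, interrupt vectors and the
 * channel array exposed through a single struct dma_device. Covers both
 * the PCIe end point DMA and the root DMA variants (@is_rootdma).
 */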
456struct xlnx_pcie_dma_device {
457 bool is_rootdma;
458 bool dma_buf_ext_addr;
459 u32 bar_mask;
460 u16 board_number;
461 struct device *dev;
462 struct ps_pcie_dma_chan *channels;
463 struct dma_device common;
464 int num_channels;
465 int irq_vecs;
466 void __iomem *reg_base;
467 struct pci_dev *pci_dev;
468 struct BAR_PARAMS bar_info[MAX_BARS];
469 int platform_irq_vec;
470 u16 rootdma_vendor;
471 u16 rootdma_device;
472};
473
474#define to_xilinx_chan(chan) \
475 container_of(chan, struct ps_pcie_dma_chan, common)
476#define to_ps_pcie_dma_tx_descriptor(tx) \
477 container_of(tx, struct ps_pcie_tx_segment, async_tx)
478#define to_ps_pcie_dma_tx_intr_descriptor(tx) \
479 container_of(tx, struct ps_pcie_intr_segment, async_intr_tx)
480
481
482static u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg);
483static void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
484 u32 value);
485static void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
486 u32 mask);
487static void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
488 u32 mask);
489static int irq_setup(struct xlnx_pcie_dma_device *xdev);
490static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev);
491static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev);
492static int device_intr_setup(struct xlnx_pcie_dma_device *xdev);
493static int irq_probe(struct xlnx_pcie_dma_device *xdev);
494static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan);
495static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data);
496static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data);
497static int init_hw_components(struct ps_pcie_dma_chan *chan);
498static int init_sw_components(struct ps_pcie_dma_chan *chan);
499static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan);
500static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan);
501static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan);
502static void poll_completed_transactions(struct timer_list *t);
503static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
504 struct ps_pcie_tx_segment *seg);
505static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
506 struct ps_pcie_tx_segment *seg);
507static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
508 struct ps_pcie_tx_segment *seg);
509static void handle_error(struct ps_pcie_dma_chan *chan);
510static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
511 struct ps_pcie_tx_segment *seg);
512static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
513 struct ps_pcie_tx_segment *seg);
514static void ps_pcie_chan_program_work(struct work_struct *work);
515static void dst_cleanup_work(struct work_struct *work);
516static void src_cleanup_work(struct work_struct *work);
517static void ps_pcie_chan_primary_work(struct work_struct *work);
518static int probe_channel_properties(struct platform_device *platform_dev,
519 struct xlnx_pcie_dma_device *xdev,
520 u16 channel_number);
521static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan);
522static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan);
523static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan);
524static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan);
525static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan);
526static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan);
527static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan);
528static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan);
529static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan);
530static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan);
531static void terminate_transactions_work(struct work_struct *work);
532static void chan_shutdown_work(struct work_struct *work);
533static void chan_reset_work(struct work_struct *work);
534static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan);
535static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan);
536static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan);
537static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan);
538static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan);
539static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan);
540static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan);
541static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx);
542static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx);
543static struct dma_async_tx_descriptor *
544xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
545 dma_addr_t dma_src, size_t len,
546 unsigned long flags);
547static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
548 struct dma_chan *channel, struct scatterlist *sgl,
549 unsigned int sg_len, enum dma_transfer_direction direction,
550 unsigned long flags, void *context);
551static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
552 struct dma_chan *channel, unsigned long flags);
553static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel);
554static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel);
555static int read_rootdma_config(struct platform_device *platform_dev,
556 struct xlnx_pcie_dma_device *xdev);
557static int read_epdma_config(struct platform_device *platform_dev,
558 struct xlnx_pcie_dma_device *xdev);
559static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev);
560static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev);
561
562
563static inline u32 ps_pcie_dma_read(struct ps_pcie_dma_chan *chan, u32 reg)
564{
565 return ioread32((void __iomem *)((char *)(chan->chan_base) + reg));
566}
567
568static inline void ps_pcie_dma_write(struct ps_pcie_dma_chan *chan, u32 reg,
569 u32 value)
570{
571 iowrite32(value, (void __iomem *)((char *)(chan->chan_base) + reg));
572}
573
574static inline void ps_pcie_dma_clr_mask(struct ps_pcie_dma_chan *chan, u32 reg,
575 u32 mask)
576{
577 ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) & ~mask);
578}
579
580static inline void ps_pcie_dma_set_mask(struct ps_pcie_dma_chan *chan, u32 reg,
581 u32 mask)
582{
583 ps_pcie_dma_write(chan, reg, ps_pcie_dma_read(chan, reg) | mask);
584}
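/**
 * ps_pcie_dma_dev_intr_handler - device level interrupt handler
 * @irq: IRQ number
 * @data: pointer to the xlnx_pcie_dma_device structure
 *
 * Checks the interrupt status of every channel and returns IRQ_HANDLED
 * if at least one channel had an interrupt to service.
 */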
594static irqreturn_t ps_pcie_dma_dev_intr_handler(int irq, void *data)
595{
596 struct xlnx_pcie_dma_device *xdev =
597 (struct xlnx_pcie_dma_device *)data;
598 struct ps_pcie_dma_chan *chan = NULL;
599 int i;
600 int err = -1;
601 int ret = -1;
602
603 for (i = 0; i < xdev->num_channels; i++) {
604 chan = &xdev->channels[i];
605 err = ps_pcie_check_intr_status(chan);
606 if (err == 0)
607 ret = 0;
608 }
609
610 return (ret == 0) ? IRQ_HANDLED : IRQ_NONE;
611}
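/**
 * ps_pcie_dma_chan_intr_handler - per-channel interrupt handler
 * @irq: IRQ number
 * @data: pointer to the ps_pcie_dma_chan structure
 */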
621static irqreturn_t ps_pcie_dma_chan_intr_handler(int irq, void *data)
622{
623 struct ps_pcie_dma_chan *chan = (struct ps_pcie_dma_chan *)data;
624
625 ps_pcie_check_intr_status(chan);
626
627 return IRQ_HANDLED;
628}
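/**
 * chan_intr_setup - request one interrupt line per DMA channel
 * @xdev: DMA device structure
 *
 * Return: 0 on success, devm_request_irq() error code on failure. Any
 * interrupts already requested are freed again on failure.
 */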
637static int chan_intr_setup(struct xlnx_pcie_dma_device *xdev)
638{
639 struct ps_pcie_dma_chan *chan;
640 int i;
641 int err = 0;
642
643 for (i = 0; i < xdev->num_channels; i++) {
644 chan = &xdev->channels[i];
645 err = devm_request_irq(xdev->dev,
646 pci_irq_vector(xdev->pci_dev, i),
647 ps_pcie_dma_chan_intr_handler,
648 PS_PCIE_DMA_IRQ_NOSHARE,
649 "PS PCIe DMA Chan Intr handler", chan);
650 if (err) {
651 dev_err(xdev->dev,
652 "Irq %d for chan %d error %d\n",
653 pci_irq_vector(xdev->pci_dev, i),
654 chan->channel_number, err);
655 break;
656 }
657 }
658
659 if (err) {
660 while (--i >= 0) {
661 chan = &xdev->channels[i];
662 devm_free_irq(xdev->dev,
663 pci_irq_vector(xdev->pci_dev, i), chan);
664 }
665 }
666
667 return err;
668}
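/**
 * device_intr_setup - request a single interrupt line for the device
 * @xdev: DMA device structure
 *
 * The line is requested as shared unless MSI or MSI-X is enabled.
 */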
677static int device_intr_setup(struct xlnx_pcie_dma_device *xdev)
678{
679 int err;
680 unsigned long intr_flags = IRQF_SHARED;
681
682 if (xdev->pci_dev->msix_enabled || xdev->pci_dev->msi_enabled)
683 intr_flags = PS_PCIE_DMA_IRQ_NOSHARE;
684
685 err = devm_request_irq(xdev->dev,
686 pci_irq_vector(xdev->pci_dev, 0),
687 ps_pcie_dma_dev_intr_handler,
688 intr_flags,
689 "PS PCIe DMA Intr Handler", xdev);
690 if (err)
691 dev_err(xdev->dev, "Couldn't request irq %d\n",
692 pci_irq_vector(xdev->pci_dev, 0));
693
694 return err;
695}
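/*
 * irq_setup - use per-channel interrupts when one vector per channel is
 * available, otherwise fall back to a single device level interrupt.
 */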
704static int irq_setup(struct xlnx_pcie_dma_device *xdev)
705{
706 int err;
707
708 if (xdev->irq_vecs == xdev->num_channels)
709 err = chan_intr_setup(xdev);
710 else
711 err = device_intr_setup(xdev);
712
713 return err;
714}
715
716static int platform_irq_setup(struct xlnx_pcie_dma_device *xdev)
717{
718 int err;
719
720 err = devm_request_irq(xdev->dev,
721 xdev->platform_irq_vec,
722 ps_pcie_dma_dev_intr_handler,
723 IRQF_SHARED,
724 "PS PCIe Root DMA Handler", xdev);
725 if (err)
726 dev_err(xdev->dev, "Couldn't request irq %d\n",
727 xdev->platform_irq_vec);
728
729 return err;
730}
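/**
 * irq_probe - allocate MSI-X/MSI/legacy interrupt vectors for the device
 * @xdev: DMA device structure
 *
 * Return: number of vectors allocated, or a negative error code.
 */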
739static int irq_probe(struct xlnx_pcie_dma_device *xdev)
740{
741 struct pci_dev *pdev;
742
743 pdev = xdev->pci_dev;
744
745 xdev->irq_vecs = pci_alloc_irq_vectors(pdev, 1, xdev->num_channels,
746 PCI_IRQ_ALL_TYPES);
747 return xdev->irq_vecs;
748}
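/**
 * ps_pcie_check_intr_status - check and acknowledge channel interrupts
 * @chan: DMA channel structure
 *
 * Queues descriptor clean-up or software interrupt work as needed and
 * starts error recovery when a DMA error is reported.
 *
 * Return: 0 if an interrupt was serviced, -1 otherwise.
 */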
758static int ps_pcie_check_intr_status(struct ps_pcie_dma_chan *chan)
759{
760 int err = -1;
761 u32 status;
762
763 if (chan->state != CHANNEL_AVAILABLE)
764 return err;
765
766 status = ps_pcie_dma_read(chan, chan->intr_status_offset);
767
768 if (status & DMA_INTSTATUS_SGLINTR_BIT) {
769 if (chan->primary_desc_cleanup) {
770 queue_work(chan->primary_desc_cleanup,
771 &chan->handle_primary_desc_cleanup);
772 }
773
774 ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
775 DMA_INTSTATUS_SGLINTR_BIT);
776 err = 0;
777 }
778
779 if (status & DMA_INTSTATUS_SWINTR_BIT) {
780 if (chan->sw_intrs_wrkq)
781 queue_work(chan->sw_intrs_wrkq, &chan->handle_sw_intrs);
782
783 ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
784 DMA_INTSTATUS_SWINTR_BIT);
785 err = 0;
786 }
787
788 if (status & DMA_INTSTATUS_DMAERR_BIT) {
789 dev_err(chan->dev,
790 "DMA Channel %d ControlStatus Reg: 0x%x",
791 chan->channel_number, status);
792 dev_err(chan->dev,
793 "Chn %d SrcQLmt = %d SrcQSz = %d SrcQNxt = %d",
794 chan->channel_number,
795 chan->chan_base->src_q_limit,
796 chan->chan_base->src_q_size,
797 chan->chan_base->src_q_next);
798 dev_err(chan->dev,
799 "Chn %d SrcStaLmt = %d SrcStaSz = %d SrcStaNxt = %d",
800 chan->channel_number,
801 chan->chan_base->stas_q_limit,
802 chan->chan_base->stas_q_size,
803 chan->chan_base->stas_q_next);
804 dev_err(chan->dev,
805 "Chn %d DstQLmt = %d DstQSz = %d DstQNxt = %d",
806 chan->channel_number,
807 chan->chan_base->dst_q_limit,
808 chan->chan_base->dst_q_size,
809 chan->chan_base->dst_q_next);
810 dev_err(chan->dev,
811 "Chan %d DstStaLmt = %d DstStaSz = %d DstStaNxt = %d",
812 chan->channel_number,
813 chan->chan_base->stad_q_limit,
814 chan->chan_base->stad_q_size,
815 chan->chan_base->stad_q_next);
816
817 ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
818 DMA_INTSTATUS_DMAERR_BIT);
819
820 handle_error(chan);
821
822 err = 0;
823 }
824
825 return err;
826}
827
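/*
 * init_hw_components - program the base, size, limit and location
 * registers of the source/destination data and status queues that are
 * allocated for this channel.
 */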
828static int init_hw_components(struct ps_pcie_dma_chan *chan)
829{
830 if (chan->psrc_sgl_bd && chan->psrc_sta_bd) {
831
832 chan->chan_base->src_q_next = 0;
833 chan->chan_base->src_q_high =
834 upper_32_bits(chan->src_sgl_bd_pa);
835 chan->chan_base->src_q_size = chan->total_descriptors;
836 chan->chan_base->src_q_limit = 0;
837 if (chan->xdev->is_rootdma) {
838 chan->chan_base->src_q_low = ROOTDMA_Q_READ_ATTRIBUTE
839 | DMA_QPTRLO_QLOCAXI_BIT;
840 } else {
841 chan->chan_base->src_q_low = 0;
842 }
843 chan->chan_base->src_q_low |=
844 (lower_32_bits((chan->src_sgl_bd_pa))
845 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
846 | DMA_QPTRLO_Q_ENABLE_BIT;
847
848 chan->chan_base->stas_q_next = 0;
849 chan->chan_base->stas_q_high =
850 upper_32_bits(chan->src_sta_bd_pa);
851 chan->chan_base->stas_q_size = chan->total_descriptors;
852 chan->chan_base->stas_q_limit = chan->total_descriptors - 1;
853 if (chan->xdev->is_rootdma) {
854 chan->chan_base->stas_q_low = ROOTDMA_Q_READ_ATTRIBUTE
855 | DMA_QPTRLO_QLOCAXI_BIT;
856 } else {
857 chan->chan_base->stas_q_low = 0;
858 }
859 chan->chan_base->stas_q_low |=
860 (lower_32_bits(chan->src_sta_bd_pa)
861 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
862 | DMA_QPTRLO_Q_ENABLE_BIT;
863 }
864
865 if (chan->pdst_sgl_bd && chan->pdst_sta_bd) {
866
867 chan->chan_base->dst_q_next = 0;
868 chan->chan_base->dst_q_high =
869 upper_32_bits(chan->dst_sgl_bd_pa);
870 chan->chan_base->dst_q_size = chan->total_descriptors;
871 chan->chan_base->dst_q_limit = 0;
872 if (chan->xdev->is_rootdma) {
873 chan->chan_base->dst_q_low = ROOTDMA_Q_READ_ATTRIBUTE
874 | DMA_QPTRLO_QLOCAXI_BIT;
875 } else {
876 chan->chan_base->dst_q_low = 0;
877 }
878 chan->chan_base->dst_q_low |=
879 (lower_32_bits(chan->dst_sgl_bd_pa)
880 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
881 | DMA_QPTRLO_Q_ENABLE_BIT;
882
883 chan->chan_base->stad_q_next = 0;
884 chan->chan_base->stad_q_high =
885 upper_32_bits(chan->dst_sta_bd_pa);
886 chan->chan_base->stad_q_size = chan->total_descriptors;
887 chan->chan_base->stad_q_limit = chan->total_descriptors - 1;
888 if (chan->xdev->is_rootdma) {
889 chan->chan_base->stad_q_low = ROOTDMA_Q_READ_ATTRIBUTE
890 | DMA_QPTRLO_QLOCAXI_BIT;
891 } else {
892 chan->chan_base->stad_q_low = 0;
893 }
894 chan->chan_base->stad_q_low |=
895 (lower_32_bits(chan->dst_sta_bd_pa)
896 & ~(DMA_SRC_Q_LOW_BIT_SHIFT))
897 | DMA_QPTRLO_Q_ENABLE_BIT;
898 }
899
900 return 0;
901}
902
903static void update_channel_read_attribute(struct ps_pcie_dma_chan *chan)
904{
905 if (chan->xdev->is_rootdma) {
		/*
		 * The root DMA engine always uses AXI read attributes;
		 * only the buffer-location bit differs with the source
		 * buffer location.
		 */
909 if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
910 chan->read_attribute = (AXI_ATTRIBUTE <<
911 SRC_CTL_ATTRIB_BIT_SHIFT) |
912 SOURCE_CONTROL_BD_LOC_AXI;
913 } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
914 chan->read_attribute = AXI_ATTRIBUTE <<
915 SRC_CTL_ATTRIB_BIT_SHIFT;
916 }
917 } else {
918 if (chan->srcq_buffer_location == BUFFER_LOC_PCI) {
919 chan->read_attribute = PCI_ATTRIBUTE <<
920 SRC_CTL_ATTRIB_BIT_SHIFT;
921 } else if (chan->srcq_buffer_location == BUFFER_LOC_AXI) {
922 chan->read_attribute = (AXI_ATTRIBUTE <<
923 SRC_CTL_ATTRIB_BIT_SHIFT) |
924 SOURCE_CONTROL_BD_LOC_AXI;
925 }
926 }
927}
928
929static void update_channel_write_attribute(struct ps_pcie_dma_chan *chan)
930{
931 if (chan->xdev->is_rootdma) {
		/*
		 * The root DMA engine always uses AXI write attributes;
		 * only the buffer-location bit differs with the
		 * destination buffer location.
		 */
935 if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
936 chan->write_attribute = (AXI_ATTRIBUTE <<
937 SRC_CTL_ATTRIB_BIT_SHIFT) |
938 SOURCE_CONTROL_BD_LOC_AXI;
		} else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
940 chan->write_attribute = AXI_ATTRIBUTE <<
941 SRC_CTL_ATTRIB_BIT_SHIFT;
942 }
943 } else {
944 if (chan->dstq_buffer_location == BUFFER_LOC_PCI) {
945 chan->write_attribute = PCI_ATTRIBUTE <<
946 SRC_CTL_ATTRIB_BIT_SHIFT;
947 } else if (chan->dstq_buffer_location == BUFFER_LOC_AXI) {
948 chan->write_attribute = (AXI_ATTRIBUTE <<
949 SRC_CTL_ATTRIB_BIT_SHIFT) |
950 SOURCE_CONTROL_BD_LOC_AXI;
951 }
952 }
953 chan->write_attribute |= SOURCE_CONTROL_BACK_TO_BACK_PACK_BIT;
954}
955
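/*
 * init_sw_components - clear the packet contexts and descriptor rings
 * and reset the driver's ring indices and available-descriptor counters.
 */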
956static int init_sw_components(struct ps_pcie_dma_chan *chan)
957{
958 if (chan->ppkt_ctx_srcq && chan->psrc_sgl_bd &&
959 chan->psrc_sta_bd) {
960 memset(chan->ppkt_ctx_srcq, 0,
961 sizeof(struct PACKET_TRANSFER_PARAMS)
962 * chan->total_descriptors);
963
964 memset(chan->psrc_sgl_bd, 0,
965 sizeof(struct SOURCE_DMA_DESCRIPTOR)
966 * chan->total_descriptors);
967
968 memset(chan->psrc_sta_bd, 0,
969 sizeof(struct STATUS_DMA_DESCRIPTOR)
970 * chan->total_descriptors);
971
972 chan->src_avail_descriptors = chan->total_descriptors;
973
974 chan->src_sgl_freeidx = 0;
975 chan->src_staprobe_idx = 0;
976 chan->src_sta_hw_probe_idx = chan->total_descriptors - 1;
977 chan->idx_ctx_srcq_head = 0;
978 chan->idx_ctx_srcq_tail = 0;
979 }
980
981 if (chan->ppkt_ctx_dstq && chan->pdst_sgl_bd &&
982 chan->pdst_sta_bd) {
983 memset(chan->ppkt_ctx_dstq, 0,
984 sizeof(struct PACKET_TRANSFER_PARAMS)
985 * chan->total_descriptors);
986
987 memset(chan->pdst_sgl_bd, 0,
988 sizeof(struct DEST_DMA_DESCRIPTOR)
989 * chan->total_descriptors);
990
991 memset(chan->pdst_sta_bd, 0,
992 sizeof(struct STATUS_DMA_DESCRIPTOR)
993 * chan->total_descriptors);
994
995 chan->dst_avail_descriptors = chan->total_descriptors;
996
997 chan->dst_sgl_freeidx = 0;
998 chan->dst_staprobe_idx = 0;
999 chan->dst_sta_hw_probe_idx = chan->total_descriptors - 1;
1000 chan->idx_ctx_dstq_head = 0;
1001 chan->idx_ctx_dstq_tail = 0;
1002 }
1003
1004 return 0;
1005}
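/**
 * ps_pcie_chan_reset - soft reset a DMA channel
 * @chan: DMA channel structure
 */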
1013static void ps_pcie_chan_reset(struct ps_pcie_dma_chan *chan)
1014{
1015
1016 ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
1017
1018 mdelay(10);
1019
1020
1021 ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_RST_BIT);
1022}
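/**
 * poll_completed_transactions - timer callback used when interrupt
 * coalescing is in effect; periodically schedules descriptor clean-up
 * @t: poll timer of the channel
 */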
1030static void poll_completed_transactions(struct timer_list *t)
1031{
1032 struct ps_pcie_dma_chan *chan = from_timer(chan, t, poll_timer);
1033
1034 if (chan->state == CHANNEL_AVAILABLE) {
1035 queue_work(chan->primary_desc_cleanup,
1036 &chan->handle_primary_desc_cleanup);
1037 }
1038
1039 mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
1040}
1041
1042static bool check_descriptors_for_two_queues(struct ps_pcie_dma_chan *chan,
1043 struct ps_pcie_tx_segment *seg)
1044{
1045 if (seg->src_elements) {
1046 if (chan->src_avail_descriptors >=
1047 seg->src_elements) {
1048 return true;
1049 }
1050 } else if (seg->dst_elements) {
1051 if (chan->dst_avail_descriptors >=
1052 seg->dst_elements) {
1053 return true;
1054 }
1055 }
1056
1057 return false;
1058}
1059
1060static bool check_descriptors_for_all_queues(struct ps_pcie_dma_chan *chan,
1061 struct ps_pcie_tx_segment *seg)
1062{
1063 if (chan->src_avail_descriptors >=
1064 seg->src_elements &&
1065 chan->dst_avail_descriptors >=
1066 seg->dst_elements) {
1067 return true;
1068 }
1069
1070 return false;
1071}
1072
1073static bool check_descriptor_availability(struct ps_pcie_dma_chan *chan,
1074 struct ps_pcie_tx_segment *seg)
1075{
1076 if (chan->num_queues == DEFAULT_DMA_QUEUES)
1077 return check_descriptors_for_all_queues(chan, seg);
1078 else
1079 return check_descriptors_for_two_queues(chan, seg);
1080}
1081
1082static void handle_error(struct ps_pcie_dma_chan *chan)
1083{
1084 if (chan->state != CHANNEL_AVAILABLE)
1085 return;
1086
1087 spin_lock(&chan->channel_lock);
1088 chan->state = CHANNEL_ERROR;
1089 spin_unlock(&chan->channel_lock);
1090
1091 if (chan->maintenance_workq)
1092 queue_work(chan->maintenance_workq, &chan->handle_chan_reset);
1093}
1094
1095static void xlnx_ps_pcie_update_srcq(struct ps_pcie_dma_chan *chan,
1096 struct ps_pcie_tx_segment *seg)
1097{
1098 struct SOURCE_DMA_DESCRIPTOR *pdesc;
1099 struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
1100 struct ps_pcie_transfer_elements *ele = NULL;
1101 u32 i = 0;
1102
1103 pkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_head;
1104 if (pkt_ctx->availability_status == IN_USE) {
1105 dev_err(chan->dev,
1106 "src pkt context not avail for channel %d\n",
1107 chan->channel_number);
1108 handle_error(chan);
1109 return;
1110 }
1111
1112 pkt_ctx->availability_status = IN_USE;
1113
1114 if (chan->srcq_buffer_location == BUFFER_LOC_PCI)
1115 pkt_ctx->seg = seg;
1116
1117
1118 pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
1119 pkt_ctx->idx_sop = chan->src_sgl_freeidx;
1120
1121
1122 list_for_each_entry(ele, &seg->transfer_nodes, node) {
1123 if (chan->xdev->dma_buf_ext_addr) {
1124 pdesc->system_address =
1125 (u64)ele->src_pa;
1126 } else {
1127 pdesc->system_address =
1128 (u32)ele->src_pa;
1129 }
1130
1131 pdesc->control_byte_count = (ele->transfer_bytes &
1132 SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
1133 chan->read_attribute;
1134
1135 pdesc->user_handle = chan->idx_ctx_srcq_head;
1136 pdesc->user_id = DEFAULT_UID;
1137
1138 if (i == (seg->src_elements - 1)) {
1139 pkt_ctx->idx_eop = chan->src_sgl_freeidx;
1140 pdesc->control_byte_count |= SOURCE_CONTROL_BD_EOP_BIT;
1141 if ((seg->async_tx.flags & DMA_PREP_INTERRUPT) ==
1142 DMA_PREP_INTERRUPT) {
1143 pdesc->control_byte_count |=
1144 SOURCE_CONTROL_BD_INTR_BIT;
1145 }
1146 }
1147 chan->src_sgl_freeidx++;
1148 if (chan->src_sgl_freeidx == chan->total_descriptors)
1149 chan->src_sgl_freeidx = 0;
1150 pdesc = chan->psrc_sgl_bd + chan->src_sgl_freeidx;
1151 spin_lock(&chan->src_desc_lock);
1152 chan->src_avail_descriptors--;
1153 spin_unlock(&chan->src_desc_lock);
1154 i++;
1155 }
1156
1157 chan->chan_base->src_q_limit = chan->src_sgl_freeidx;
1158 chan->idx_ctx_srcq_head++;
1159 if (chan->idx_ctx_srcq_head == chan->total_descriptors)
1160 chan->idx_ctx_srcq_head = 0;
1161}
1162
1163static void xlnx_ps_pcie_update_dstq(struct ps_pcie_dma_chan *chan,
1164 struct ps_pcie_tx_segment *seg)
1165{
1166 struct DEST_DMA_DESCRIPTOR *pdesc;
1167 struct PACKET_TRANSFER_PARAMS *pkt_ctx = NULL;
1168 struct ps_pcie_transfer_elements *ele = NULL;
1169 u32 i = 0;
1170
1171 pkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_head;
1172 if (pkt_ctx->availability_status == IN_USE) {
1173 dev_err(chan->dev,
1174 "dst pkt context not avail for channel %d\n",
1175 chan->channel_number);
1176 handle_error(chan);
1177
1178 return;
1179 }
1180
1181 pkt_ctx->availability_status = IN_USE;
1182
1183 if (chan->dstq_buffer_location == BUFFER_LOC_PCI)
1184 pkt_ctx->seg = seg;
1185
1186 pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
1187 pkt_ctx->idx_sop = chan->dst_sgl_freeidx;
1188
1189
1190 list_for_each_entry(ele, &seg->transfer_nodes, node) {
1191 if (chan->xdev->dma_buf_ext_addr) {
1192 pdesc->system_address =
1193 (u64)ele->dst_pa;
1194 } else {
1195 pdesc->system_address =
1196 (u32)ele->dst_pa;
1197 }
1198 pdesc->control_byte_count = (ele->transfer_bytes &
1199 SOURCE_CONTROL_BD_BYTE_COUNT_MASK) |
1200 chan->write_attribute;
1201
1202 pdesc->user_handle = chan->idx_ctx_dstq_head;
1203
1204 if (i == (seg->dst_elements - 1))
1205 pkt_ctx->idx_eop = chan->dst_sgl_freeidx;
1206 chan->dst_sgl_freeidx++;
1207 if (chan->dst_sgl_freeidx == chan->total_descriptors)
1208 chan->dst_sgl_freeidx = 0;
1209 pdesc = chan->pdst_sgl_bd + chan->dst_sgl_freeidx;
1210 spin_lock(&chan->dst_desc_lock);
1211 chan->dst_avail_descriptors--;
1212 spin_unlock(&chan->dst_desc_lock);
1213 i++;
1214 }
1215
1216 chan->chan_base->dst_q_limit = chan->dst_sgl_freeidx;
1217 chan->idx_ctx_dstq_head++;
1218 if (chan->idx_ctx_dstq_head == chan->total_descriptors)
1219 chan->idx_ctx_dstq_head = 0;
1220}
1221
1222static void ps_pcie_chan_program_work(struct work_struct *work)
1223{
1224 struct ps_pcie_dma_chan *chan =
1225 (struct ps_pcie_dma_chan *)container_of(work,
1226 struct ps_pcie_dma_chan,
1227 handle_chan_programming);
1228 struct ps_pcie_tx_segment *seg = NULL;
1229
1230 while (chan->state == CHANNEL_AVAILABLE) {
1231 spin_lock(&chan->active_list_lock);
1232 seg = list_first_entry_or_null(&chan->active_list,
1233 struct ps_pcie_tx_segment, node);
1234 spin_unlock(&chan->active_list_lock);
1235
1236 if (!seg)
1237 break;
1238
1239 if (check_descriptor_availability(chan, seg) == false)
1240 break;
1241
1242 spin_lock(&chan->active_list_lock);
1243 list_del(&seg->node);
1244 spin_unlock(&chan->active_list_lock);
1245
1246 if (seg->src_elements)
1247 xlnx_ps_pcie_update_srcq(chan, seg);
1248
1249 if (seg->dst_elements)
1250 xlnx_ps_pcie_update_dstq(chan, seg);
1251 }
1252}
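/**
 * dst_cleanup_work - reclaim completed destination queue descriptors
 * @work: handle_dstq_desc_cleanup work of the channel
 *
 * Walks the destination status ring, releases the data descriptors and
 * packet contexts of completed packets and invokes the dmaengine
 * callbacks of transactions whose completion is tracked on this queue.
 */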
1262static void dst_cleanup_work(struct work_struct *work)
1263{
1264 struct ps_pcie_dma_chan *chan =
1265 (struct ps_pcie_dma_chan *)container_of(work,
1266 struct ps_pcie_dma_chan, handle_dstq_desc_cleanup);
1267
1268 struct STATUS_DMA_DESCRIPTOR *psta_bd;
1269 struct DEST_DMA_DESCRIPTOR *pdst_bd;
1270 struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
1271 struct dmaengine_result rslt;
1272 u32 completed_bytes;
1273 u32 dstq_desc_idx;
1274 struct ps_pcie_transfer_elements *ele, *ele_nxt;
1275
1276 psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
1277
1278 while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
1279 if (psta_bd->status_flag_byte_count &
1280 STA_BD_DESTINATION_ERROR_BIT) {
1281 dev_err(chan->dev,
1282 "Dst Sts Elmnt %d chan %d has Destination Err",
1283 chan->dst_staprobe_idx + 1,
1284 chan->channel_number);
1285 handle_error(chan);
1286 break;
1287 }
1288 if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
1289 dev_err(chan->dev,
1290 "Dst Sts Elmnt %d chan %d has Source Error",
1291 chan->dst_staprobe_idx + 1,
1292 chan->channel_number);
1293 handle_error(chan);
1294 break;
1295 }
1296 if (psta_bd->status_flag_byte_count &
1297 STA_BD_INTERNAL_ERROR_BIT) {
1298 dev_err(chan->dev,
1299 "Dst Sts Elmnt %d chan %d has Internal Error",
1300 chan->dst_staprobe_idx + 1,
1301 chan->channel_number);
1302 handle_error(chan);
1303 break;
1304 }
1305
1306 if ((psta_bd->status_flag_byte_count &
1307 STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
1308 dev_err(chan->dev,
1309 "Dst Sts Elmnt %d for chan %d has NON ZERO",
1310 chan->dst_staprobe_idx + 1,
1311 chan->channel_number);
1312 handle_error(chan);
1313 break;
1314 }
1315
1316 chan->idx_ctx_dstq_tail = psta_bd->user_handle;
1317 ppkt_ctx = chan->ppkt_ctx_dstq + chan->idx_ctx_dstq_tail;
1318 completed_bytes = (psta_bd->status_flag_byte_count &
1319 STA_BD_BYTE_COUNT_MASK) >>
1320 STA_BD_BYTE_COUNT_SHIFT;
1321
1322 memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
1323
1324 chan->dst_staprobe_idx++;
1325
1326 if (chan->dst_staprobe_idx == chan->total_descriptors)
1327 chan->dst_staprobe_idx = 0;
1328
1329 chan->dst_sta_hw_probe_idx++;
1330
1331 if (chan->dst_sta_hw_probe_idx == chan->total_descriptors)
1332 chan->dst_sta_hw_probe_idx = 0;
1333
1334 chan->chan_base->stad_q_limit = chan->dst_sta_hw_probe_idx;
1335
1336 psta_bd = chan->pdst_sta_bd + chan->dst_staprobe_idx;
1337
1338 dstq_desc_idx = ppkt_ctx->idx_sop;
1339
1340 do {
1341 pdst_bd = chan->pdst_sgl_bd + dstq_desc_idx;
1342 memset(pdst_bd, 0,
1343 sizeof(struct DEST_DMA_DESCRIPTOR));
1344
1345 spin_lock(&chan->dst_desc_lock);
1346 chan->dst_avail_descriptors++;
1347 spin_unlock(&chan->dst_desc_lock);
1348
1349 if (dstq_desc_idx == ppkt_ctx->idx_eop)
1350 break;
1351
1352 dstq_desc_idx++;
1353
1354 if (dstq_desc_idx == chan->total_descriptors)
1355 dstq_desc_idx = 0;
1356
1357 } while (1);
1358
1359
1360 if (ppkt_ctx->seg) {
1361 spin_lock(&chan->cookie_lock);
1362 dma_cookie_complete(&ppkt_ctx->seg->async_tx);
1363 spin_unlock(&chan->cookie_lock);
1364 rslt.result = DMA_TRANS_NOERROR;
1365 rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
1366 completed_bytes;
1367 dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
1368 &rslt);
1369 list_for_each_entry_safe(ele, ele_nxt,
1370 &ppkt_ctx->seg->transfer_nodes,
1371 node) {
1372 list_del(&ele->node);
1373 mempool_free(ele, chan->tx_elements_pool);
1374 }
1375 mempool_free(ppkt_ctx->seg, chan->transactions_pool);
1376 }
1377 memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
1378 }
1379
1380 complete(&chan->dstq_work_complete);
1381}
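/**
 * src_cleanup_work - reclaim completed source queue descriptors
 * @work: handle_srcq_desc_cleanup work of the channel
 *
 * Source queue counterpart of dst_cleanup_work().
 */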
1391static void src_cleanup_work(struct work_struct *work)
1392{
1393 struct ps_pcie_dma_chan *chan =
1394 (struct ps_pcie_dma_chan *)container_of(
1395 work, struct ps_pcie_dma_chan, handle_srcq_desc_cleanup);
1396
1397 struct STATUS_DMA_DESCRIPTOR *psta_bd;
1398 struct SOURCE_DMA_DESCRIPTOR *psrc_bd;
1399 struct PACKET_TRANSFER_PARAMS *ppkt_ctx;
1400 struct dmaengine_result rslt;
1401 u32 completed_bytes;
1402 u32 srcq_desc_idx;
1403 struct ps_pcie_transfer_elements *ele, *ele_nxt;
1404
1405 psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
1406
1407 while (psta_bd->status_flag_byte_count & STA_BD_COMPLETED_BIT) {
1408 if (psta_bd->status_flag_byte_count &
1409 STA_BD_DESTINATION_ERROR_BIT) {
1410 dev_err(chan->dev,
1411 "Src Sts Elmnt %d chan %d has Dst Error",
1412 chan->src_staprobe_idx + 1,
1413 chan->channel_number);
1414 handle_error(chan);
1415 break;
1416 }
1417 if (psta_bd->status_flag_byte_count & STA_BD_SOURCE_ERROR_BIT) {
1418 dev_err(chan->dev,
1419 "Src Sts Elmnt %d chan %d has Source Error",
1420 chan->src_staprobe_idx + 1,
1421 chan->channel_number);
1422 handle_error(chan);
1423 break;
1424 }
1425 if (psta_bd->status_flag_byte_count &
1426 STA_BD_INTERNAL_ERROR_BIT) {
1427 dev_err(chan->dev,
1428 "Src Sts Elmnt %d chan %d has Internal Error",
1429 chan->src_staprobe_idx + 1,
1430 chan->channel_number);
1431 handle_error(chan);
1432 break;
1433 }
1434 if ((psta_bd->status_flag_byte_count
1435 & STA_BD_UPPER_STATUS_NONZERO_BIT) == 0) {
1436 dev_err(chan->dev,
1437 "Src Sts Elmnt %d chan %d has NonZero",
1438 chan->src_staprobe_idx + 1,
1439 chan->channel_number);
1440 handle_error(chan);
1441 break;
1442 }
1443 chan->idx_ctx_srcq_tail = psta_bd->user_handle;
1444 ppkt_ctx = chan->ppkt_ctx_srcq + chan->idx_ctx_srcq_tail;
1445 completed_bytes = (psta_bd->status_flag_byte_count
1446 & STA_BD_BYTE_COUNT_MASK) >>
1447 STA_BD_BYTE_COUNT_SHIFT;
1448
1449 memset(psta_bd, 0, sizeof(struct STATUS_DMA_DESCRIPTOR));
1450
1451 chan->src_staprobe_idx++;
1452
1453 if (chan->src_staprobe_idx == chan->total_descriptors)
1454 chan->src_staprobe_idx = 0;
1455
1456 chan->src_sta_hw_probe_idx++;
1457
1458 if (chan->src_sta_hw_probe_idx == chan->total_descriptors)
1459 chan->src_sta_hw_probe_idx = 0;
1460
1461 chan->chan_base->stas_q_limit = chan->src_sta_hw_probe_idx;
1462
1463 psta_bd = chan->psrc_sta_bd + chan->src_staprobe_idx;
1464
1465 srcq_desc_idx = ppkt_ctx->idx_sop;
1466
1467 do {
1468 psrc_bd = chan->psrc_sgl_bd + srcq_desc_idx;
1469 memset(psrc_bd, 0,
1470 sizeof(struct SOURCE_DMA_DESCRIPTOR));
1471
1472 spin_lock(&chan->src_desc_lock);
1473 chan->src_avail_descriptors++;
1474 spin_unlock(&chan->src_desc_lock);
1475
1476 if (srcq_desc_idx == ppkt_ctx->idx_eop)
1477 break;
1478 srcq_desc_idx++;
1479
1480 if (srcq_desc_idx == chan->total_descriptors)
1481 srcq_desc_idx = 0;
1482
1483 } while (1);
1484
1485
1486 if (ppkt_ctx->seg) {
1487 spin_lock(&chan->cookie_lock);
1488 dma_cookie_complete(&ppkt_ctx->seg->async_tx);
1489 spin_unlock(&chan->cookie_lock);
1490 rslt.result = DMA_TRANS_NOERROR;
1491 rslt.residue = ppkt_ctx->seg->total_transfer_bytes -
1492 completed_bytes;
1493 dmaengine_desc_get_callback_invoke(&ppkt_ctx->seg->async_tx,
1494 &rslt);
1495 list_for_each_entry_safe(ele, ele_nxt,
1496 &ppkt_ctx->seg->transfer_nodes,
1497 node) {
1498 list_del(&ele->node);
1499 mempool_free(ele, chan->tx_elements_pool);
1500 }
1501 mempool_free(ppkt_ctx->seg, chan->transactions_pool);
1502 }
1503 memset(ppkt_ctx, 0, sizeof(struct PACKET_TRANSFER_PARAMS));
1504 }
1505
1506 complete(&chan->srcq_work_complete);
1507}
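/**
 * ps_pcie_chan_primary_work - primary descriptor clean-up work
 * @work: handle_primary_desc_cleanup work of the channel
 *
 * Masks channel interrupts, runs the source and destination clean-up
 * workers to completion, re-enables interrupts and then kicks channel
 * programming for any transactions still waiting for descriptors.
 */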
1519static void ps_pcie_chan_primary_work(struct work_struct *work)
1520{
1521 struct ps_pcie_dma_chan *chan =
1522 (struct ps_pcie_dma_chan *)container_of(
1523 work, struct ps_pcie_dma_chan,
1524 handle_primary_desc_cleanup);
1525
1526
1527 ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
1528 DMA_INTCNTRL_ENABLINTR_BIT);
1529
1530 if (chan->psrc_sgl_bd) {
1531 reinit_completion(&chan->srcq_work_complete);
1532 if (chan->srcq_desc_cleanup)
1533 queue_work(chan->srcq_desc_cleanup,
1534 &chan->handle_srcq_desc_cleanup);
1535 }
1536 if (chan->pdst_sgl_bd) {
1537 reinit_completion(&chan->dstq_work_complete);
1538 if (chan->dstq_desc_cleanup)
1539 queue_work(chan->dstq_desc_cleanup,
1540 &chan->handle_dstq_desc_cleanup);
1541 }
1542
1543 if (chan->psrc_sgl_bd)
1544 wait_for_completion_interruptible(&chan->srcq_work_complete);
1545 if (chan->pdst_sgl_bd)
1546 wait_for_completion_interruptible(&chan->dstq_work_complete);
1547
1548
1549 ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
1550 DMA_INTCNTRL_ENABLINTR_BIT);
1551
1552 if (chan->chan_programming) {
1553 queue_work(chan->chan_programming,
1554 &chan->handle_chan_programming);
1555 }
1556
1557 if (chan->coalesce_count > 0 && chan->poll_timer.function)
1558 mod_timer(&chan->poll_timer, jiffies + chan->poll_timer_freq);
1559}
1560
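/*
 * read_rootdma_config - root DMA variant of device configuration: set
 * the DMA masks and obtain the register space, interrupt line and
 * vendor/device ids from platform resources and device properties.
 */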
1561static int read_rootdma_config(struct platform_device *platform_dev,
1562 struct xlnx_pcie_dma_device *xdev)
1563{
1564 int err;
1565 struct resource *r;
1566
1567 err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(64));
1568 if (err) {
1569 dev_info(&platform_dev->dev, "Cannot set 64 bit DMA mask\n");
1570 err = dma_set_mask(&platform_dev->dev, DMA_BIT_MASK(32));
1571 if (err) {
1572 dev_err(&platform_dev->dev, "DMA mask set error\n");
1573 return err;
1574 }
1575 }
1576
1577 err = dma_set_coherent_mask(&platform_dev->dev, DMA_BIT_MASK(64));
1578 if (err) {
1579 dev_info(&platform_dev->dev, "Cannot set 64 bit consistent DMA mask\n");
1580 err = dma_set_coherent_mask(&platform_dev->dev,
1581 DMA_BIT_MASK(32));
1582 if (err) {
1583 dev_err(&platform_dev->dev, "Cannot set consistent DMA mask\n");
1584 return err;
1585 }
1586 }
1587
1588 r = platform_get_resource_byname(platform_dev, IORESOURCE_MEM,
1589 "ps_pcie_regbase");
	if (!r) {
		dev_err(&platform_dev->dev,
			"Unable to find memory resource for root dma\n");
		return -ENODEV;
	}
1595
1596 xdev->reg_base = devm_ioremap_resource(&platform_dev->dev, r);
1597 if (IS_ERR(xdev->reg_base)) {
1598 dev_err(&platform_dev->dev, "ioresource error for root dma\n");
1599 return PTR_ERR(xdev->reg_base);
1600 }
1601
1602 xdev->platform_irq_vec =
1603 platform_get_irq_byname(platform_dev,
1604 "ps_pcie_rootdma_intr");
1605 if (xdev->platform_irq_vec < 0) {
1606 dev_err(&platform_dev->dev,
1607 "Unable to get interrupt number for root dma\n");
1608 return xdev->platform_irq_vec;
1609 }
1610
1611 err = device_property_read_u16(&platform_dev->dev, "dma_vendorid",
1612 &xdev->rootdma_vendor);
1613 if (err) {
1614 dev_err(&platform_dev->dev,
1615 "Unable to find RootDMA PCI Vendor Id\n");
1616 return err;
1617 }
1618
1619 err = device_property_read_u16(&platform_dev->dev, "dma_deviceid",
1620 &xdev->rootdma_device);
1621 if (err) {
1622 dev_err(&platform_dev->dev,
1623 "Unable to find RootDMA PCI Device Id\n");
1624 return err;
1625 }
1626
1627 xdev->common.dev = xdev->dev;
1628
1629 return 0;
1630}
1631
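/*
 * read_epdma_config - end point DMA variant of device configuration:
 * map the parent PCI device's BARs, record their addresses/lengths and
 * allocate interrupt vectors.
 */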
1632static int read_epdma_config(struct platform_device *platform_dev,
1633 struct xlnx_pcie_dma_device *xdev)
1634{
1635 int err;
1636 struct pci_dev *pdev;
1637 u16 i;
1638 void __iomem * const *pci_iomap;
1639 unsigned long pci_bar_length;
1640
1641 pdev = *((struct pci_dev **)(platform_dev->dev.platform_data));
1642 xdev->pci_dev = pdev;
1643
1644 for (i = 0; i < MAX_BARS; i++) {
1645 if (pci_resource_len(pdev, i) == 0)
1646 continue;
1647 xdev->bar_mask = xdev->bar_mask | (1 << (i));
1648 }
1649
1650 err = pcim_iomap_regions(pdev, xdev->bar_mask, PLATFORM_DRIVER_NAME);
1651 if (err) {
1652 dev_err(&pdev->dev, "Cannot request PCI regions, aborting\n");
1653 return err;
1654 }
1655
1656 pci_iomap = pcim_iomap_table(pdev);
1657 if (!pci_iomap) {
1658 err = -ENOMEM;
1659 return err;
1660 }
1661
1662 for (i = 0; i < MAX_BARS; i++) {
1663 pci_bar_length = pci_resource_len(pdev, i);
1664 if (pci_bar_length == 0) {
1665 xdev->bar_info[i].BAR_LENGTH = 0;
1666 xdev->bar_info[i].BAR_PHYS_ADDR = 0;
1667 xdev->bar_info[i].BAR_VIRT_ADDR = NULL;
1668 } else {
1669 xdev->bar_info[i].BAR_LENGTH =
1670 pci_bar_length;
1671 xdev->bar_info[i].BAR_PHYS_ADDR =
1672 pci_resource_start(pdev, i);
1673 xdev->bar_info[i].BAR_VIRT_ADDR =
1674 (void *)pci_iomap[i];
1675 }
1676 }
1677
1678 xdev->reg_base = pci_iomap[DMA_BAR_NUMBER];
1679
1680 err = irq_probe(xdev);
1681 if (err < 0) {
1682 dev_err(&pdev->dev, "Cannot probe irq lines for device %d\n",
1683 platform_dev->id);
1684 return err;
1685 }
1686
1687 xdev->common.dev = &pdev->dev;
1688
1689 return 0;
1690}
1691
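/*
 * probe_channel_properties - parse the "ps_pcie_channel<N>" device
 * property for one channel and initialize its locks, lists, register
 * pointer, buffer locations and dmaengine match data.
 */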
1692static int probe_channel_properties(struct platform_device *platform_dev,
1693 struct xlnx_pcie_dma_device *xdev,
1694 u16 channel_number)
1695{
1696 int i;
1697 char propertyname[CHANNEL_PROPERTY_LENGTH];
1698 int numvals, ret;
1699 u32 *val;
1700 struct ps_pcie_dma_chan *channel;
1701 struct ps_pcie_dma_channel_match *xlnx_match;
1702
1703 snprintf(propertyname, CHANNEL_PROPERTY_LENGTH,
1704 "ps_pcie_channel%d", channel_number);
1705
1706 channel = &xdev->channels[channel_number];
1707
1708 spin_lock_init(&channel->channel_lock);
1709 spin_lock_init(&channel->cookie_lock);
1710
1711 INIT_LIST_HEAD(&channel->pending_list);
1712 spin_lock_init(&channel->pending_list_lock);
1713
1714 INIT_LIST_HEAD(&channel->active_list);
1715 spin_lock_init(&channel->active_list_lock);
1716
1717 spin_lock_init(&channel->src_desc_lock);
1718 spin_lock_init(&channel->dst_desc_lock);
1719
1720 INIT_LIST_HEAD(&channel->pending_interrupts_list);
1721 spin_lock_init(&channel->pending_interrupts_lock);
1722
1723 INIT_LIST_HEAD(&channel->active_interrupts_list);
1724 spin_lock_init(&channel->active_interrupts_lock);
1725
1726 init_completion(&channel->srcq_work_complete);
1727 init_completion(&channel->dstq_work_complete);
1728 init_completion(&channel->chan_shutdown_complt);
1729 init_completion(&channel->chan_terminate_complete);
1730
1731 if (device_property_present(&platform_dev->dev, propertyname)) {
1732 numvals = device_property_read_u32_array(&platform_dev->dev,
1733 propertyname, NULL, 0);
1734
1735 if (numvals < 0)
1736 return numvals;
1737
1738 val = devm_kzalloc(&platform_dev->dev, sizeof(u32) * numvals,
1739 GFP_KERNEL);
1740
1741 if (!val)
1742 return -ENOMEM;
1743
1744 ret = device_property_read_u32_array(&platform_dev->dev,
1745 propertyname, val,
1746 numvals);
1747 if (ret < 0) {
1748 dev_err(&platform_dev->dev,
1749 "Unable to read property %s\n", propertyname);
1750 return ret;
1751 }
1752
1753 for (i = 0; i < numvals; i++) {
1754 switch (i) {
1755 case DMA_CHANNEL_DIRECTION:
1756 channel->direction =
1757 (val[DMA_CHANNEL_DIRECTION] ==
1758 PCIE_AXI_DIRECTION) ?
1759 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1760 break;
1761 case NUM_DESCRIPTORS:
1762 channel->total_descriptors =
1763 val[NUM_DESCRIPTORS];
1764 if (channel->total_descriptors >
1765 MAX_DESCRIPTORS) {
					dev_info(&platform_dev->dev,
						 "Descriptors > allowed max\n");
1768 channel->total_descriptors =
1769 MAX_DESCRIPTORS;
1770 }
1771 break;
1772 case NUM_QUEUES:
1773 channel->num_queues = val[NUM_QUEUES];
1774 switch (channel->num_queues) {
1775 case DEFAULT_DMA_QUEUES:
1776 break;
1777 case TWO_DMA_QUEUES:
1778 break;
1779 default:
1780 dev_info(&platform_dev->dev,
1781 "Incorrect Q number for dma chan\n");
1782 channel->num_queues = DEFAULT_DMA_QUEUES;
1783 }
1784 break;
			case COALESCE_COUNT:
				channel->coalesce_count = val[COALESCE_COUNT];
1787
1788 if (channel->coalesce_count >
1789 MAX_COALESCE_COUNT) {
1790 dev_info(&platform_dev->dev,
1791 "Invalid coalesce Count\n");
1792 channel->coalesce_count =
1793 MAX_COALESCE_COUNT;
1794 }
1795 break;
1796 case POLL_TIMER_FREQUENCY:
1797 channel->poll_timer_freq =
1798 val[POLL_TIMER_FREQUENCY];
1799 break;
1800 default:
1801 dev_err(&platform_dev->dev,
1802 "Check order of channel properties!\n");
1803 }
1804 }
1805 } else {
1806 dev_err(&platform_dev->dev,
1807 "Property %s not present. Invalid configuration!\n",
1808 propertyname);
1809 return -ENOTSUPP;
1810 }
1811
1812 if (channel->direction == DMA_TO_DEVICE) {
1813 if (channel->num_queues == DEFAULT_DMA_QUEUES) {
1814 channel->srcq_buffer_location = BUFFER_LOC_PCI;
1815 channel->dstq_buffer_location = BUFFER_LOC_AXI;
1816 } else {
1817 channel->srcq_buffer_location = BUFFER_LOC_PCI;
1818 channel->dstq_buffer_location = BUFFER_LOC_INVALID;
1819 }
1820 } else {
1821 if (channel->num_queues == DEFAULT_DMA_QUEUES) {
1822 channel->srcq_buffer_location = BUFFER_LOC_AXI;
1823 channel->dstq_buffer_location = BUFFER_LOC_PCI;
1824 } else {
1825 channel->srcq_buffer_location = BUFFER_LOC_INVALID;
1826 channel->dstq_buffer_location = BUFFER_LOC_PCI;
1827 }
1828 }
1829
1830 channel->xdev = xdev;
1831 channel->channel_number = channel_number;
1832
1833 if (xdev->is_rootdma) {
1834 channel->dev = xdev->dev;
1835 channel->intr_status_offset = DMA_AXI_INTR_STATUS_REG_OFFSET;
1836 channel->intr_control_offset = DMA_AXI_INTR_CNTRL_REG_OFFSET;
1837 } else {
1838 channel->dev = &xdev->pci_dev->dev;
1839 channel->intr_status_offset = DMA_PCIE_INTR_STATUS_REG_OFFSET;
1840 channel->intr_control_offset = DMA_PCIE_INTR_CNTRL_REG_OFFSET;
1841 }
1842
1843 channel->chan_base =
1844 (struct DMA_ENGINE_REGISTERS *)((__force char *)(xdev->reg_base) +
1845 (channel_number * DMA_CHANNEL_REGS_SIZE));
1846
1847 if ((channel->chan_base->dma_channel_status &
1848 DMA_STATUS_DMA_PRES_BIT) == 0) {
1849 dev_err(&platform_dev->dev,
1850 "Hardware reports channel not present\n");
1851 return -ENOTSUPP;
1852 }
1853
1854 update_channel_read_attribute(channel);
1855 update_channel_write_attribute(channel);
1856
1857 xlnx_match = devm_kzalloc(&platform_dev->dev,
1858 sizeof(struct ps_pcie_dma_channel_match),
1859 GFP_KERNEL);
1860
1861 if (!xlnx_match)
1862 return -ENOMEM;
1863
1864 if (xdev->is_rootdma) {
1865 xlnx_match->pci_vendorid = xdev->rootdma_vendor;
1866 xlnx_match->pci_deviceid = xdev->rootdma_device;
1867 } else {
1868 xlnx_match->pci_vendorid = xdev->pci_dev->vendor;
1869 xlnx_match->pci_deviceid = xdev->pci_dev->device;
1870 xlnx_match->bar_params = xdev->bar_info;
1871 }
1872
1873 xlnx_match->board_number = xdev->board_number;
1874 xlnx_match->channel_number = channel_number;
1875 xlnx_match->direction = xdev->channels[channel_number].direction;
1876
1877 channel->common.private = (void *)xlnx_match;
1878
1879 channel->common.device = &xdev->common;
1880 list_add_tail(&channel->common.device_node, &xdev->common.channels);
1881
1882 return 0;
1883}
1884
1885static void xlnx_ps_pcie_destroy_mempool(struct ps_pcie_dma_chan *chan)
1886{
1887 mempool_destroy(chan->transactions_pool);
1888
1889 mempool_destroy(chan->tx_elements_pool);
1890
1891 mempool_destroy(chan->intr_transactions_pool);
1892}
1893
1894static void xlnx_ps_pcie_free_worker_queues(struct ps_pcie_dma_chan *chan)
1895{
1896 if (chan->maintenance_workq)
1897 destroy_workqueue(chan->maintenance_workq);
1898
1899 if (chan->sw_intrs_wrkq)
1900 destroy_workqueue(chan->sw_intrs_wrkq);
1901
1902 if (chan->srcq_desc_cleanup)
1903 destroy_workqueue(chan->srcq_desc_cleanup);
1904
1905 if (chan->dstq_desc_cleanup)
1906 destroy_workqueue(chan->dstq_desc_cleanup);
1907
1908 if (chan->chan_programming)
1909 destroy_workqueue(chan->chan_programming);
1910
1911 if (chan->primary_desc_cleanup)
1912 destroy_workqueue(chan->primary_desc_cleanup);
1913}
1914
1915static void xlnx_ps_pcie_free_pkt_ctxts(struct ps_pcie_dma_chan *chan)
1916{
1917 kfree(chan->ppkt_ctx_srcq);
1918
1919 kfree(chan->ppkt_ctx_dstq);
1920}
1921
1922static void xlnx_ps_pcie_free_descriptors(struct ps_pcie_dma_chan *chan)
1923{
1924 ssize_t size;
1925
1926 if (chan->psrc_sgl_bd) {
1927 size = chan->total_descriptors *
1928 sizeof(struct SOURCE_DMA_DESCRIPTOR);
1929 dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
1930 chan->src_sgl_bd_pa);
1931 }
1932
1933 if (chan->pdst_sgl_bd) {
1934 size = chan->total_descriptors *
1935 sizeof(struct DEST_DMA_DESCRIPTOR);
1936 dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
1937 chan->dst_sgl_bd_pa);
1938 }
1939
1940 if (chan->psrc_sta_bd) {
1941 size = chan->total_descriptors *
1942 sizeof(struct STATUS_DMA_DESCRIPTOR);
1943 dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
1944 chan->src_sta_bd_pa);
1945 }
1946
1947 if (chan->pdst_sta_bd) {
1948 size = chan->total_descriptors *
1949 sizeof(struct STATUS_DMA_DESCRIPTOR);
1950 dma_free_coherent(chan->dev, size, chan->pdst_sta_bd,
1951 chan->dst_sta_bd_pa);
1952 }
1953}
1954
1955static int xlnx_ps_pcie_channel_activate(struct ps_pcie_dma_chan *chan)
1956{
1957 u32 reg = chan->coalesce_count;
1958
1959 reg = reg << DMA_INTCNTRL_SGCOLSCCNT_BIT_SHIFT;

	/* Enable channel interrupts with the configured coalesce count */
1962 ps_pcie_dma_set_mask(chan, chan->intr_control_offset,
1963 reg | DMA_INTCNTRL_ENABLINTR_BIT |
1964 DMA_INTCNTRL_DMAERRINTR_BIT |
1965 DMA_INTCNTRL_DMASGINTR_BIT);

	/* Enable the DMA channel */
1968 ps_pcie_dma_set_mask(chan, DMA_CNTRL_REG_OFFSET,
1969 DMA_CNTRL_ENABL_BIT |
1970 DMA_CNTRL_64BIT_STAQ_ELEMSZ_BIT);
1971
1972 spin_lock(&chan->channel_lock);
1973 chan->state = CHANNEL_AVAILABLE;
1974 spin_unlock(&chan->channel_lock);
1975
1976
1977 if (chan->coalesce_count > 0 && !chan->poll_timer.function)
1978 xlnx_ps_pcie_alloc_poll_timer(chan);
1979
1980 return 0;
1981}
1982
1983static void xlnx_ps_pcie_channel_quiesce(struct ps_pcie_dma_chan *chan)
	/* Mask channel interrupts */
1986 ps_pcie_dma_clr_mask(chan, chan->intr_control_offset,
1987 DMA_INTCNTRL_ENABLINTR_BIT);
1988
1989
	if (chan->coalesce_count > 0 && chan->poll_timer.function)
		xlnx_ps_pcie_free_poll_timer(chan);
1992
1993
1994 if (chan->primary_desc_cleanup)
1995 flush_workqueue(chan->primary_desc_cleanup);
1996
1997
1998 if (chan->chan_programming)
1999 flush_workqueue(chan->chan_programming);

	/* Acknowledge any pending interrupt status */
2002 ps_pcie_dma_set_mask(chan, chan->intr_status_offset,
2003 DMA_INTSTATUS_DMAERR_BIT |
2004 DMA_INTSTATUS_SGLINTR_BIT |
2005 DMA_INTSTATUS_SWINTR_BIT);

	/* Disable the DMA channel */
2008 ps_pcie_dma_clr_mask(chan, DMA_CNTRL_REG_OFFSET, DMA_CNTRL_ENABL_BIT);
2009
2010 spin_lock(&chan->channel_lock);
2011 chan->state = CHANNEL_UNAVIALBLE;
2012 spin_unlock(&chan->channel_lock);
2013}
2014
2015static void ivk_cbk_intr_seg(struct ps_pcie_intr_segment *intr_seg,
2016 struct ps_pcie_dma_chan *chan,
2017 enum dmaengine_tx_result result)
2018{
2019 struct dmaengine_result rslt;
2020
2021 rslt.result = result;
2022 rslt.residue = 0;
2023
2024 spin_lock(&chan->cookie_lock);
2025 dma_cookie_complete(&intr_seg->async_intr_tx);
2026 spin_unlock(&chan->cookie_lock);
2027
2028 dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx, &rslt);
2029}
2030
2031static void ivk_cbk_seg(struct ps_pcie_tx_segment *seg,
2032 struct ps_pcie_dma_chan *chan,
2033 enum dmaengine_tx_result result)
2034{
2035 struct dmaengine_result rslt, *prslt;
2036
2037 spin_lock(&chan->cookie_lock);
2038 dma_cookie_complete(&seg->async_tx);
2039 spin_unlock(&chan->cookie_lock);
2040
2041 rslt.result = result;
2042 if (seg->src_elements &&
2043 chan->srcq_buffer_location == BUFFER_LOC_PCI) {
2044 rslt.residue = seg->total_transfer_bytes;
2045 prslt = &rslt;
2046 } else if (seg->dst_elements &&
2047 chan->dstq_buffer_location == BUFFER_LOC_PCI) {
2048 rslt.residue = seg->total_transfer_bytes;
2049 prslt = &rslt;
2050 } else {
2051 prslt = NULL;
2052 }
2053
2054 dmaengine_desc_get_callback_invoke(&seg->async_tx, prslt);
2055}
2056
2057static void ivk_cbk_ctx(struct PACKET_TRANSFER_PARAMS *ppkt_ctxt,
2058 struct ps_pcie_dma_chan *chan,
2059 enum dmaengine_tx_result result)
2060{
2061 if (ppkt_ctxt->availability_status == IN_USE) {
2062 if (ppkt_ctxt->seg) {
2063 ivk_cbk_seg(ppkt_ctxt->seg, chan, result);
2064 mempool_free(ppkt_ctxt->seg,
2065 chan->transactions_pool);
2066 }
2067 }
2068}
2069
2070static void ivk_cbk_for_pending(struct ps_pcie_dma_chan *chan)
2071{
2072 int i;
2073 struct PACKET_TRANSFER_PARAMS *ppkt_ctxt;
2074 struct ps_pcie_tx_segment *seg, *seg_nxt;
2075 struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
2076 struct ps_pcie_transfer_elements *ele, *ele_nxt;
2077
2078 if (chan->ppkt_ctx_srcq) {
2079 if (chan->idx_ctx_srcq_tail != chan->idx_ctx_srcq_head) {
2080 i = chan->idx_ctx_srcq_tail;
2081 while (i != chan->idx_ctx_srcq_head) {
2082 ppkt_ctxt = chan->ppkt_ctx_srcq + i;
2083 ivk_cbk_ctx(ppkt_ctxt, chan,
2084 DMA_TRANS_READ_FAILED);
2085 memset(ppkt_ctxt, 0,
2086 sizeof(struct PACKET_TRANSFER_PARAMS));
2087 i++;
2088 if (i == chan->total_descriptors)
2089 i = 0;
2090 }
2091 }
2092 }
2093
2094 if (chan->ppkt_ctx_dstq) {
2095 if (chan->idx_ctx_dstq_tail != chan->idx_ctx_dstq_head) {
2096 i = chan->idx_ctx_dstq_tail;
2097 while (i != chan->idx_ctx_dstq_head) {
2098 ppkt_ctxt = chan->ppkt_ctx_dstq + i;
2099 ivk_cbk_ctx(ppkt_ctxt, chan,
2100 DMA_TRANS_WRITE_FAILED);
2101 memset(ppkt_ctxt, 0,
2102 sizeof(struct PACKET_TRANSFER_PARAMS));
2103 i++;
2104 if (i == chan->total_descriptors)
2105 i = 0;
2106 }
2107 }
2108 }
2109
2110 list_for_each_entry_safe(seg, seg_nxt, &chan->active_list, node) {
2111 ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
2112 spin_lock(&chan->active_list_lock);
2113 list_del(&seg->node);
2114 spin_unlock(&chan->active_list_lock);
2115 list_for_each_entry_safe(ele, ele_nxt,
2116 &seg->transfer_nodes, node) {
2117 list_del(&ele->node);
2118 mempool_free(ele, chan->tx_elements_pool);
2119 }
2120 mempool_free(seg, chan->transactions_pool);
2121 }
2122
2123 list_for_each_entry_safe(seg, seg_nxt, &chan->pending_list, node) {
2124 ivk_cbk_seg(seg, chan, DMA_TRANS_ABORTED);
2125 spin_lock(&chan->pending_list_lock);
2126 list_del(&seg->node);
2127 spin_unlock(&chan->pending_list_lock);
2128 list_for_each_entry_safe(ele, ele_nxt,
2129 &seg->transfer_nodes, node) {
2130 list_del(&ele->node);
2131 mempool_free(ele, chan->tx_elements_pool);
2132 }
2133 mempool_free(seg, chan->transactions_pool);
2134 }
2135
2136 list_for_each_entry_safe(intr_seg, intr_seg_next,
2137 &chan->active_interrupts_list, node) {
2138 ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
2139 spin_lock(&chan->active_interrupts_lock);
2140 list_del(&intr_seg->node);
2141 spin_unlock(&chan->active_interrupts_lock);
2142 mempool_free(intr_seg, chan->intr_transactions_pool);
2143 }
2144
2145 list_for_each_entry_safe(intr_seg, intr_seg_next,
2146 &chan->pending_interrupts_list, node) {
2147 ivk_cbk_intr_seg(intr_seg, chan, DMA_TRANS_ABORTED);
2148 spin_lock(&chan->pending_interrupts_lock);
2149 list_del(&intr_seg->node);
2150 spin_unlock(&chan->pending_interrupts_lock);
2151 mempool_free(intr_seg, chan->intr_transactions_pool);
2152 }
2153}
2154
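/*
 * Recover a channel: quiesce it, fail outstanding transactions back to
 * their submitters, reset hardware and software state, then bring the
 * channel back up.
 */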
2155static void xlnx_ps_pcie_reset_channel(struct ps_pcie_dma_chan *chan)
2156{
2157 xlnx_ps_pcie_channel_quiesce(chan);
2158
2159 ivk_cbk_for_pending(chan);
2160
2161 ps_pcie_chan_reset(chan);
2162
2163 init_sw_components(chan);
2164 init_hw_components(chan);
2165
2166 xlnx_ps_pcie_channel_activate(chan);
2167}
2168
2169static void xlnx_ps_pcie_free_poll_timer(struct ps_pcie_dma_chan *chan)
2170{
2171 if (chan->poll_timer.function) {
2172 del_timer_sync(&chan->poll_timer);
2173 chan->poll_timer.function = NULL;
2174 }
2175}
2176
2177static int xlnx_ps_pcie_alloc_poll_timer(struct ps_pcie_dma_chan *chan)
2178{
2179 timer_setup(&chan->poll_timer, poll_completed_transactions, 0);
2180 chan->poll_timer.expires = jiffies + chan->poll_timer_freq;
2181
2182 add_timer(&chan->poll_timer);
2183
2184 return 0;
2185}
2186
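/*
 * Work handlers run from the per-channel maintenance workqueue so that
 * quiesce and cleanup, which may sleep, execute in process context.
 */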
2187static void terminate_transactions_work(struct work_struct *work)
2188{
	struct ps_pcie_dma_chan *chan =
		container_of(work, struct ps_pcie_dma_chan,
			     handle_chan_terminate);
2192
2193 xlnx_ps_pcie_channel_quiesce(chan);
2194 ivk_cbk_for_pending(chan);
2195 xlnx_ps_pcie_channel_activate(chan);
2196
2197 complete(&chan->chan_terminate_complete);
2198}
2199
2200static void chan_shutdown_work(struct work_struct *work)
2201{
	struct ps_pcie_dma_chan *chan =
		container_of(work, struct ps_pcie_dma_chan,
			     handle_chan_shutdown);
2205
2206 xlnx_ps_pcie_channel_quiesce(chan);
2207
2208 complete(&chan->chan_shutdown_complt);
2209}
2210
2211static void chan_reset_work(struct work_struct *work)
2212{
	struct ps_pcie_dma_chan *chan =
		container_of(work, struct ps_pcie_dma_chan,
			     handle_chan_reset);
2216
2217 xlnx_ps_pcie_reset_channel(chan);
2218}
2219
2220static void sw_intr_work(struct work_struct *work)
2221{
	struct ps_pcie_dma_chan *chan =
		container_of(work, struct ps_pcie_dma_chan,
			     handle_sw_intrs);
2225 struct ps_pcie_intr_segment *intr_seg, *intr_seg_next;
2226
2227 list_for_each_entry_safe(intr_seg, intr_seg_next,
2228 &chan->active_interrupts_list, node) {
2229 spin_lock(&chan->cookie_lock);
2230 dma_cookie_complete(&intr_seg->async_intr_tx);
2231 spin_unlock(&chan->cookie_lock);
2232 dmaengine_desc_get_callback_invoke(&intr_seg->async_intr_tx,
2233 NULL);
2234 spin_lock(&chan->active_interrupts_lock);
2235 list_del(&intr_seg->node);
2236 spin_unlock(&chan->active_interrupts_lock);
2237 }
2238}
2239
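/*
 * Create the per-channel single-threaded workqueues: descriptor
 * programming, primary/source/destination queue cleanup, maintenance
 * and software interrupt handling. Undo everything on failure.
 */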
2240static int xlnx_ps_pcie_alloc_worker_threads(struct ps_pcie_dma_chan *chan)
2241{
2242 char wq_name[WORKQ_NAME_SIZE];
2243
2244 snprintf(wq_name, WORKQ_NAME_SIZE,
2245 "PS PCIe channel %d descriptor programming wq",
2246 chan->channel_number);
2247 chan->chan_programming =
2248 create_singlethread_workqueue((const char *)wq_name);
2249 if (!chan->chan_programming) {
2250 dev_err(chan->dev,
2251 "Unable to create programming wq for chan %d",
2252 chan->channel_number);
2253 goto err_no_desc_program_wq;
2254 } else {
2255 INIT_WORK(&chan->handle_chan_programming,
2256 ps_pcie_chan_program_work);
2257 }
2258 memset(wq_name, 0, WORKQ_NAME_SIZE);
2259
2260 snprintf(wq_name, WORKQ_NAME_SIZE,
2261 "PS PCIe channel %d primary cleanup wq", chan->channel_number);
2262 chan->primary_desc_cleanup =
2263 create_singlethread_workqueue((const char *)wq_name);
2264 if (!chan->primary_desc_cleanup) {
2265 dev_err(chan->dev,
2266 "Unable to create primary cleanup wq for channel %d",
2267 chan->channel_number);
2268 goto err_no_primary_clean_wq;
2269 } else {
2270 INIT_WORK(&chan->handle_primary_desc_cleanup,
2271 ps_pcie_chan_primary_work);
2272 }
2273 memset(wq_name, 0, WORKQ_NAME_SIZE);
2274
2275 snprintf(wq_name, WORKQ_NAME_SIZE,
2276 "PS PCIe channel %d maintenance works wq",
2277 chan->channel_number);
2278 chan->maintenance_workq =
2279 create_singlethread_workqueue((const char *)wq_name);
2280 if (!chan->maintenance_workq) {
2281 dev_err(chan->dev,
2282 "Unable to create maintenance wq for channel %d",
2283 chan->channel_number);
2284 goto err_no_maintenance_wq;
2285 } else {
2286 INIT_WORK(&chan->handle_chan_reset, chan_reset_work);
2287 INIT_WORK(&chan->handle_chan_shutdown, chan_shutdown_work);
2288 INIT_WORK(&chan->handle_chan_terminate,
2289 terminate_transactions_work);
2290 }
2291 memset(wq_name, 0, WORKQ_NAME_SIZE);
2292
2293 snprintf(wq_name, WORKQ_NAME_SIZE,
2294 "PS PCIe channel %d software Interrupts wq",
2295 chan->channel_number);
2296 chan->sw_intrs_wrkq =
2297 create_singlethread_workqueue((const char *)wq_name);
2298 if (!chan->sw_intrs_wrkq) {
2299 dev_err(chan->dev,
2300 "Unable to create sw interrupts wq for channel %d",
2301 chan->channel_number);
2302 goto err_no_sw_intrs_wq;
2303 } else {
2304 INIT_WORK(&chan->handle_sw_intrs, sw_intr_work);
2305 }
2306 memset(wq_name, 0, WORKQ_NAME_SIZE);
2307
2308 if (chan->psrc_sgl_bd) {
2309 snprintf(wq_name, WORKQ_NAME_SIZE,
2310 "PS PCIe channel %d srcq handling wq",
2311 chan->channel_number);
2312 chan->srcq_desc_cleanup =
2313 create_singlethread_workqueue((const char *)wq_name);
2314 if (!chan->srcq_desc_cleanup) {
2315 dev_err(chan->dev,
2316 "Unable to create src q completion wq chan %d",
2317 chan->channel_number);
2318 goto err_no_src_q_completion_wq;
2319 } else {
2320 INIT_WORK(&chan->handle_srcq_desc_cleanup,
2321 src_cleanup_work);
2322 }
2323 memset(wq_name, 0, WORKQ_NAME_SIZE);
2324 }
2325
2326 if (chan->pdst_sgl_bd) {
2327 snprintf(wq_name, WORKQ_NAME_SIZE,
2328 "PS PCIe channel %d dstq handling wq",
2329 chan->channel_number);
2330 chan->dstq_desc_cleanup =
2331 create_singlethread_workqueue((const char *)wq_name);
2332 if (!chan->dstq_desc_cleanup) {
2333 dev_err(chan->dev,
2334 "Unable to create dst q completion wq chan %d",
2335 chan->channel_number);
2336 goto err_no_dst_q_completion_wq;
2337 } else {
2338 INIT_WORK(&chan->handle_dstq_desc_cleanup,
2339 dst_cleanup_work);
2340 }
2341 memset(wq_name, 0, WORKQ_NAME_SIZE);
2342 }
2343
2344 return 0;
2345err_no_dst_q_completion_wq:
2346 if (chan->srcq_desc_cleanup)
2347 destroy_workqueue(chan->srcq_desc_cleanup);
2348err_no_src_q_completion_wq:
2349 if (chan->sw_intrs_wrkq)
2350 destroy_workqueue(chan->sw_intrs_wrkq);
2351err_no_sw_intrs_wq:
2352 if (chan->maintenance_workq)
2353 destroy_workqueue(chan->maintenance_workq);
2354err_no_maintenance_wq:
2355 if (chan->primary_desc_cleanup)
2356 destroy_workqueue(chan->primary_desc_cleanup);
2357err_no_primary_clean_wq:
2358 if (chan->chan_programming)
2359 destroy_workqueue(chan->chan_programming);
2360err_no_desc_program_wq:
2361 return -ENOMEM;
2362}
2363
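/*
 * Create the mempools backing transaction segments, transfer elements
 * and software interrupt segments.
 */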
2364static int xlnx_ps_pcie_alloc_mempool(struct ps_pcie_dma_chan *chan)
2365{
2366 chan->transactions_pool =
2367 mempool_create_kmalloc_pool(chan->total_descriptors,
2368 sizeof(struct ps_pcie_tx_segment));
2369
2370 if (!chan->transactions_pool)
2371 goto no_transactions_pool;
2372
2373 chan->tx_elements_pool =
2374 mempool_create_kmalloc_pool(chan->total_descriptors,
2375 sizeof(struct ps_pcie_transfer_elements));
2376
2377 if (!chan->tx_elements_pool)
2378 goto no_tx_elements_pool;
2379
2380 chan->intr_transactions_pool =
2381 mempool_create_kmalloc_pool(MIN_SW_INTR_TRANSACTIONS,
2382 sizeof(struct ps_pcie_intr_segment));
2383
2384 if (!chan->intr_transactions_pool)
2385 goto no_intr_transactions_pool;
2386
2387 return 0;
2388
2389no_intr_transactions_pool:
2390 mempool_destroy(chan->tx_elements_pool);
2391no_tx_elements_pool:
2392 mempool_destroy(chan->transactions_pool);
2393no_transactions_pool:
2394 return -ENOMEM;
2395}
2396
2397static int xlnx_ps_pcie_alloc_pkt_contexts(struct ps_pcie_dma_chan *chan)
2398{
2399 if (chan->psrc_sgl_bd) {
2400 chan->ppkt_ctx_srcq =
2401 kcalloc(chan->total_descriptors,
2402 sizeof(struct PACKET_TRANSFER_PARAMS),
2403 GFP_KERNEL);
2404 if (!chan->ppkt_ctx_srcq) {
2405 dev_err(chan->dev,
				"Src pkt ctx allocation for chan %d failed\n",
2407 chan->channel_number);
2408 goto err_no_src_pkt_ctx;
2409 }
2410 }
2411
2412 if (chan->pdst_sgl_bd) {
2413 chan->ppkt_ctx_dstq =
2414 kcalloc(chan->total_descriptors,
2415 sizeof(struct PACKET_TRANSFER_PARAMS),
2416 GFP_KERNEL);
2417 if (!chan->ppkt_ctx_dstq) {
2418 dev_err(chan->dev,
				"Dst pkt ctx allocation for chan %d failed\n",
2420 chan->channel_number);
2421 goto err_no_dst_pkt_ctx;
2422 }
2423 }
2424
2425 return 0;
2426
2427err_no_dst_pkt_ctx:
2428 kfree(chan->ppkt_ctx_srcq);
2429
2430err_no_src_pkt_ctx:
2431 return -ENOMEM;
2432}
2433
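/*
 * Two-queue mode uses a single SGL/status ring pair, attached to either
 * the source or the destination side depending on channel direction.
 */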
2434static int dma_alloc_descriptors_two_queues(struct ps_pcie_dma_chan *chan)
2435{
2436 size_t size;
2437
2438 void *sgl_base;
2439 void *sta_base;
2440 dma_addr_t phy_addr_sglbase;
2441 dma_addr_t phy_addr_stabase;
2442
2443 size = chan->total_descriptors *
2444 sizeof(struct SOURCE_DMA_DESCRIPTOR);
2445
2446 sgl_base = dma_alloc_coherent(chan->dev, size, &phy_addr_sglbase,
2447 GFP_KERNEL);
2448
2449 if (!sgl_base) {
2450 dev_err(chan->dev,
2451 "Sgl bds in two channel mode for chan %d failed\n",
2452 chan->channel_number);
2453 goto err_no_sgl_bds;
2454 }
2455
2456 size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
2457 sta_base = dma_alloc_coherent(chan->dev, size, &phy_addr_stabase,
2458 GFP_KERNEL);
2459
2460 if (!sta_base) {
2461 dev_err(chan->dev,
2462 "Sta bds in two channel mode for chan %d failed\n",
2463 chan->channel_number);
2464 goto err_no_sta_bds;
2465 }
2466
2467 if (chan->direction == DMA_TO_DEVICE) {
2468 chan->psrc_sgl_bd = sgl_base;
2469 chan->src_sgl_bd_pa = phy_addr_sglbase;
2470
2471 chan->psrc_sta_bd = sta_base;
2472 chan->src_sta_bd_pa = phy_addr_stabase;
2473
2474 chan->pdst_sgl_bd = NULL;
2475 chan->dst_sgl_bd_pa = 0;
2476
2477 chan->pdst_sta_bd = NULL;
2478 chan->dst_sta_bd_pa = 0;
2479
2480 } else if (chan->direction == DMA_FROM_DEVICE) {
2481 chan->psrc_sgl_bd = NULL;
2482 chan->src_sgl_bd_pa = 0;
2483
2484 chan->psrc_sta_bd = NULL;
2485 chan->src_sta_bd_pa = 0;
2486
2487 chan->pdst_sgl_bd = sgl_base;
2488 chan->dst_sgl_bd_pa = phy_addr_sglbase;
2489
2490 chan->pdst_sta_bd = sta_base;
2491 chan->dst_sta_bd_pa = phy_addr_stabase;
2492
2493 } else {
2494 dev_err(chan->dev,
2495 "%d %s() Unsupported channel direction\n",
2496 __LINE__, __func__);
2497 goto unsupported_channel_direction;
2498 }
2499
2500 return 0;
2501
2502unsupported_channel_direction:
2503 size = chan->total_descriptors *
2504 sizeof(struct STATUS_DMA_DESCRIPTOR);
2505 dma_free_coherent(chan->dev, size, sta_base, phy_addr_stabase);
2506err_no_sta_bds:
2507 size = chan->total_descriptors *
2508 sizeof(struct SOURCE_DMA_DESCRIPTOR);
2509 dma_free_coherent(chan->dev, size, sgl_base, phy_addr_sglbase);
2510err_no_sgl_bds:
2511
2512 return -ENOMEM;
2513}
2514
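/*
 * Four-queue (default) mode: allocate source and destination SGL rings
 * along with both status rings from coherent memory.
 */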
2515static int dma_alloc_decriptors_all_queues(struct ps_pcie_dma_chan *chan)
2516{
2517 size_t size;
2518
2519 size = chan->total_descriptors *
2520 sizeof(struct SOURCE_DMA_DESCRIPTOR);
2521 chan->psrc_sgl_bd =
2522 dma_alloc_coherent(chan->dev, size, &chan->src_sgl_bd_pa,
2523 GFP_KERNEL);
2524
2525 if (!chan->psrc_sgl_bd) {
2526 dev_err(chan->dev,
2527 "Alloc fail src q buffer descriptors for chan %d\n",
2528 chan->channel_number);
2529 goto err_no_src_sgl_descriptors;
2530 }
2531
2532 size = chan->total_descriptors * sizeof(struct DEST_DMA_DESCRIPTOR);
2533 chan->pdst_sgl_bd =
2534 dma_alloc_coherent(chan->dev, size, &chan->dst_sgl_bd_pa,
2535 GFP_KERNEL);
2536
2537 if (!chan->pdst_sgl_bd) {
2538 dev_err(chan->dev,
2539 "Alloc fail dst q buffer descriptors for chan %d\n",
2540 chan->channel_number);
2541 goto err_no_dst_sgl_descriptors;
2542 }
2543
2544 size = chan->total_descriptors * sizeof(struct STATUS_DMA_DESCRIPTOR);
2545 chan->psrc_sta_bd =
2546 dma_alloc_coherent(chan->dev, size, &chan->src_sta_bd_pa,
2547 GFP_KERNEL);
2548
2549 if (!chan->psrc_sta_bd) {
2550 dev_err(chan->dev,
2551 "Unable to allocate src q status bds for chan %d\n",
2552 chan->channel_number);
2553 goto err_no_src_sta_descriptors;
2554 }
2555
2556 chan->pdst_sta_bd =
2557 dma_alloc_coherent(chan->dev, size, &chan->dst_sta_bd_pa,
2558 GFP_KERNEL);
2559
2560 if (!chan->pdst_sta_bd) {
2561 dev_err(chan->dev,
2562 "Unable to allocate Dst q status bds for chan %d\n",
2563 chan->channel_number);
2564 goto err_no_dst_sta_descriptors;
2565 }
2566
2567 return 0;
2568
2569err_no_dst_sta_descriptors:
2570 size = chan->total_descriptors *
2571 sizeof(struct STATUS_DMA_DESCRIPTOR);
2572 dma_free_coherent(chan->dev, size, chan->psrc_sta_bd,
2573 chan->src_sta_bd_pa);
2574err_no_src_sta_descriptors:
2575 size = chan->total_descriptors *
2576 sizeof(struct DEST_DMA_DESCRIPTOR);
2577 dma_free_coherent(chan->dev, size, chan->pdst_sgl_bd,
2578 chan->dst_sgl_bd_pa);
2579err_no_dst_sgl_descriptors:
2580 size = chan->total_descriptors *
2581 sizeof(struct SOURCE_DMA_DESCRIPTOR);
2582 dma_free_coherent(chan->dev, size, chan->psrc_sgl_bd,
2583 chan->src_sgl_bd_pa);
2584
2585err_no_src_sgl_descriptors:
2586 return -ENOMEM;
2587}
2588
2589static void xlnx_ps_pcie_dma_free_chan_resources(struct dma_chan *dchan)
2590{
2591 struct ps_pcie_dma_chan *chan;
2592
2593 if (!dchan)
2594 return;
2595
2596 chan = to_xilinx_chan(dchan);
2597
2598 if (chan->state == CHANNEL_RESOURCE_UNALLOCATED)
2599 return;
2600
2601 if (chan->maintenance_workq) {
2602 if (completion_done(&chan->chan_shutdown_complt))
2603 reinit_completion(&chan->chan_shutdown_complt);
2604 queue_work(chan->maintenance_workq,
2605 &chan->handle_chan_shutdown);
2606 wait_for_completion_interruptible(&chan->chan_shutdown_complt);
2607
2608 xlnx_ps_pcie_free_worker_queues(chan);
2609 xlnx_ps_pcie_free_pkt_ctxts(chan);
2610 xlnx_ps_pcie_destroy_mempool(chan);
2611 xlnx_ps_pcie_free_descriptors(chan);
2612
2613 spin_lock(&chan->channel_lock);
2614 chan->state = CHANNEL_RESOURCE_UNALLOCATED;
2615 spin_unlock(&chan->channel_lock);
2616 }
2617}
2618
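/*
 * dmaengine alloc_chan_resources hook: allocate descriptor rings,
 * mempools, packet contexts and worker threads, then reset the channel
 * so it starts from a clean state.
 */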
2619static int xlnx_ps_pcie_dma_alloc_chan_resources(struct dma_chan *dchan)
2620{
2621 struct ps_pcie_dma_chan *chan;
2622
2623 if (!dchan)
		return -EINVAL;
2625
2626 chan = to_xilinx_chan(dchan);
2627
2628 if (chan->state != CHANNEL_RESOURCE_UNALLOCATED)
2629 return 0;
2630
2631 if (chan->num_queues == DEFAULT_DMA_QUEUES) {
2632 if (dma_alloc_decriptors_all_queues(chan) != 0) {
2633 dev_err(chan->dev,
2634 "Alloc fail bds for channel %d\n",
2635 chan->channel_number);
2636 goto err_no_descriptors;
2637 }
2638 } else if (chan->num_queues == TWO_DMA_QUEUES) {
2639 if (dma_alloc_descriptors_two_queues(chan) != 0) {
2640 dev_err(chan->dev,
2641 "Alloc fail bds for two queues of channel %d\n",
2642 chan->channel_number);
2643 goto err_no_descriptors;
2644 }
2645 }
2646
2647 if (xlnx_ps_pcie_alloc_mempool(chan) != 0) {
2648 dev_err(chan->dev,
2649 "Unable to allocate memory pool for channel %d\n",
2650 chan->channel_number);
2651 goto err_no_mempools;
2652 }
2653
2654 if (xlnx_ps_pcie_alloc_pkt_contexts(chan) != 0) {
2655 dev_err(chan->dev,
2656 "Unable to allocate packet contexts for channel %d\n",
2657 chan->channel_number);
2658 goto err_no_pkt_ctxts;
2659 }
2660
2661 if (xlnx_ps_pcie_alloc_worker_threads(chan) != 0) {
2662 dev_err(chan->dev,
2663 "Unable to allocate worker queues for channel %d\n",
2664 chan->channel_number);
2665 goto err_no_worker_queues;
2666 }
2667
2668 xlnx_ps_pcie_reset_channel(chan);
2669
2670 dma_cookie_init(dchan);
2671
2672 return 0;
2673
2674err_no_worker_queues:
2675 xlnx_ps_pcie_free_pkt_ctxts(chan);
2676err_no_pkt_ctxts:
2677 xlnx_ps_pcie_destroy_mempool(chan);
2678err_no_mempools:
2679 xlnx_ps_pcie_free_descriptors(chan);
2680err_no_descriptors:
2681 return -ENOMEM;
2682}
2683
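/*
 * tx_submit hooks: assign a cookie under the cookie lock and park the
 * descriptor on the channel's pending list until issue_pending runs.
 */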
2684static dma_cookie_t xilinx_intr_tx_submit(struct dma_async_tx_descriptor *tx)
2685{
2686 struct ps_pcie_intr_segment *intr_seg =
2687 to_ps_pcie_dma_tx_intr_descriptor(tx);
2688 struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
2689 dma_cookie_t cookie;
2690
2691 if (chan->state != CHANNEL_AVAILABLE)
2692 return -EINVAL;
2693
2694 spin_lock(&chan->cookie_lock);
2695 cookie = dma_cookie_assign(tx);
2696 spin_unlock(&chan->cookie_lock);
2697
2698 spin_lock(&chan->pending_interrupts_lock);
2699 list_add_tail(&intr_seg->node, &chan->pending_interrupts_list);
2700 spin_unlock(&chan->pending_interrupts_lock);
2701
2702 return cookie;
2703}
2704
2705static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
2706{
2707 struct ps_pcie_tx_segment *seg = to_ps_pcie_dma_tx_descriptor(tx);
2708 struct ps_pcie_dma_chan *chan = to_xilinx_chan(tx->chan);
2709 dma_cookie_t cookie;
2710
2711 if (chan->state != CHANNEL_AVAILABLE)
2712 return -EINVAL;
2713
2714 spin_lock(&chan->cookie_lock);
2715 cookie = dma_cookie_assign(tx);
2716 spin_unlock(&chan->cookie_lock);
2717
2718 spin_lock(&chan->pending_list_lock);
2719 list_add_tail(&seg->node, &chan->pending_list);
2720 spin_unlock(&chan->pending_list_lock);
2721
2722 return cookie;
2723}
2724
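/*
 * prep_dma_memcpy splits the request into MAX_TRANSFER_LENGTH sized
 * transfer elements and returns an async_tx descriptor for submission.
 *
 * Illustrative client usage (a sketch only; the channel lookup and
 * addresses are placeholders, not part of this driver):
 *
 *	chan = dma_request_chan(dev, "ps_pcie_dma");
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */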
2735static struct dma_async_tx_descriptor *
2736xlnx_ps_pcie_dma_prep_memcpy(struct dma_chan *channel, dma_addr_t dma_dst,
2737 dma_addr_t dma_src, size_t len,
2738 unsigned long flags)
2739{
2740 struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
2741 struct ps_pcie_tx_segment *seg = NULL;
2742 struct ps_pcie_transfer_elements *ele = NULL;
2743 struct ps_pcie_transfer_elements *ele_nxt = NULL;
2744 u32 i;
2745
2746 if (chan->state != CHANNEL_AVAILABLE)
2747 return NULL;
2748
2749 if (chan->num_queues != DEFAULT_DMA_QUEUES) {
		dev_err(chan->dev,
			"Only prep_slave_sg is supported on channel %d\n",
			chan->channel_number);
2752 return NULL;
2753 }
2754
2755 seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
2756 if (!seg) {
		dev_err(chan->dev, "Tx segment alloc failed for channel %d\n",
2758 chan->channel_number);
2759 return NULL;
2760 }
2761
2762 memset(seg, 0, sizeof(*seg));
2763 INIT_LIST_HEAD(&seg->transfer_nodes);
2764
2765 for (i = 0; i < len / MAX_TRANSFER_LENGTH; i++) {
2766 ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
2767 if (!ele) {
			dev_err(chan->dev,
				"Tx element %d alloc failed for channel %d\n",
2769 i, chan->channel_number);
2770 goto err_elements_prep_memcpy;
2771 }
2772 ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
2773 ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
2774 ele->transfer_bytes = MAX_TRANSFER_LENGTH;
2775 list_add_tail(&ele->node, &seg->transfer_nodes);
2776 seg->src_elements++;
2777 seg->dst_elements++;
2778 seg->total_transfer_bytes += ele->transfer_bytes;
2779 ele = NULL;
2780 }
2781
2782 if (len % MAX_TRANSFER_LENGTH) {
2783 ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
2784 if (!ele) {
			dev_err(chan->dev,
				"Tx element %d alloc failed for channel %d\n",
2786 i, chan->channel_number);
2787 goto err_elements_prep_memcpy;
2788 }
2789 ele->src_pa = dma_src + (i * MAX_TRANSFER_LENGTH);
2790 ele->dst_pa = dma_dst + (i * MAX_TRANSFER_LENGTH);
2791 ele->transfer_bytes = len % MAX_TRANSFER_LENGTH;
2792 list_add_tail(&ele->node, &seg->transfer_nodes);
2793 seg->src_elements++;
2794 seg->dst_elements++;
2795 seg->total_transfer_bytes += ele->transfer_bytes;
2796 }
2797
2798 if (seg->src_elements > chan->total_descriptors) {
2799 dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
2800 chan->channel_number);
2801 goto err_elements_prep_memcpy;
2802 }
2803
2804 dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
2805 seg->async_tx.flags = flags;
2806 async_tx_ack(&seg->async_tx);
2807 seg->async_tx.tx_submit = xilinx_dma_tx_submit;
2808
2809 return &seg->async_tx;
2810
2811err_elements_prep_memcpy:
2812 list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
2813 list_del(&ele->node);
2814 mempool_free(ele, chan->tx_elements_pool);
2815 }
2816 mempool_free(seg, chan->transactions_pool);
2817 return NULL;
2818}
2819
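/*
 * prep_slave_sg builds transfer elements from the scatterlist, splitting
 * entries larger than MAX_TRANSFER_LENGTH, for channels running in
 * two-queue (slave) mode.
 */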
2820static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_slave_sg(
2821 struct dma_chan *channel, struct scatterlist *sgl,
2822 unsigned int sg_len, enum dma_transfer_direction direction,
2823 unsigned long flags, void *context)
2824{
2825 struct ps_pcie_dma_chan *chan = to_xilinx_chan(channel);
2826 struct ps_pcie_tx_segment *seg = NULL;
2827 struct scatterlist *sgl_ptr;
2828 struct ps_pcie_transfer_elements *ele = NULL;
2829 struct ps_pcie_transfer_elements *ele_nxt = NULL;
2830 u32 i, j;
2831
2832 if (chan->state != CHANNEL_AVAILABLE)
2833 return NULL;
2834
2835 if (!(is_slave_direction(direction)))
2836 return NULL;
2837
2838 if (!sgl || sg_len == 0)
2839 return NULL;
2840
2841 if (chan->num_queues != TWO_DMA_QUEUES) {
		dev_err(chan->dev,
			"Only prep_dma_memcpy is supported on channel %d\n",
			chan->channel_number);
2844 return NULL;
2845 }
2846
2847 seg = mempool_alloc(chan->transactions_pool, GFP_ATOMIC);
2848 if (!seg) {
		dev_err(chan->dev,
			"Unable to allocate tx segment for channel %d\n",
2850 chan->channel_number);
2851 return NULL;
2852 }
2853
	memset(seg, 0, sizeof(*seg));
	INIT_LIST_HEAD(&seg->transfer_nodes);
2855
2856 for_each_sg(sgl, sgl_ptr, sg_len, j) {
2857 for (i = 0; i < sg_dma_len(sgl_ptr) / MAX_TRANSFER_LENGTH; i++) {
2858 ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
2859 if (!ele) {
				dev_err(chan->dev,
					"Tx element %d alloc failed for channel %d\n",
2861 i, chan->channel_number);
2862 goto err_elements_prep_slave_sg;
2863 }
2864 if (chan->direction == DMA_TO_DEVICE) {
2865 ele->src_pa = sg_dma_address(sgl_ptr) +
2866 (i * MAX_TRANSFER_LENGTH);
2867 seg->src_elements++;
2868 } else {
2869 ele->dst_pa = sg_dma_address(sgl_ptr) +
2870 (i * MAX_TRANSFER_LENGTH);
2871 seg->dst_elements++;
2872 }
2873 ele->transfer_bytes = MAX_TRANSFER_LENGTH;
2874 list_add_tail(&ele->node, &seg->transfer_nodes);
2875 seg->total_transfer_bytes += ele->transfer_bytes;
2876 ele = NULL;
2877 }
2878 if (sg_dma_len(sgl_ptr) % MAX_TRANSFER_LENGTH) {
2879 ele = mempool_alloc(chan->tx_elements_pool, GFP_ATOMIC);
2880 if (!ele) {
				dev_err(chan->dev,
					"Tx element %d alloc failed for channel %d\n",
2882 i, chan->channel_number);
2883 goto err_elements_prep_slave_sg;
2884 }
2885 if (chan->direction == DMA_TO_DEVICE) {
2886 ele->src_pa = sg_dma_address(sgl_ptr) +
2887 (i * MAX_TRANSFER_LENGTH);
2888 seg->src_elements++;
2889 } else {
2890 ele->dst_pa = sg_dma_address(sgl_ptr) +
2891 (i * MAX_TRANSFER_LENGTH);
2892 seg->dst_elements++;
2893 }
2894 ele->transfer_bytes = sg_dma_len(sgl_ptr) %
2895 MAX_TRANSFER_LENGTH;
2896 list_add_tail(&ele->node, &seg->transfer_nodes);
2897 seg->total_transfer_bytes += ele->transfer_bytes;
2898 }
2899 }
2900
2901 if (max(seg->src_elements, seg->dst_elements) >
2902 chan->total_descriptors) {
2903 dev_err(chan->dev, "Insufficient descriptors in channel %d for dma transaction\n",
2904 chan->channel_number);
2905 goto err_elements_prep_slave_sg;
2906 }
2907
2908 dma_async_tx_descriptor_init(&seg->async_tx, &chan->common);
2909 seg->async_tx.flags = flags;
2910 async_tx_ack(&seg->async_tx);
2911 seg->async_tx.tx_submit = xilinx_dma_tx_submit;
2912
2913 return &seg->async_tx;
2914
2915err_elements_prep_slave_sg:
2916 list_for_each_entry_safe(ele, ele_nxt, &seg->transfer_nodes, node) {
2917 list_del(&ele->node);
2918 mempool_free(ele, chan->tx_elements_pool);
2919 }
2920 mempool_free(seg, chan->transactions_pool);
2921 return NULL;
2922}
2923
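/*
 * issue_pending moves submitted segments from the pending lists onto the
 * active lists and kicks the channel programming work.
 */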
2924static void xlnx_ps_pcie_dma_issue_pending(struct dma_chan *channel)
2925{
2926 struct ps_pcie_dma_chan *chan;
2927
2928 if (!channel)
2929 return;
2930
2931 chan = to_xilinx_chan(channel);
2932
2933 if (!list_empty(&chan->pending_list)) {
2934 spin_lock(&chan->pending_list_lock);
2935 spin_lock(&chan->active_list_lock);
2936 list_splice_tail_init(&chan->pending_list,
2937 &chan->active_list);
2938 spin_unlock(&chan->active_list_lock);
2939 spin_unlock(&chan->pending_list_lock);
2940 }
2941
2942 if (!list_empty(&chan->pending_interrupts_list)) {
2943 spin_lock(&chan->pending_interrupts_lock);
2944 spin_lock(&chan->active_interrupts_lock);
2945 list_splice_tail_init(&chan->pending_interrupts_list,
2946 &chan->active_interrupts_list);
2947 spin_unlock(&chan->active_interrupts_lock);
2948 spin_unlock(&chan->pending_interrupts_lock);
2949 }
2950
2951 if (chan->chan_programming)
2952 queue_work(chan->chan_programming,
2953 &chan->handle_chan_programming);
2954}
2955
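/*
 * terminate_all hands off to the maintenance workqueue, which quiesces
 * the channel, aborts outstanding transactions and re-activates it.
 */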
2956static int xlnx_ps_pcie_dma_terminate_all(struct dma_chan *channel)
2957{
2958 struct ps_pcie_dma_chan *chan;
2959
2960 if (!channel)
		return -EINVAL;
2962
2963 chan = to_xilinx_chan(channel);
2964
2965 if (chan->state != CHANNEL_AVAILABLE)
2966 return 1;
2967
2968 if (chan->maintenance_workq) {
2969 if (completion_done(&chan->chan_terminate_complete))
2970 reinit_completion(&chan->chan_terminate_complete);
2971 queue_work(chan->maintenance_workq,
2972 &chan->handle_chan_terminate);
2973 wait_for_completion_interruptible(
2974 &chan->chan_terminate_complete);
2975 }
2976
2977 return 0;
2978}
2979
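/*
 * prep_dma_interrupt allocates a data-less descriptor whose callback is
 * invoked from the software interrupt work handler.
 */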
2980static struct dma_async_tx_descriptor *xlnx_ps_pcie_dma_prep_interrupt(
2981 struct dma_chan *channel, unsigned long flags)
2982{
2983 struct ps_pcie_dma_chan *chan;
2984 struct ps_pcie_intr_segment *intr_segment = NULL;
2985
2986 if (!channel)
2987 return NULL;
2988
2989 chan = to_xilinx_chan(channel);
2990
2991 if (chan->state != CHANNEL_AVAILABLE)
2992 return NULL;
2993
	intr_segment = mempool_alloc(chan->intr_transactions_pool, GFP_ATOMIC);
	if (!intr_segment)
		return NULL;

	memset(intr_segment, 0, sizeof(*intr_segment));
2997
2998 dma_async_tx_descriptor_init(&intr_segment->async_intr_tx,
2999 &chan->common);
3000 intr_segment->async_intr_tx.flags = flags;
3001 async_tx_ack(&intr_segment->async_intr_tx);
3002 intr_segment->async_intr_tx.tx_submit = xilinx_intr_tx_submit;
3003
3004 return &intr_segment->async_intr_tx;
3005}
3006
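/*
 * Platform probe: read the channel configuration from device properties,
 * register capabilities and callbacks with the dmaengine core and set up
 * interrupts for root or endpoint DMA operation.
 */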
3007static int xlnx_pcie_dma_driver_probe(struct platform_device *platform_dev)
3008{
3009 int err, i;
3010 struct xlnx_pcie_dma_device *xdev;
3011 static u16 board_number;
3012
3013 xdev = devm_kzalloc(&platform_dev->dev,
3014 sizeof(struct xlnx_pcie_dma_device), GFP_KERNEL);
3015
3016 if (!xdev)
3017 return -ENOMEM;
3018
3019#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
3020 xdev->dma_buf_ext_addr = true;
3021#else
3022 xdev->dma_buf_ext_addr = false;
3023#endif
3024
3025 xdev->is_rootdma = device_property_read_bool(&platform_dev->dev,
3026 "rootdma");
3027
3028 xdev->dev = &platform_dev->dev;
3029 xdev->board_number = board_number;
3030
3031 err = device_property_read_u32(&platform_dev->dev, "numchannels",
3032 &xdev->num_channels);
3033 if (err) {
3034 dev_err(&platform_dev->dev,
3035 "Unable to find numchannels property\n");
3036 goto platform_driver_probe_return;
3037 }
3038
3039 if (xdev->num_channels == 0 || xdev->num_channels >
3040 MAX_ALLOWED_CHANNELS_IN_HW) {
3041 dev_warn(&platform_dev->dev,
			 "Invalid numchannels property value\n");
3043 xdev->num_channels = MAX_ALLOWED_CHANNELS_IN_HW;
3044 }
3045
	xdev->channels = devm_kcalloc(&platform_dev->dev, xdev->num_channels,
				      sizeof(struct ps_pcie_dma_chan),
				      GFP_KERNEL);
3051 if (!xdev->channels) {
3052 err = -ENOMEM;
3053 goto platform_driver_probe_return;
3054 }
3055
3056 if (xdev->is_rootdma)
3057 err = read_rootdma_config(platform_dev, xdev);
3058 else
3059 err = read_epdma_config(platform_dev, xdev);
3060
3061 if (err) {
3062 dev_err(&platform_dev->dev,
3063 "Unable to initialize dma configuration\n");
3064 goto platform_driver_probe_return;
3065 }
3066
3067
3068 INIT_LIST_HEAD(&xdev->common.channels);
3069
3070 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
3071 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
3072 dma_cap_set(DMA_INTERRUPT, xdev->common.cap_mask);
3073 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3074
3075 xdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
3076 xdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_UNDEFINED;
3077 xdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3078 xdev->common.device_alloc_chan_resources =
3079 xlnx_ps_pcie_dma_alloc_chan_resources;
3080 xdev->common.device_free_chan_resources =
3081 xlnx_ps_pcie_dma_free_chan_resources;
3082 xdev->common.device_terminate_all = xlnx_ps_pcie_dma_terminate_all;
3083 xdev->common.device_tx_status = dma_cookie_status;
3084 xdev->common.device_issue_pending = xlnx_ps_pcie_dma_issue_pending;
3085 xdev->common.device_prep_dma_interrupt =
3086 xlnx_ps_pcie_dma_prep_interrupt;
3087 xdev->common.device_prep_dma_memcpy = xlnx_ps_pcie_dma_prep_memcpy;
3088 xdev->common.device_prep_slave_sg = xlnx_ps_pcie_dma_prep_slave_sg;
3089 xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
3090
3091 for (i = 0; i < xdev->num_channels; i++) {
3092 err = probe_channel_properties(platform_dev, xdev, i);
3093
3094 if (err != 0) {
3095 dev_err(xdev->dev,
3096 "Unable to read channel properties\n");
3097 goto platform_driver_probe_return;
3098 }
3099 }
3100
3101 if (xdev->is_rootdma)
3102 err = platform_irq_setup(xdev);
3103 else
3104 err = irq_setup(xdev);
3105 if (err) {
3106 dev_err(xdev->dev, "Cannot request irq lines for device %d\n",
3107 xdev->board_number);
3108 goto platform_driver_probe_return;
3109 }
3110
3111 err = dma_async_device_register(&xdev->common);
3112 if (err) {
3113 dev_err(xdev->dev,
3114 "Unable to register board %d with dma framework\n",
3115 xdev->board_number);
3116 goto platform_driver_probe_return;
3117 }
3118
3119 platform_set_drvdata(platform_dev, xdev);
3120
3121 board_number++;
3122
3123 dev_info(&platform_dev->dev, "PS PCIe Platform driver probed\n");
3124 return 0;
3125
3126platform_driver_probe_return:
3127 return err;
3128}
3129
3130static int xlnx_pcie_dma_driver_remove(struct platform_device *platform_dev)
3131{
3132 struct xlnx_pcie_dma_device *xdev =
3133 platform_get_drvdata(platform_dev);
3134 int i;
3135
3136 for (i = 0; i < xdev->num_channels; i++)
3137 xlnx_ps_pcie_dma_free_chan_resources(&xdev->channels[i].common);
3138
3139 dma_async_device_unregister(&xdev->common);
3140
3141 return 0;
3142}
3143
3144#ifdef CONFIG_OF
3145static const struct of_device_id xlnx_pcie_root_dma_of_ids[] = {
3146 { .compatible = "xlnx,ps_pcie_dma-1.00.a", },
3147 {}
3148};
3149MODULE_DEVICE_TABLE(of, xlnx_pcie_root_dma_of_ids);
3150#endif
3151
3152static struct platform_driver xlnx_pcie_dma_driver = {
3153 .driver = {
3154 .name = XLNX_PLATFORM_DRIVER_NAME,
3155 .of_match_table = of_match_ptr(xlnx_pcie_root_dma_of_ids),
3157 },
3158 .probe = xlnx_pcie_dma_driver_probe,
3159 .remove = xlnx_pcie_dma_driver_remove,
3160};
3161
3162int dma_platform_driver_register(void)
3163{
3164 return platform_driver_register(&xlnx_pcie_dma_driver);
3165}
3166
3167void dma_platform_driver_unregister(void)
3168{
3169 platform_driver_unregister(&xlnx_pcie_dma_driver);
3170}
3171