/*
 * altpciechdma - driver for the Altera PCI Express Chaining DMA ("chdma")
 * reference design. On probe it maps the device BARs, allocates a coherent
 * descriptor table in root complex (host) memory, and performs a DMA
 * loopback test. An optional character device interface (ALTPCIECHDMA_CDEV)
 * maps user buffers into scatter-gather lists for chaining DMA transfers.
 *
 * This software is licensed under the GNU General Public License (GPL).
 */
#include <linux/kernel.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/pci.h>

/* build the character device (cdev) interface? */
#ifndef ALTPCIECHDMA_CDEV
# define ALTPCIECHDMA_CDEV 0
#endif

#if ALTPCIECHDMA_CDEV
/* maximum size, in bytes, of a user buffer mapped for chaining DMA */
# define MAX_CHDMA_SIZE (8 * 1024 * 1024)
# include "mapper_user_to_sg.h"
#endif

#define DRV_NAME "altpciechdma"

/* number of BARs on the device */
#define APE_BAR_NUM (6)
/* BAR number of the RC slave memory window */
#define APE_BAR_RCSLAVE (0)
/* BAR number of the chaining DMA header registers */
#define APE_BAR_HEADER (2)

/* size of the coherent memory allocated for the descriptor table */
#define APE_CHDMA_TABLE_SIZE (4096)

/* maximum length, in bytes, of a single chaining DMA transfer */
#define APE_CHDMA_MAX_TRANSFER_LEN (253 * PAGE_SIZE)

/* minimum required length of each BAR; zero means the BAR is not used */
static const unsigned long bar_min_len[APE_BAR_NUM] =
	{ 32768, 0, 256, 0, 32768, 0 };

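/*
 * Chaining DMA header registers in endpoint BAR APE_BAR_HEADER; the
 * write engine header sits at offset 0x0 and the read engine header
 * directly follows it.
 */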
struct ape_chdma_header {
	/* number of descriptors in the table, plus engine control flags */
	u32 w0;
	/* upper and lower 32 bits of the descriptor table bus address */
	u32 bdt_addr_h;
	u32 bdt_addr_l;
	/* writing the index of the last descriptor here starts the engine */
	u32 w3;
} __attribute__ ((packed));

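/*
 * A single chaining DMA descriptor: the transfer length, the address in
 * endpoint memory, and the 64-bit bus address in root complex memory,
 * all stored little-endian.
 */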
struct ape_chdma_desc {
	/* number of DWORDs to transfer, plus control flags */
	u32 w0;
	/* address of the data in endpoint memory */
	u32 ep_addr;
	/* upper and lower 32 bits of the bus address in root complex memory */
	u32 rc_addr_h;
	u32 rc_addr_l;
} __attribute__ ((packed));

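/*
 * Descriptor table, allocated coherently in root complex memory. The
 * endpoint writes the index of the last descriptor it completed
 * (EPLAST) into w3, which the driver polls for completion.
 */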
struct ape_chdma_table {
	u32 reserved1[3];
	/* EPLAST: index of the last descriptor completed by the endpoint */
	u32 w3;
	/* the table holds at most 255 descriptors */
	struct ape_chdma_desc desc[255];
} __attribute__ ((packed));

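/*
 * Book keeping structure, one per bound device; allocated in probe()
 * and stored as driver data.
 */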
struct ape_dev {
	/* the kernel PCI device data structure */
	struct pci_dev *pci_dev;
	/* kernel virtual addresses of the mapped BARs */
	void __iomem *bar[APE_BAR_NUM];
	/* kernel virtual address of the coherent descriptor table */
	struct ape_chdma_table *table_virt;
	/* bus address of the descriptor table, as seen by the endpoint */
	dma_addr_t table_bus;
	/* non-zero if the device regions are in use (e.g. by another driver) */
	int in_use;
	/* non-zero if MSI is enabled for this device */
	int msi_enabled;
	/* non-zero if the PCI regions were requested */
	int got_regions;
	/* IRQ line successfully requested by this driver, -1 otherwise */
	int irq_line;
	/* board revision */
	u8 revision;
	/* interrupt count, incremented by the ISR */
	int irq_count;
#if ALTPCIECHDMA_CDEV
	/* character device number and structure */
	dev_t cdevno;
	struct cdev cdev;
	/* user-to-scatter-gather mapping */
	struct sg_mapping_t *sgm;
#endif
};

/* PCI ids of the devices this driver supports; 0x1172 is the Altera vendor id */
static const struct pci_device_id ids[] = {
	{ PCI_DEVICE(0x1172, 0xE001), },
	{ PCI_DEVICE(0x2071, 0x2071), },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);

#if ALTPCIECHDMA_CDEV
/* prototypes for the character device interface */
static int sg_init(struct ape_dev *ape);
static void sg_exit(struct ape_dev *ape);
#endif

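/*
 * altpciechdma_isr() - interrupt service routine
 *
 * The DMA engine is not serviced here; the ISR only counts interrupts
 * so dma_test() can report how many arrived during a transfer.
 */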
static irqreturn_t altpciechdma_isr(int irq, void *dev_id)
{
	struct ape_dev *ape = (struct ape_dev *)dev_id;
	if (!ape)
		return IRQ_NONE;
	ape->irq_count++;
	return IRQ_HANDLED;
}

/* Print the BAR layout of the device for debugging. */
static int __devinit scan_bars(struct ape_dev *ape, struct pci_dev *dev)
{
	int i;
	for (i = 0; i < APE_BAR_NUM; i++) {
		unsigned long bar_start = pci_resource_start(dev, i);
		if (bar_start) {
			unsigned long bar_end = pci_resource_end(dev, i);
			unsigned long bar_flags = pci_resource_flags(dev, i);
			printk(KERN_DEBUG "BAR%d 0x%08lx-0x%08lx flags 0x%08lx\n",
			       i, bar_start, bar_end, bar_flags);
		}
	}
	return 0;
}

/* Unmap the BAR regions that had been mapped earlier. */
static void unmap_bars(struct ape_dev *ape, struct pci_dev *dev)
{
	int i;
	for (i = 0; i < APE_BAR_NUM; i++) {
		/* is this BAR mapped? */
		if (ape->bar[i]) {
			/* unmap it */
			pci_iounmap(dev, ape->bar[i]);
			ape->bar[i] = NULL;
		}
	}
}

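/*
 * Map the device memory regions into kernel virtual address space,
 * after verifying that each required BAR meets its minimum length.
 */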
static int __devinit map_bars(struct ape_dev *ape, struct pci_dev *dev)
{
	int rc;
	int i;
	/* iterate over all the BARs */
	for (i = 0; i < APE_BAR_NUM; i++) {
		unsigned long bar_start = pci_resource_start(dev, i);
		unsigned long bar_end = pci_resource_end(dev, i);
		unsigned long bar_length = bar_end - bar_start + 1;
		ape->bar[i] = NULL;
		/* skip BARs this driver does not use */
		if (!bar_min_len[i])
			continue;
		/* do not map BARs that are not present */
		if (!bar_start || !bar_end) {
			printk(KERN_DEBUG "BAR #%d is not present?!\n", i);
			rc = -ENODEV;
			goto fail;
		}
		if (bar_length < bar_min_len[i]) {
			printk(KERN_DEBUG "BAR #%d length = %lu bytes but driver requires at least %lu bytes\n",
			       i, bar_length, bar_min_len[i]);
			rc = -ENODEV;
			goto fail;
		}
		/* map the memory region into kernel virtual address space */
		ape->bar[i] = pci_iomap(dev, i, bar_min_len[i]);
		if (!ape->bar[i]) {
			printk(KERN_DEBUG "Could not map BAR #%d.\n", i);
			rc = -ENODEV;
			goto fail;
		}
		printk(KERN_DEBUG "BAR[%d] mapped at 0x%p with length %lu(/%lu).\n",
		       i, ape->bar[i], bar_min_len[i], bar_length);
	}
	/* successfully mapped all required BAR regions */
	rc = 0;
	goto success;
fail:
	/* unmap any BARs that we did map */
	unmap_bars(ape, dev);
success:
	return rc;
}

#if 0
/* Write a known sequence into the RC slave BAR and read it back. */
static void __devinit rcslave_test(struct ape_dev *ape, struct pci_dev *dev)
{
	u32 *rcslave_mem = (u32 *)ape->bar[APE_BAR_RCSLAVE];
	u32 result = 0;
	/* seed the sequence with the current jiffies */
	u32 seed = (u32)jiffies;
	u32 value = seed;
	int i;

	/* write a sequence into the window */
	value = seed;
	for (i = 1024; i < 32768 / 4; i++) {
		printk(KERN_DEBUG "Writing 0x%08x to 0x%p.\n",
		       (u32)value, (void *)rcslave_mem + i);
		iowrite32(value, rcslave_mem + i);
		value++;
	}
	/* read back and verify the sequence */
	value = seed;
	for (i = 1024; i < 32768 / 4; i++) {
		result = ioread32(rcslave_mem + i);
		if (result != value) {
			printk(KERN_DEBUG "Wrote 0x%08x to 0x%p, but read back 0x%08x.\n",
			       (u32)value, (void *)rcslave_mem + i, (u32)result);
			break;
		}
		value++;
	}
}
#endif

/* upper 32 bits of a bus address; written as a double shift to avoid a
 * ">> 32" warning on 32-bit builds */
#define pci_dma_h(addr) ((addr >> 16) >> 16)
/* lower 32 bits of a bus address */
#define pci_dma_l(addr) (addr & 0xffffffffUL)
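/*
 * ape_chdma_desc_set() - fill a single chaining DMA descriptor
 *
 * @desc: descriptor to fill
 * @addr: bus address of the buffer in root complex memory
 * @ep_addr: address of the buffer in endpoint memory
 * @len: transfer length in bytes; must be a multiple of 4, as the
 * descriptor stores the length in DWORDs
 */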
static inline void ape_chdma_desc_set(struct ape_chdma_desc *desc,
	dma_addr_t addr, u32 ep_addr, int len)
{
	BUG_ON(len & 3);
	desc->w0 = cpu_to_le32(len / 4);
	desc->ep_addr = cpu_to_le32(ep_addr);
	desc->rc_addr_h = cpu_to_le32(pci_dma_h(addr));
	desc->rc_addr_l = cpu_to_le32(pci_dma_l(addr));
}

#if ALTPCIECHDMA_CDEV
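/*
 * ape_sg_to_chdma_table() - build a chaining DMA descriptor table from a
 * DMA-mapped scatter-gather list, merging bus-contiguous entries into
 * single descriptors; returns the number of descriptors written.
 */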
static int ape_sg_to_chdma_table(struct scatterlist *sgl, int nents, int first,
	struct ape_chdma_desc *desc, u32 ep_addr)
{
	int i = first, j = 0;
	/* inspect the first scatter-gather entry */
	dma_addr_t addr = sg_dma_address(&sgl[i]);
	unsigned int len = sg_dma_len(&sgl[i]);
	/* contiguous block being accumulated */
	dma_addr_t cont_addr = addr;
	unsigned int cont_len = len;
	/* iterate over all remaining entries but the last, writing at most
	 * 25 descriptors */
	for (; j < 25 && i < nents - 1; i++) {
		/* bus address of the next entry, i + 1 */
		dma_addr_t next = sg_dma_address(&sgl[i + 1]);
		/* length of this entry, i */
		len = sg_dma_len(&sgl[i]);
		printk(KERN_DEBUG "%04d: addr=0x%Lx length=0x%08x\n", i,
		       (unsigned long long)addr, len);
		/* entry i + 1 is non-contiguous with entry i? */
		if (next != addr + len) {
			/* close off the current contiguous block */
			printk(KERN_DEBUG "%4d: cont_addr=0x%Lx cont_len=0x%08x\n", j,
			       (unsigned long long)cont_addr, cont_len);
			/* set a descriptor for the contiguous transfer */
			ape_chdma_desc_set(&desc[j], cont_addr, ep_addr, cont_len);
			/* next endpoint memory address */
			ep_addr += cont_len;
			/* start a new contiguous block */
			cont_addr = next;
			cont_len = 0;
			j++;
		}
		/* add this entry to the current contiguous block */
		cont_len += len;
		/* go to entry i + 1 */
		addr = next;
	}
	/* TODO: also write a descriptor for the final contiguous block */
	printk(KERN_DEBUG "%04d: addr=0x%Lx length=0x%08x\n", i,
	       (unsigned long long)addr, len);
	printk(KERN_DEBUG "%4d: cont_addr=0x%Lx length=0x%08x\n", j,
	       (unsigned long long)cont_addr, cont_len);
	j++;
	return j;
}
#endif /* ALTPCIECHDMA_CDEV */

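/*
 * compare() - compare two buffers word by word; returns 0 when equal,
 * -1 otherwise. Prints the first few miscompares for debugging.
 */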
static inline int compare(u32 *p, u32 *q, int len)
{
	int result = -1;
	int fail = 0;
	int i;
	for (i = 0; i < len / 4; i++) {
		if (*p == *q) {
			/* every so many words, show a matching pair */
			if ((i & 255) == 0)
				printk(KERN_DEBUG "[%p] = 0x%08x [%p] = 0x%08x\n", p, *p, q, *q);
		} else {
			fail++;
			/* show the first few miscompares */
			if (fail < 10)
				printk(KERN_DEBUG "[%p] = 0x%08x != [%p] = 0x%08x ?!\n", p, *p, q, *q);
			else if (fail == 10)
				printk(KERN_DEBUG "---more errors follow! not printed---\n");
			else
				/* stop comparing after this many errors */
				break;
		}
		p++;
		q++;
	}
	if (!fail)
		result = 0;
	return result;
}
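/*
 * dma_test() - perform a DMA loopback test
 *
 * Allocates a coherent buffer, then builds descriptor tables that make
 * the endpoint first read the lower half of the buffer into its own
 * memory and then write it back into the upper half; finally both
 * halves are compared.
 */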
static int __devinit dma_test(struct ape_dev *ape, struct pci_dev *dev)
{
	/* test result; guilty until proven innocent */
	int result = -1;
	/* the write engine header sits at offset 0x0 of the header BAR */
	struct ape_chdma_header *write_header = (struct ape_chdma_header *)ape->bar[APE_BAR_HEADER];
	/* the read engine header directly follows the write header */
	struct ape_chdma_header *read_header = write_header + 1;
	/* virtual address of the DMA buffer */
	u8 *buffer_virt = NULL;
	/* bus address of the DMA buffer */
	dma_addr_t buffer_bus = 0;
	int i, n = 0, irq_count;
	/* temporary used to construct 32-bit control words */
	u32 w;

	printk(KERN_DEBUG "dma_test(), PAGE_SIZE = 0x%x\n", (int)PAGE_SIZE);
	printk(KERN_DEBUG "write_header = 0x%p.\n", write_header);
	printk(KERN_DEBUG "read_header = 0x%p.\n", read_header);
	printk(KERN_DEBUG "&write_header->w3 = 0x%p\n", &write_header->w3);
	printk(KERN_DEBUG "&read_header->w3 = 0x%p\n", &read_header->w3);
	printk(KERN_DEBUG "ape->table_virt = 0x%p.\n", ape->table_virt);

	if (!write_header || !read_header || !ape->table_virt)
		goto fail;

	/* allocate and map a coherent, DMA-able buffer of four pages */
	buffer_virt = (u8 *)pci_alloc_consistent(dev, PAGE_SIZE * 4, &buffer_bus);
	if (!buffer_virt) {
		printk(KERN_DEBUG "Could not allocate coherent DMA buffer.\n");
		goto fail;
	}
	printk(KERN_DEBUG "Allocated cache-coherent DMA buffer (virtual address = %p, bus address = 0x%016llx).\n",
	       buffer_virt, (u64)buffer_bus);

	/* fill the buffer with a known pattern (its own addresses) */
	for (i = 0; i < 4 * PAGE_SIZE; i += 4)
#if 0
		*(u32 *)(buffer_virt + i) = i / PAGE_SIZE + 1;
#else
		*(u32 *)(buffer_virt + i) = (u32)(unsigned long)(buffer_virt + i);
#endif
#if 0
	compare((u32 *)buffer_virt, (u32 *)(buffer_virt + 2 * PAGE_SIZE), 8192);
#endif

#if 0
	/* clear the upper half of the buffer (the DMA write target) */
	for (i = 2 * PAGE_SIZE; i < 4 * PAGE_SIZE; i += 4)
		*(u32 *)(buffer_virt + i) = 0;
#endif

	/* invalidate EPLAST with a value outside 0-255; 0xFADE is from the testbench */
	ape->table_virt->w3 = cpu_to_le32(0x0000FADE);

	/* fill in the first descriptor: read 2 * PAGE_SIZE bytes from the
	 * RC buffer into endpoint memory at address 4096 */
	n = 0;
	ape_chdma_desc_set(&ape->table_virt->desc[n], buffer_bus, 4096, 2 * PAGE_SIZE);
#if 1
	for (i = 0; i < 255; i++)
		ape_chdma_desc_set(&ape->table_virt->desc[i], buffer_bus, 4096, 2 * PAGE_SIZE);
	/* index of the last descriptor */
	n = i - 1;
#endif
#if 0
	/* add a second, smaller descriptor */
	n++;
	ape_chdma_desc_set(&ape->table_virt->desc[n], buffer_bus + 1024, 4096 + 1024, 1024);
#endif

#if 1
	/* request an MSI on completion of the last descriptor */
	if (ape->msi_enabled)
		ape->table_virt->desc[n].w0 |= cpu_to_le32(1UL << 16);
#endif
#if 0
	/* dump the read descriptor table for debugging */
	printk(KERN_DEBUG "Descriptor Table (Read, in Root Complex Memory, # = %d)\n", n + 1);
	for (i = 0; i < 4 + (n + 1) * 4; i += 4) {
		u32 *p = (u32 *)ape->table_virt;
		p += i;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (LEN=0x%x)\n", (u32)p, (u32)p & 15, *p, 4 * le32_to_cpu(*p));
		p++;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (EPA=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
		p++;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCH=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
		p++;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCL=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
	}
#endif

	/* number of descriptors, plus bit 18 to enable EPLAST write back */
	w = (u32)(n + 1);
	w |= (1UL << 18);
#if 0
	if (ape->msi_enabled)
		w |= (1UL << 17);
#endif
	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", w, (void *)&read_header->w0);
	iowrite32(w, &read_header->w0);

	/* write the descriptor table bus address into the read engine header */
	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", (u32)((ape->table_bus >> 16) >> 16), (void *)&read_header->bdt_addr_h);
	iowrite32(pci_dma_h(ape->table_bus), &read_header->bdt_addr_h);

	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", (u32)(ape->table_bus & 0xffffffffUL), (void *)&read_header->bdt_addr_l);
	iowrite32(pci_dma_l(ape->table_bus), &read_header->bdt_addr_l);

	wmb();
	printk(KERN_DEBUG "Flush posted writes\n");
	/* FIXME: flushing posted writes needs a dummy read from the device */
#if 0
	(void)ioread32();
#endif

	/* remember the interrupt count before the transfer */
	irq_count = ape->irq_count;

	/* write the index of the last descriptor to start the DMA read */
	printk(KERN_DEBUG "\nStart DMA read\n");
	printk(KERN_DEBUG "writing 0x%08x to 0x%p\n", (u32)n, (void *)&read_header->w3);
	iowrite32(n, &read_header->w3);
	printk(KERN_DEBUG "EPLAST = %lu\n", le32_to_cpu(*(u32 *)&ape->table_virt->w3) & 0xffffUL);

	wmb();

#if 0
	(void)ioread32();
#endif
	printk(KERN_DEBUG "POLL FOR READ:\n");
	/* poll for completion: EPLAST must become the last descriptor index */
	for (i = 0; i < 100; i++) {
		volatile u32 *p = &ape->table_virt->w3;
		u32 eplast = le32_to_cpu(*p) & 0xffffUL;
		printk(KERN_DEBUG "EPLAST = %u, n = %d\n", eplast, n);
		if (eplast == n) {
			printk(KERN_DEBUG "DONE\n");
			/* report the number of interrupts seen during the transfer */
			printk(KERN_DEBUG "#IRQs during transfer: %d\n", ape->irq_count - irq_count);
			break;
		}
		udelay(100);
	}

	/* invalidate EPLAST again with a value outside 0-255 */
	ape->table_virt->w3 = cpu_to_le32(0x0000FADE);

	/* fill in the first descriptor: write 2 * PAGE_SIZE bytes from
	 * endpoint memory at address 4096 into the upper half of the buffer */
	n = 0;
	ape_chdma_desc_set(&ape->table_virt->desc[n], buffer_bus + 8192, 4096, 2 * PAGE_SIZE);
#if 1
	for (i = 0; i < 255; i++)
		ape_chdma_desc_set(&ape->table_virt->desc[i], buffer_bus + 8192, 4096, 2 * PAGE_SIZE);
	/* index of the last descriptor */
	n = i - 1;
#endif
#if 1
	/* request an MSI on completion of the last descriptor */
	if (ape->msi_enabled)
		ape->table_virt->desc[n].w0 |= cpu_to_le32(1UL << 16);
#endif
#if 0
	/* dump the write descriptor table for debugging */
	printk(KERN_DEBUG "Descriptor Table (Write, in Root Complex Memory, # = %d)\n", n + 1);
	for (i = 0; i < 4 + (n + 1) * 4; i += 4) {
		u32 *p = (u32 *)ape->table_virt;
		p += i;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (LEN=0x%x)\n", (u32)p, (u32)p & 15, *p, 4 * le32_to_cpu(*p));
		p++;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (EPA=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
		p++;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCH=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
		p++;
		printk(KERN_DEBUG "0x%08x/0x%02x: 0x%08x (RCL=0x%x)\n", (u32)p, (u32)p & 15, *p, le32_to_cpu(*p));
	}
#endif

	/* number of descriptors, plus bit 18 to enable EPLAST write back */
	w = (u32)(n + 1);
	w |= (u32)(1UL << 18);
#if 0
	if (ape->msi_enabled)
		w |= (1UL << 17);
#endif
	iowrite32(w, &write_header->w0);
	iowrite32(pci_dma_h(ape->table_bus), &write_header->bdt_addr_h);
	iowrite32(pci_dma_l(ape->table_bus), &write_header->bdt_addr_l);

	wmb();
	/* FIXME: flushing posted writes needs a dummy read from the device */
#if 0
	(void)ioread32();
#endif
	irq_count = ape->irq_count;

	/* write the index of the last descriptor to start the DMA write */
	printk(KERN_DEBUG "\nStart DMA write\n");
	iowrite32(n, &write_header->w3);

	wmb();

	printk(KERN_DEBUG "POLL FOR WRITE:\n");
	/* poll for completion: EPLAST must become the last descriptor index */
	for (i = 0; i < 100; i++) {
		volatile u32 *p = &ape->table_virt->w3;
		u32 eplast = le32_to_cpu(*p) & 0xffffUL;
		printk(KERN_DEBUG "EPLAST = %u, n = %d\n", eplast, n);
		if (eplast == n) {
			printk(KERN_DEBUG "DONE\n");
			printk(KERN_DEBUG "#IRQs during transfer: %d\n", ape->irq_count - irq_count);
			break;
		}
		udelay(100);
	}
	/* clear the engine control words */
	iowrite32(0x0000ffffUL, &write_header->w0);
	iowrite32(0x0000ffffUL, &read_header->w0);

	wmb();

#if 0
	(void)ioread32();
#endif
	/* compare the lower half (DMA read source) with the upper half (DMA write target) */
	result = compare((u32 *)buffer_virt, (u32 *)(buffer_virt + 2 * PAGE_SIZE), 8192);
	printk(KERN_DEBUG "DMA loop back test %s.\n", result ? "FAILED" : "PASSED");

	pci_free_consistent(dev, 4 * PAGE_SIZE, buffer_virt, buffer_bus);
fail:
	printk(KERN_DEBUG "dma_test() end, result %d\n", result);
	return result;
}
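/*
 * probe() - called when a PCI device of interest is bound to this driver
 *
 * Allocates the book keeping structure and the coherent descriptor
 * table, enables the device, sets up MSI and the DMA mask, requests the
 * interrupt, maps the BARs, and runs the DMA loopback test.
 */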
static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int rc = 0;
	struct ape_dev *ape = NULL;
	u8 irq_pin, irq_line;
	printk(KERN_DEBUG "probe(dev = 0x%p, pciid = 0x%p)\n", dev, id);

	/* allocate the per-device book keeping structure */
	ape = kzalloc(sizeof(struct ape_dev), GFP_KERNEL);
	if (!ape) {
		printk(KERN_DEBUG "Could not kzalloc()ate memory.\n");
		rc = -ENOMEM;
		goto err_ape;
	}
	ape->pci_dev = dev;
	dev_set_drvdata(&dev->dev, ape);
	printk(KERN_DEBUG "probe() ape = 0x%p\n", ape);

	printk(KERN_DEBUG "sizeof(struct ape_chdma_table) = %d.\n",
	       (int)sizeof(struct ape_chdma_table));
	/* the descriptor table must fit in the allocation made below */
	BUG_ON(sizeof(struct ape_chdma_table) > APE_CHDMA_TABLE_SIZE);

	/* allocate and map coherent memory for the descriptor table */
	ape->table_virt = (struct ape_chdma_table *)pci_alloc_consistent(dev,
		APE_CHDMA_TABLE_SIZE, &ape->table_bus);
	/* could not allocate? */
	if (!ape->table_virt) {
		printk(KERN_DEBUG "Could not allocate coherent memory for the descriptor table.\n");
		rc = -ENOMEM;
		goto err_table;
	}

	printk(KERN_DEBUG "table_virt = %p, table_bus = 0x%016llx.\n",
	       ape->table_virt, (u64)ape->table_bus);

	/* enable the device */
	rc = pci_enable_device(dev);
	if (rc) {
		printk(KERN_DEBUG "pci_enable_device() failed\n");
		goto err_enable;
	}

	/* enable bus mastering so the device can initiate DMA */
	pci_set_master(dev);
	/* try to enable message-signaled interrupts */
	rc = pci_enable_msi(dev);
	if (rc) {
		printk(KERN_DEBUG "Could not enable MSI interrupting.\n");
		ape->msi_enabled = 0;
	} else {
		printk(KERN_DEBUG "Enabled MSI interrupting.\n");
		ape->msi_enabled = 1;
	}

	pci_read_config_byte(dev, PCI_REVISION_ID, &ape->revision);
#if 0
	/* example: reject a board revision this driver does not support */
	if (ape->revision == 0x42) {
		printk(KERN_DEBUG "Revision 0x42 is not supported by this driver.\n");
		rc = -ENODEV;
		goto err_rev;
	}
#endif
	/* request the I/O and memory regions of all BARs */
	rc = pci_request_regions(dev, DRV_NAME);
	/* could not request all regions? */
	if (rc) {
		/* assume the device is in use, so do not disable it later */
		ape->in_use = 1;
		goto err_regions;
	}
	ape->got_regions = 1;

#if 1 /* prefer 64-bit DMA addressing */
	/* query for 64-bit DMA addressing support */
	if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
		printk(KERN_DEBUG "Using a 64-bit DMA mask.\n");
	} else
#endif
	if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
		printk(KERN_DEBUG "Could not set 64-bit DMA mask.\n");
		pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32));
		printk(KERN_DEBUG "Using a 32-bit DMA mask.\n");
	} else {
		printk(KERN_DEBUG "No suitable DMA possible.\n");
		rc = -EIO;
		goto err_mask;
	}

	rc = pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin);
	/* could not query? */
	if (rc)
		goto err_irq;
	printk(KERN_DEBUG "IRQ pin #%d (0=none, 1=INTA#...4=INTD#).\n", irq_pin);

	/* the legacy interrupt line as set by the BIOS */
	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq_line);
	/* could not query? */
	if (rc) {
		printk(KERN_DEBUG "Could not query PCI_INTERRUPT_LINE, error %d\n", rc);
		goto err_irq;
	}
	printk(KERN_DEBUG "IRQ line #%d.\n", irq_line);
#if 1
	/* use the IRQ number the kernel assigned at enable time */
	irq_line = dev->irq;
	/* request the shared IRQ; pass the device pointer as dev_id */
	rc = request_irq(irq_line, altpciechdma_isr, IRQF_SHARED, DRV_NAME, (void *)ape);
	if (rc) {
		printk(KERN_DEBUG "Could not request IRQ #%d, error %d\n", irq_line, rc);
		ape->irq_line = -1;
		goto err_irq;
	}
	/* remember which IRQ we allocated */
	ape->irq_line = (int)irq_line;
	printk(KERN_DEBUG "Successfully requested IRQ #%d with dev_id 0x%p\n", irq_line, ape);
#endif

	/* show the BAR layout */
	scan_bars(ape, dev);
	/* map the BARs into kernel virtual address space */
	rc = map_bars(ape, dev);
	if (rc)
		goto err_map;
#if ALTPCIECHDMA_CDEV
	/* initialize the character device interface */
	rc = sg_init(ape);
	if (rc)
		goto err_cdev;
#endif
	/* perform the DMA loopback test; the driver loads regardless of the result */
	rc = dma_test(ape, dev);
	(void)rc;
	/* successfully took the device */
	rc = 0;
	printk(KERN_DEBUG "probe() successful.\n");
	goto end;
#if ALTPCIECHDMA_CDEV
err_cdev:
	/* unmap the BARs */
	unmap_bars(ape, dev);
#endif
err_map:
	/* free the allocated IRQ */
	if (ape->irq_line >= 0)
		free_irq(ape->irq_line, (void *)ape);
err_irq:
	if (ape->msi_enabled)
		pci_disable_msi(dev);
	/* disable the device only if it was not in use */
	if (!ape->in_use)
		pci_disable_device(dev);
	if (ape->got_regions)
		pci_release_regions(dev);
err_mask:
err_regions:
err_enable:
	if (ape->table_virt)
		pci_free_consistent(dev, APE_CHDMA_TABLE_SIZE, ape->table_virt, ape->table_bus);
err_table:
	kfree(ape);
err_ape:
end:
	return rc;
}

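/*
 * remove() - called when the device is removed from this driver; undoes
 * everything probe() set up, in reverse order.
 */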
static void __devexit remove(struct pci_dev *dev)
{
	struct ape_dev *ape = dev_get_drvdata(&dev->dev);

	printk(KERN_DEBUG "remove(dev = 0x%p) where ape = 0x%p\n", dev, ape);

#if ALTPCIECHDMA_CDEV
	/* remove the character device interface */
	sg_exit(ape);
#endif
	/* free the coherent descriptor table */
	if (ape->table_virt)
		pci_free_consistent(dev, APE_CHDMA_TABLE_SIZE, ape->table_virt, ape->table_bus);

	/* free the allocated IRQ */
	if (ape->irq_line >= 0) {
		printk(KERN_DEBUG "Freeing IRQ #%d for dev_id 0x%08lx.\n",
		       ape->irq_line, (unsigned long)ape);
		free_irq(ape->irq_line, (void *)ape);
	}
	/* disable MSI */
	if (ape->msi_enabled) {
		pci_disable_msi(dev);
		ape->msi_enabled = 0;
	}
	/* unmap the BARs, then disable and release the device */
	unmap_bars(ape, dev);
	if (!ape->in_use)
		pci_disable_device(dev);
	if (ape->got_regions)
		pci_release_regions(dev);
	/* free the per-device book keeping structure allocated in probe() */
	kfree(ape);
}

#if ALTPCIECHDMA_CDEV
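/*
 * sg_open() - character device open(); finds the device structure the
 * cdev is embedded in and creates the user-to-scatter-gather mapper.
 */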
static int sg_open(struct inode *inode, struct file *file)
{
	struct ape_dev *ape;
	printk(KERN_DEBUG DRV_NAME "_open()\n");
	/* find the device structure this cdev is embedded in */
	ape = container_of(inode->i_cdev, struct ape_dev, cdev);
	/* pass the device pointer to the other file operations */
	file->private_data = ape;
	/* create a user-to-scatter-gather mapper for up to MAX_CHDMA_SIZE bytes */
	ape->sgm = sg_create_mapper(MAX_CHDMA_SIZE);
	return 0;
}

/*
 * sg_close() - character device release(); destroys the mapper created
 * at open time.
 */
static int sg_close(struct inode *inode, struct file *file)
{
	/* fetch device-specific data stored earlier during open */
	struct ape_dev *ape = (struct ape_dev *)file->private_data;
	printk(KERN_DEBUG DRV_NAME "_close()\n");
	/* destroy the user-to-scatter-gather mapper */
	sg_destroy_mapper(ape->sgm);
	return 0;
}

/* sg_read() - character device read(); a stub, no data is transferred yet */
static ssize_t sg_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
{
	/* fetch device-specific data stored earlier during open */
	struct ape_dev *ape = (struct ape_dev *)file->private_data;
	(void)ape;
	printk(KERN_DEBUG DRV_NAME "_read(buf=0x%p, count=%lld, pos=%llu)\n",
	       buf, (s64)count, (u64)*pos);
	return count;
}
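/*
 * sg_write() - character device write(); chops the user buffer into
 * chunks of at most APE_CHDMA_MAX_TRANSFER_LEN bytes, maps each chunk
 * into a DMA scatter-gather list and builds a descriptor table from it.
 * Starting the transfer and waiting for its completion are still TODO.
 */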
static ssize_t sg_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
	int hwnents, tents;
	size_t transfer_len, remaining = count, done = 0;
	u64 transfer_addr = (u64)(unsigned long)buf;
	/* fetch device-specific data stored earlier during open */
	struct ape_dev *ape = (struct ape_dev *)file->private_data;
	printk(KERN_DEBUG DRV_NAME "_write(buf=0x%p, count=%lld, pos=%llu)\n",
	       buf, (s64)count, (u64)*pos);

	while (remaining > 0) {
		/* limit the DMA transfer size */
		transfer_len = (remaining < APE_CHDMA_MAX_TRANSFER_LEN) ? remaining :
			APE_CHDMA_MAX_TRANSFER_LEN;
		/* get the user-space pages for this chunk */
		sgm_map_user_pages(ape->sgm, transfer_addr, transfer_len, 0);
		printk(KERN_DEBUG DRV_NAME "_write() mapped_pages=%d\n", ape->sgm->mapped_pages);
		/* map the scatter-gather list for DMA */
		hwnents = pci_map_sg(ape->pci_dev, ape->sgm->sgl, ape->sgm->mapped_pages, DMA_TO_DEVICE);
		printk(KERN_DEBUG DRV_NAME "_write() hwnents=%d\n", hwnents);
		/* build the descriptor table from the scatter-gather list */
		tents = ape_sg_to_chdma_table(ape->sgm->sgl, hwnents, 0, &ape->table_virt->desc[0], 4096);
		printk(KERN_DEBUG DRV_NAME "_write() tents=%d\n", tents);
#if 0
		while (tables) {
			/* TODO: start the DMA transfer and wait for completion */
		}
		/* TODO: put ourselves on a wait queue */
#endif
		/* unmap the scatter-gather list from DMA */
		pci_unmap_sg(ape->pci_dev, ape->sgm->sgl, ape->sgm->mapped_pages, DMA_TO_DEVICE);
		/* unmap the user-space pages */
		sgm_unmap_user_pages(ape->sgm, 1);
		/* book keeping for this chunk */
		transfer_addr += transfer_len;
		remaining -= transfer_len;
		done += transfer_len;
	}
	return done;
}

/* character device file operations */
static const struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.open = sg_open,
	.release = sg_close,
	.read = sg_read,
	.write = sg_write,
};
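/*
 * sg_init() - allocate a character device node, couple the file
 * operations to it and bring it live; called from probe().
 */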
static int sg_init(struct ape_dev *ape)
{
	int rc;
	printk(KERN_DEBUG DRV_NAME " sg_init()\n");
	/* allocate a dynamically assigned character device node */
	rc = alloc_chrdev_region(&ape->cdevno, 0, 1, DRV_NAME);
	/* allocation failed? */
	if (rc < 0) {
		printk(KERN_DEBUG "alloc_chrdev_region() = %d\n", rc);
		goto fail_alloc;
	}
	/* couple the device file operations to the character device */
	cdev_init(&ape->cdev, &sg_fops);
	ape->cdev.owner = THIS_MODULE;
	/* bring the character device live */
	rc = cdev_add(&ape->cdev, ape->cdevno, 1);
	if (rc < 0) {
		printk(KERN_DEBUG "cdev_add() = %d\n", rc);
		goto fail_add;
	}
	printk(KERN_DEBUG "altpciechdma = %d:%d\n", MAJOR(ape->cdevno), MINOR(ape->cdevno));
	return 0;
fail_add:
	/* free the dynamically allocated character device node */
	unregister_chrdev_region(ape->cdevno, 1);
fail_alloc:
	return rc;
}

/*
 * sg_exit() - tear down the character device; called from remove().
 */
static void sg_exit(struct ape_dev *ape)
{
	printk(KERN_DEBUG DRV_NAME " sg_exit()\n");
	/* remove the character device */
	cdev_del(&ape->cdev);
	/* free the dynamically allocated character device node */
	unregister_chrdev_region(ape->cdevno, 1);
}

#endif /* ALTPCIECHDMA_CDEV */

/* how the driver is identified to, and probed by, the PCI subsystem */
static struct pci_driver pci_driver = {
	.name = DRV_NAME,
	.id_table = ids,
	.probe = probe,
	.remove = __devexit_p(remove),
	/* resume and suspend are optional */
};

/*
 * alterapciechdma_init() - module initialization; registers the PCI driver.
 */
static int __init alterapciechdma_init(void)
{
	int rc;
	printk(KERN_DEBUG DRV_NAME " init(), built at " __DATE__ " " __TIME__ "\n");
	/* register this driver with the PCI bus driver */
	rc = pci_register_driver(&pci_driver);
	if (rc < 0)
		return rc;
	return 0;
}

/*
 * alterapciechdma_exit() - module cleanup; unregisters the PCI driver.
 */
static void __exit alterapciechdma_exit(void)
{
	printk(KERN_DEBUG DRV_NAME " exit(), built at " __DATE__ " " __TIME__ "\n");
	/* unregister this driver from the PCI bus driver */
	pci_unregister_driver(&pci_driver);
}

MODULE_LICENSE("GPL");

module_init(alterapciechdma_init);
module_exit(alterapciechdma_exit);