1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/init.h>
37#include <linux/mm.h>
38#include <linux/spinlock.h>
39#include <linux/slab.h>
40#include <linux/string.h>
41#include <linux/pci.h>
42#include <linux/reboot.h>
43#include <linux/proc_fs.h>
44#include <linux/seq_file.h>
45#include <linux/scatterlist.h>
46#include <linux/iommu-helper.h>
47#include <linux/export.h>
48
49#include <asm/byteorder.h>
50#include <asm/cache.h>
51#include <asm/uaccess.h>
52#include <asm/page.h>
53#include <asm/dma.h>
54#include <asm/io.h>
55#include <asm/hardware.h>
56#include <asm/parisc-device.h>
57
58
59
60
61
#define MODULE_NAME "ccio"

/*
** Compile-time debug knobs: define these to enable verbose printk
** tracing of the resource allocator (RES), the per-mapping runtime
** path (RUN), driver bring-up (INIT) and the scatter-gather path
** (RUN_SG) respectively.
*/
#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/*
** Statistics are only reported via procfs, but collecting them adds
** work to every mapping call, so they default to off even then.
*/
#undef CCIO_COLLECT_STATS
#endif

#include <asm/runway.h>

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...) printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE inline
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

/* Device IDs matched by ccio_tbl below (U2 / Uturn I/O adapters). */
#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC 0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC 0x502

#define IOA_NORMAL_MODE 0x00020080	/* value written to io_control at init */
#define CMD_TLB_DIRECT_WRITE 35		/* io_command opcode: I/O TLB write */
#define CMD_TLB_PURGE 33		/* io_command opcode: purge I/O TLB entry */
112
/*
** Register layout of the U2/Uturn I/O Adapter, accessed through a
** single __iomem mapping (see ccio_probe).  The "unusedN" pads keep
** the named registers at their hardware offsets; field order is the
** hardware order and must not be changed.
*/
struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t unused1[12];
	uint32_t io_command;		/* TLB write/purge commands */
	uint32_t io_status;
	uint32_t io_control;		/* mode control (IOA_NORMAL_MODE) */
	int32_t unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t io_err_resp;
	uint32_t io_err_info;
	uint32_t io_err_req;
	uint32_t io_err_resp_hi;
	uint32_t io_tlb_entry_m;
	uint32_t io_tlb_entry_l;
	uint32_t unused3[1];
	uint32_t io_pdir_base;		/* physical base of the I/O pdir */
	uint32_t io_io_low_hv;		/* routed-MMIO range (HV), addr >> 16 */
	uint32_t io_io_high_hv;
	uint32_t unused4[1];
	uint32_t io_chain_id_mask;	/* TLB chain-id mask (see ccio_ioc_init) */
	uint32_t unused5[2];
	uint32_t io_io_low;		/* routed-MMIO range, addr >> 16 */
	uint32_t io_io_high;
};
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
/*
** Per-IOC (I/O controller) state.  One instance is allocated per
** discovered U2/Uturn adapter and linked into ioc_list.
*/
struct ioc {
	struct ioa_registers __iomem *ioc_regs;	/* mapped adapter registers */
	u8 *res_map;			/* resource map: 1 bit per pdir entry */
	u64 *pdir_base;			/* I/O page directory (one u64 per IO page) */
	u32 pdir_size;			/* pdir size in bytes */
	u32 res_hint;			/* byte index where the next search starts
					   (circular first-fit, see ccio_alloc_range) */
	u32 res_size;			/* resource map size in bytes */
	spinlock_t res_lock;		/* protects res_map/pdir updates */

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];	/* ring of search costs (CPU cycles) */
	unsigned long avg_idx;		/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;	/* map_single call/page counters */
	unsigned long msingle_pages;
	unsigned long msg_calls;	/* map_sg counters */
	unsigned long msg_pages;
	unsigned long usingle_calls;	/* unmap_single counters */
	unsigned long usingle_pages;
	unsigned long usg_calls;	/* unmap_sg counters */
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;	/* set by ccio_cujo20_fixup() */

	/* Fields below are not used on the mapping fast path. */
	u32 chainid_shift;		/* bit position of the TLB chain id */
	struct ioc *next;		/* singly linked list of discovered IOCs */
	const char *name;		/* "U2" or "UTurn" */
	unsigned int hw_path;		/* hardware path this IOC sits on */
	struct pci_dev *fake_pci_dev;	/* fake pci_dev for non-PCI devices */
	struct resource mmio_region[2];	/* the two "routed" MMIO ranges */
};

/* Head and length of the list of all discovered IOCs. */
static struct ioc *ioc_list;
static int ioc_count;
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
/* I/O virtual page parameters: CCIO uses the CPU page size. */
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK

/* Compose / decompose an I/O virtual address into page + offset. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)

/* Map between a pdir index and the I/O virtual page it describes. */
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
289
290
291
292
293
294
/*
** Scan the resource map in u<size> chunks looking for a fully free
** chunk (no bits of "mask" set) that also does not cross the device's
** DMA segment boundary.  On success the chunk is claimed, the search
** hint is advanced, and control jumps to the caller's resource_found
** label (ccio_alloc_range).  NOTE: expands inside ccio_alloc_range and
** relies on its locals (res_ptr, res_end, pages_needed, boundary_size)
** and its resource_found label.
*/
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) { \
		int ret;\
		unsigned int idx;\
		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
		if ((0 == (*res_ptr & mask)) && !ret) { \
			*res_ptr |= mask; \
			res_idx = idx;\
			ioc->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

/*
** Two-pass circular first-fit: search from the hint to the end of the
** map, then wrap and search from the beginning.  Declares res_ptr and
** res_end in the enclosing scope; "size" is the chunk width in bits.
*/
#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size)  \
       u##size *res_ptr = (u##size *)&((ioc)->res_map[ioa->res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&(ioc)->res_map[ioa->res_size]; \
       CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
       res_ptr = (u##size *)&(ioc)->res_map[0]; \
       CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @dev: The device for which the mapping is made (its DMA segment
 *	boundary constrains the placement).
 * @size: The requested number of bytes (rounded up to whole IO pages
 *	by the caller).
 *
 * Claims a contiguous run of pdir entries in the resource map and
 * returns the index of the first one.  Panics rather than returning
 * an error if the request is too large or the map is exhausted.
 * Caller must hold ioc->res_lock.
 */
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
	unsigned long cr_start = mfctl(16);	/* CR16: interval timer, for cycle counts */
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %d pages_needed %d\n",
			__func__, size, pages_needed);

	/* Device segment boundary expressed in IO pages. */
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

	/*
	** The map is searched in chunks of 8/16/32/64 pages; a request
	** always claims a whole chunk, so small requests may over-allocate
	** up to the chunk size.  Each branch jumps to resource_found on
	** success (see CCIO_FIND_FREE_MAPPING).
	*/
	if (pages_needed <= 8) {
#if 0
		/*
		** Disabled variant: claim only pages_needed bits instead of
		** the whole byte.  Left for reference.
		*/
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		      __FILE__, __func__, pages_needed);
	}

	/* Both search passes failed: no free run of the required size. */
	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __func__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* Check for roll over of the cycle counter. */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif

	/* Convert the byte index into the map into a pdir (bit) index. */
	return res_idx << 3;
}
416
/*
** Release a previously claimed u<size> chunk at byte offset res_idx in
** the resource map.  The BUG_ON catches frees of ranges that were not
** fully allocated (every bit of "mask" must currently be set).
*/
#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
        u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
        BUG_ON((*res_ptr & mask) != mask); \
        *res_ptr &= ~(mask);
421
422
423
424
425
426
427
428
429
430
/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address whose mapping is being released.
 * @pages_mapped: The number of IO pages that were mapped.
 *
 * Clears the resource-map chunk that ccio_alloc_range() claimed for
 * this mapping.  The chunk size (8/16/32/64) is re-derived from
 * pages_mapped, so it must match the size passed at allocation time.
 * Caller must hold ioc->res_lock.
 */
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;	/* pdir index -> map byte index */

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s():  res_idx: %d pages_mapped %d\n",
		__func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
	ioc->used_pages -= pages_mapped;
#endif

	if(pages_mapped <= 8) {
#if 0
		/* Disabled variant matching the one in ccio_alloc_range. */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
	} else if(pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
	} else if(pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if(pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __func__);
	}
}
469
470
471
472
473
474
475
/* PA-RISC space identifier; this driver only maps kernel-space VAs. */
typedef unsigned long space_t;
#define KERNEL_SPACE 0
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
/*
** Hint bits deposited into the low bits of each I/O pdir entry
** (see ccio_io_pdir_entry).  IOPDIR_VALID marks the entry live.
*/
#define IOPDIR_VALID 0x01UL
#define HINT_SAFE_DMA 0x02UL		/* also forced for unaligned buffers */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST 0x04UL
#else
#define HINT_STOP_MOST 0x00UL
#endif
#define HINT_UDPATE_ENB 0x08UL		/* note: historical misspelling; unused here */
#define HINT_PREFETCH 0x10UL

/*
** Per-direction hint words, indexed by the DMA direction value.
** There is deliberately no entry for the remaining direction value
** (it would index a zero word, i.e. an invalid pdir entry).
*/
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL] = HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE] = HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE] = HINT_STOP_MOST | IOPDIR_VALID,
};
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir entry.
 * @pdir_ptr: The I/O Pdir entry to write.
 * @sid: The space identifier of @vba (must be KERNEL_SPACE).
 * @vba: The virtual address being mapped.
 * @hints: The hint bits for the entry ("hints" includes IOPDIR_VALID).
 *
 * Builds the 64-bit pdir entry as two 32-bit words via PA-RISC
 * bit-deposit/extract instructions, then flushes the entry's cache
 * line so the IOMMU (which reads memory coherently) sees it.
 * NOTE: the exact asm sequence and word store order are part of the
 * hardware contract - do not reorder.
 */
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci;	/* coherent index */

	/* Only kernel-space virtual addresses are supported. */
	BUG_ON(sid != KERNEL_SPACE);

	mtsp(sid,1);	/* load sid into %sr1 for the lci below */

	/*
	** WORD 1 (low word): physical page number with the hint bits
	** deposited into the low 12 bits.
	*/
	pa = virt_to_phys(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 (high word): upper physical address bits (64-bit only)
	** plus the CPU coherence index for @vba.
	*/
#ifdef __LP64__
	/* Extract pa bits 12:15, shift down pa 16:31, redeposit. */
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif
	/*
	** Ask the CPU for the coherence index of vba (lci uses %sr1 set
	** above), extract the 12 relevant bits, deposit into the word.
	*/
	asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;

	/*
	** Flush the pdir entry's cache line to memory and order it, so
	** the IOMMU reads the freshly written entry.
	** NOTE(review): some CPU models may not need FDC/SYNC here;
	** kept unconditionally for safety.
	*/
	asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
	asm volatile("sync");
}
619
620
621
622
623
624
625
626
627
628
629
/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page whose mappings were invalidated.
 * @byte_cnt: The length of the range being purged, in bytes.
 *
 * Issues one CMD_TLB_PURGE per TLB "chain" covering the range.  The
 * chain size (1 << chainid_shift) is the purge granularity; one extra
 * chain is purged to cover a range that straddles a chain boundary.
 */
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just in case */
	byte_cnt += chain_size;

	while(byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663static CCIO_INLINE void
664ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
665{
666 u32 iovp = (u32)CCIO_IOVP(iova);
667 size_t saved_byte_cnt;
668
669
670 saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);
671
672 while(byte_cnt > 0) {
673
674 unsigned int idx = PDIR_INDEX(iovp);
675 char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);
676
677 BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
678 pdir_ptr[7] = 0;
679
680
681
682
683
684
685
686
687 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7]));
688
689 iovp += IOVP_SIZE;
690 byte_cnt -= IOVP_SIZE;
691 }
692
693 asm volatile("sync");
694 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
695}
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710static int
711ccio_dma_supported(struct device *dev, u64 mask)
712{
713 if(dev == NULL) {
714 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
715 BUG();
716 return 0;
717 }
718
719
720 return (int)(mask == 0xffffffffUL);
721}
722
723
724
725
726
727
728
729
730
731
/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The device mapping the range.
 * @addr: The kernel virtual start address of the range.
 * @size: The length of the range in bytes.
 * @direction: The DMA direction (selects the pdir hint bits).
 *
 * Allocates pdir entries for the range, fills one entry per IO page,
 * and returns the resulting I/O virtual (DMA) address including the
 * sub-page offset of @addr.
 */
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	BUG_ON(size <= 0);

	/* Save the sub-page offset bits; re-attached to the iova below. */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* Round up to a whole number of IO pages. */
	size = ALIGN(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
		__func__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA for the whole mapping. */
	if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	/* Fill one pdir entry per IO page. */
	while(size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* Form the complete DMA address: IO virtual page | offset. */
	return CCIO_IOVA(iovp, offset);
}
790
791
792
793
794
795
796
797
798
799
/**
 * ccio_unmap_single - Unmap an address range from the IOMMU.
 * @dev: The device owning the mapping.
 * @iova: The DMA address returned by ccio_map_single().
 * @size: The length of the range in bytes.
 * @direction: The DMA direction (unused here).
 *
 * Invalidates the pdir entries, purges the I/O TLB, and returns the
 * pages to the resource map.
 */
static void
ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		  enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__func__, (long)iova, size);

	/* Strip the sub-page offset and round the size up, mirroring
	** the adjustment done in ccio_map_single(). */
	iova ^= offset;
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}
829
830
831
832
833
834
835
836
837
838static void *
839ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
840{
841 void *ret;
842#if 0
843
844
845
846 if(!hwdev) {
847
848 *dma_handle = 0;
849 return 0;
850 }
851#endif
852 ret = (void *) __get_free_pages(flag, get_order(size));
853
854 if (ret) {
855 memset(ret, 0, size);
856 *dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
857 }
858
859 return ret;
860}
861
862
863
864
865
866
867
868
869
870
/**
 * ccio_free_consistent - Free a buffer from ccio_alloc_consistent().
 * @dev: The device the buffer was allocated for.
 * @size: The size passed at allocation time.
 * @cpu_addr: The kernel virtual address of the buffer.
 * @dma_handle: The DMA address of the buffer.
 *
 * Tears down the IOMMU mapping (direction value 0 - it is unused by
 * ccio_unmap_single) and returns the pages to the page allocator.
 */
static void
ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	ccio_unmap_single(dev, dma_handle, size, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
878
879
880
881
882
883
/*
** Flag bit used by the shared scatter-gather helpers; presumably marks
** the first entry of a coalesced chunk - see iommu-helpers.h for the
** actual semantics.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"
890
891
892
893
894
895
896
897
898
899
/**
 * ccio_map_sg - Map a scatter/gather list into the IOMMU.
 * @dev: The device mapping the list.
 * @sglist: The scatter/gather list.
 * @nents: The number of entries in the list.
 * @direction: The DMA direction (selects the pdir hint bits).
 *
 * Single-entry lists take the ccio_map_single() fast path.  Otherwise
 * the shared helpers first coalesce virtually contiguous chunks and
 * reserve pdir ranges for them, then fill the pdir entries.  Returns
 * the number of DMA chunks produced (<= nents).
 */
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	/* Fast path for a single entry. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				(void *)sg_virt_addr(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	/* Total input length, for the sanity check below. */
	for(i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** Pass 1: coalesce contiguous chunks and reserve pdir ranges via
	** ccio_alloc_range (helper from iommu-helpers.h).
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

	/*
	** Pass 2: fill the reserved pdir entries for every chunk via
	** ccio_io_pdir_entry.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* Both passes must agree on the number of chunks. */
	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	/* Mapped length must equal the total input length. */
	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}
967
968
969
970
971
972
973
974
975
976
/**
 * ccio_unmap_sg - Unmap a scatter/gather list from the IOMMU.
 * @dev: The device owning the mappings.
 * @sglist: The scatter/gather list.
 * @nents: The number of entries in the list.
 * @direction: The DMA direction.
 *
 * Unmaps each DMA chunk with ccio_unmap_single() until an entry with
 * zero DMA length (the end of the coalesced chunks) or nents entries
 * have been processed.
 */
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
		__func__, nents, sg_virt_addr(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
	ioc->usg_calls++;
#endif

	while(sg_dma_len(sglist) && nents--) {

#ifdef CCIO_COLLECT_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_single(dev, sg_dma_address(sglist),
				  sg_dma_len(sglist), direction);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}
1005
/*
** DMA operations published to the arch DMA layer (installed as
** hppa_dma_ops in ccio_probe).  The sync hooks are NULL - no extra
** work is done for sync operations on this hardware.
*/
static struct hppa_dma_ops ccio_ops = {
	.dma_supported = ccio_dma_supported,
	.alloc_consistent = ccio_alloc_consistent,
	.alloc_noncoherent = ccio_alloc_consistent,	/* same path as consistent */
	.free_consistent = ccio_free_consistent,
	.map_single = ccio_map_single,
	.unmap_single = ccio_unmap_single,
	.map_sg = ccio_map_sg,
	.unmap_sg = ccio_unmap_sg,
	.dma_sync_single_for_cpu = NULL,
	.dma_sync_single_for_device = NULL,
	.dma_sync_sg_for_cpu = NULL,
	.dma_sync_sg_for_device = NULL,
};

#ifdef CONFIG_PROC_FS
1020
1021#ifdef CONFIG_PROC_FS
1022static int ccio_proc_info(struct seq_file *m, void *p)
1023{
1024 int len = 0;
1025 struct ioc *ioc = ioc_list;
1026
1027 while (ioc != NULL) {
1028 unsigned int total_pages = ioc->res_size << 3;
1029#ifdef CCIO_COLLECT_STATS
1030 unsigned long avg = 0, min, max;
1031 int j;
1032#endif
1033
1034 len += seq_printf(m, "%s\n", ioc->name);
1035
1036 len += seq_printf(m, "Cujo 2.0 bug : %s\n",
1037 (ioc->cujo20_bug ? "yes" : "no"));
1038
1039 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1040 total_pages * 8, total_pages);
1041
1042#ifdef CCIO_COLLECT_STATS
1043 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1044 total_pages - ioc->used_pages, ioc->used_pages,
1045 (int)(ioc->used_pages * 100 / total_pages));
1046#endif
1047
1048 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1049 ioc->res_size, total_pages);
1050
1051#ifdef CCIO_COLLECT_STATS
1052 min = max = ioc->avg_search[0];
1053 for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
1054 avg += ioc->avg_search[j];
1055 if(ioc->avg_search[j] > max)
1056 max = ioc->avg_search[j];
1057 if(ioc->avg_search[j] < min)
1058 min = ioc->avg_search[j];
1059 }
1060 avg /= CCIO_SEARCH_SAMPLE;
1061 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1062 min, avg, max);
1063
1064 len += seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
1065 ioc->msingle_calls, ioc->msingle_pages,
1066 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1067
1068
1069 min = ioc->usingle_calls - ioc->usg_calls;
1070 max = ioc->usingle_pages - ioc->usg_pages;
1071 len += seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
1072 min, max, (int)((max * 1000)/min));
1073
1074 len += seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
1075 ioc->msg_calls, ioc->msg_pages,
1076 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1077
1078 len += seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
1079 ioc->usg_calls, ioc->usg_pages,
1080 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1081#endif
1082
1083 ioc = ioc->next;
1084 }
1085
1086 return 0;
1087}
1088
/* Bind the info show routine to the seq_file single-shot interface. */
static int ccio_proc_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, &ccio_proc_info, NULL);
}

static const struct file_operations ccio_proc_info_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1101
1102static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
1103{
1104 int len = 0;
1105 struct ioc *ioc = ioc_list;
1106
1107 while (ioc != NULL) {
1108 u32 *res_ptr = (u32 *)ioc->res_map;
1109 int j;
1110
1111 for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
1112 if ((j & 7) == 0)
1113 len += seq_puts(m, "\n ");
1114 len += seq_printf(m, "%08x", *res_ptr);
1115 res_ptr++;
1116 }
1117 len += seq_puts(m, "\n\n");
1118 ioc = ioc->next;
1119 break;
1120 }
1121
1122 return 0;
1123}
1124
/* Bind the bitmap show routine to the seq_file single-shot interface. */
static int ccio_proc_bitmap_open(struct inode *inode, struct file *file)
{
	return single_open(file, &ccio_proc_bitmap_info, NULL);
}

static const struct file_operations ccio_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* CONFIG_PROC_FS */
1138
1139
1140
1141
1142
1143
1144
1145
1146static struct ioc * ccio_find_ioc(int hw_path)
1147{
1148 int i;
1149 struct ioc *ioc;
1150
1151 ioc = ioc_list;
1152 for (i = 0; i < ioc_count; i++) {
1153 if (ioc->hw_path == hw_path)
1154 return ioc;
1155
1156 ioc = ioc->next;
1157 }
1158
1159 return NULL;
1160}
1161
1162
1163
1164
1165
1166
1167
1168
1169void * ccio_get_iommu(const struct parisc_device *dev)
1170{
1171 dev = find_pa_parent_type(dev, HPHW_IOA);
1172 if (!dev)
1173 return NULL;
1174
1175 return ccio_find_ioc(dev->hw_path);
1176}
1177
#define CUJO_20_STEP 0x10000000	/* stride between affected iovas (256MB) */

/*
** Workaround for the Cujo 2.0 chip: starting at @iovp, mark one byte
** of the resource map (8 pdir entries) as permanently in use at every
** CUJO_20_STEP boundary so the allocator never hands out those iovas.
** NOTE(review): assumes the cujo device has an IOA ancestor - ioc is
** dereferenced without a NULL check.
*/
void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;	/* reported via /proc */
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}
1200
#if 0
/*
** Disabled: derive the I/O TLB size from the device's spa_shift.
** The fixed chain-id constants in the #else branch are used instead.
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __func__);
	}
	return (1 << dev->spa_shift);
}
#else
/* Fixed chain-id geometry; used by ccio_ioc_init to program and
** prime the I/O TLB. */
#define CCIO_CHAINID_SHIFT 8
#define CCIO_CHAINID_MASK 0xff
#endif
1228
1229
/* Devices this driver binds to: U2 and Uturn I/O adapters. */
static const struct parisc_device_id ccio_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb },	/* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb },	/* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver = {
	.name = "ccio",
	.id_table = ccio_tbl,
	.probe = ccio_probe,
};
1243
1244
1245
1246
1247
1248
1249
1250
1251
/**
 * ccio_ioc_init - Initialize the I/O Controller.
 * @ioc: The I/O Controller to initialize.
 *
 * Sizes and allocates the I/O pdir and resource map, then programs the
 * adapter registers (chain-id mask, pdir base, mode) and primes every
 * I/O TLB entry with a direct write so no stale entries remain.
 */
static void
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;	/* first in IO pages, later in bytes - see below */

	/*
	** Size the IOVA space from system memory: give each IOC handled
	** by this driver an equal share of totalram_pages.
	*/
	iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));

	/* Clamp to at least 1MB and (on 64-bit) at most 1GB of IOVA. */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
	}

	/* Round the page count up to a power of two... */
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* ...and convert iova_space_size to BYTES from here on. */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);	/* max pdir is 8MB */

	/* The pdir must occupy a power-of-two number of whole pages. */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__func__, ioc->ioc_regs,
			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if(NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __func__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	/* The hardware wants a page-aligned pdir. */
	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* Resource map: one bit per pdir entry. */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if(NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __func__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initial search hint; skips the first 16*8 pdir entries. */
	ioc->res_hint = 16;

	/* Protects the resource map and pdir. */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chain-id position: CCIO_CHAINID_SHIFT id bits sit just below
	** the top of the IOVA space.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/* Program the hardware: chain-id mask, pdir base, normal mode. */
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	** Prime the I/O TLB: write a zero entry pair into every chain so
	** no stale translations survive from before reset.
	*/
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}
1372
/*
** Populate one "routed" MMIO resource from a pair of hardware range
** registers and insert it into the iomem tree.  The registers hold
** (address >> 16); the signed cast sign-extends the upper range bits
** when shifting back up.  An empty range (end + 1 == start) means the
** region is unrouted and is silently skipped.
*/
static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;

	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;

	if (res->end + 1 == res->start)
		return;

	/*
	** insert_resource (rather than request_resource) so pre-claimed
	** children stay in place under the new parent range; failure is
	** only logged - the region may still be expanded later.
	*/
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
		       __func__, (unsigned long)res->start, (unsigned long)res->end);
	}
}
1405
1406static void __init ccio_init_resources(struct ioc *ioc)
1407{
1408 struct resource *res = ioc->mmio_region;
1409 char *name = kmalloc(14, GFP_KERNEL);
1410
1411 snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
1412
1413 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
1414 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
1415}
1416
/*
** Find a free window for a brand-new routed region: try the aligned
** slot just below @max, and recurse downward (max - size) until the
** insert succeeds or the window drops below @min.  Returns 0 on
** success, -EBUSY when no slot fits.
*/
static int new_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) &~ (align - 1);
	res->end = res->start + size;

	/*
	** insert_resource (rather than request_resource) keeps any
	** pre-claimed children in place under the new range.
	*/
	if (!insert_resource(&iomem_resource, res))
		return 0;

	return new_ioc_area(res, size, min, max - size, align);
}
1435
/*
** Grow an existing routed region by @size bytes: first try extending
** downward (keeping alignment and respecting @min), then upward
** (respecting @max).  A region that was never inserted is created
** from scratch via new_ioc_area().  Returns 0 on success, -EBUSY if
** neither direction can grow.
*/
static int expand_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	/* Try growing downward. */
	start = (res->start - size) &~ (align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	/* Try growing upward. */
	start = res->start;
	len = ((size + res->end + align) &~ (align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}
1460
1461
1462
1463
1464
1465
1466
1467
/*
** Allocate MMIO space for @dev inside one of its IOC's routed regions.
** If neither region currently has room, one of them is expanded and
** the corresponding io_io_low/high (or _hv) range registers are
** reprogrammed so the hardware routes the enlarged window.  Devices
** with no IOC fall back to the global iomem_resource tree.
** NOTE(review): registers hold (address >> 16); the 0xffff0000 OR when
** writing looks like a hardware framing requirement - confirm against
** the U2/Uturn documentation.
*/
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	/* Try both routed regions as-is first. */
	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
			       NULL, NULL))
		return 0;

	/* No room: expand a region and reprogram the range registers. */
	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL,NULL);
}
1505
1506int ccio_request_resource(const struct parisc_device *dev,
1507 struct resource *res)
1508{
1509 struct resource *parent;
1510 struct ioc *ioc = ccio_get_iommu(dev);
1511
1512 if (!ioc) {
1513 parent = &iomem_resource;
1514 } else if ((ioc->mmio_region->start <= res->start) &&
1515 (res->end <= ioc->mmio_region->end)) {
1516 parent = ioc->mmio_region;
1517 } else if (((ioc->mmio_region + 1)->start <= res->start) &&
1518 (res->end <= (ioc->mmio_region + 1)->end)) {
1519 parent = ioc->mmio_region + 1;
1520 } else {
1521 return -EBUSY;
1522 }
1523
1524
1525
1526
1527
1528
1529 return insert_resource(parent, res);
1530}
1531
1532
1533
1534
1535
1536
1537
1538
1539
/**
 * ccio_probe - Determine if ccio should claim this device.
 * @dev: The parisc device which has been found.
 *
 * Allocates and initializes an ioc for the device, appends it to
 * ioc_list, installs the ccio DMA operations, and (for the first IOC)
 * creates the procfs entries.  Returns 0 on success, 1 on allocation
 * failure.
 */
static int __init ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;

	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return 1;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
		(unsigned long)dev->hpa.start);

	/* Append to the tail of ioc_list. */
	for (i = 0; i < ioc_count; i++) {
		ioc_p = &(*ioc_p)->next;
	}
	*ioc_p = ioc;

	ioc->hw_path = dev->hw_path;
	ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
	ccio_ioc_init(ioc);
	ccio_init_resources(ioc);
	hppa_dma_ops = &ccio_ops;
	dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);

	/* A tiny allocation this early in boot should never fail. */
	BUG_ON(dev->dev.platform_data == NULL);
	HBA_DATA(dev->dev.platform_data)->iommu = ioc;

#ifdef CONFIG_PROC_FS
	/* Single set of proc entries covers all IOCs. */
	if (ioc_count == 0) {
		proc_create(MODULE_NAME, 0, proc_runway_root,
			    &ccio_proc_info_fops);
		proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
			    &ccio_proc_bitmap_fops);
	}
#endif
	ioc_count++;

	parisc_has_iommu();
	return 0;
}
1585
1586
1587
1588
1589
1590
/**
 * ccio_init - ccio initialization entry point.
 *
 * Registers the driver; ccio_probe() runs for each matching device.
 */
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}
1595
1596