/*
** ccio-dma.c:
**	DMA management routines for first generation cache-coherent machines.
**	Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/export.h>

#include <asm/byteorder.h>
#include <asm/cache.h>		/* for L1_CACHE_BYTES */
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "ccio"

#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance. */
#undef CCIO_COLLECT_STATS
#endif

#include <asm/runway.h>

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE	inline
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC     0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC     0x502

#define IOA_NORMAL_MODE      0x00020080 /* IOA normal mode, 8 TLB entries */
#define CMD_TLB_DIRECT_WRITE 35         /* IO_COMMAND for I/O TLB Writes */
#define CMD_TLB_PURGE        33         /* IO_COMMAND to Purge I/O TLB entry */
struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t    unused1[12];
	uint32_t   io_command;             /* Offset 12 */
	uint32_t   io_status;              /* Offset 13 */
	uint32_t   io_control;             /* Offset 14 */
	int32_t    unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t   io_err_resp;            /* Offset  0 */
	uint32_t   io_err_info;            /* Offset  1 */
	uint32_t   io_err_req;             /* Offset  2 */
	uint32_t   io_err_resp_hi;         /* Offset  3 */
	uint32_t   io_tlb_entry_m;         /* Offset  4 */
	uint32_t   io_tlb_entry_l;         /* Offset  5 */
	uint32_t   unused3[1];
	uint32_t   io_pdir_base;           /* Offset  7 */
	uint32_t   io_io_low_hv;           /* Offset  8 */
	uint32_t   io_io_high_hv;          /* Offset  9 */
	uint32_t   unused4[1];
	uint32_t   io_chain_id_mask;       /* Offset 11 */
	uint32_t   unused5[2];
	uint32_t   io_io_low;              /* Offset 14 */
	uint32_t   io_io_high;             /* Offset 15 */
};
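
/*
** Each U2/Uturn IOA provides a single I/O MMU: a 64-bit I/O PDIR
** (page directory) in host memory maps I/O virtual addresses (IOVA)
** to physical pages, and a small I/O TLB caches recently used
** translations.  A bitmap ("resource map", one bit per pdir entry)
** tracks which IOVA pages are in use.  The mapping routines below
** all boil down to: allocate a range of bits, fill in the
** corresponding pdir entries, and hand the device the IOVA.
*/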
struct ioc {
	struct ioa_registers __iomem *ioc_regs;  /* I/O MMU base address */
	u8  *res_map;			/* resource map, bit == pdir entry */
	u64 *pdir_base;			/* physical base address */
	u32 pdir_size;			/* bytes, function of IOV Space size */
	u32 res_hint;			/* next available IOVP -
					   circular search */
	u32 res_size;			/* size of resource map in bytes */
	spinlock_t res_lock;

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;

	/* STUFF We don't need in performance path */
	u32 chainid_shift;		/* specify bit location of chain_id */
	struct ioc *next;		/* Linked list of discovered iocs */
	const char *name;		/* device name from firmware */
	unsigned int hw_path;		/* the hardware path this ioc is associated with */
	struct pci_dev *fake_pci_dev;	/* the fake pci dev for non-pci devs */
	struct resource mmio_region[2];	/* The "routed" MMIO regions */
};

static struct ioc *ioc_list;
static int ioc_count;

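/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/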
#define IOVP_SIZE	PAGE_SIZE
#define IOVP_SHIFT	PAGE_SHIFT
#define IOVP_MASK	PAGE_MASK

/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset)	((iovp) | (offset))
#define CCIO_IOVP(iova)		((iova) & IOVP_MASK)

#define PDIR_INDEX(iovp)	((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx)	((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset)	(dma_addr_t)((long)iovp | (long)offset)

/*
** Find an available bit range in this ioc's resource map.
** Use a "circular" search starting at res_hint: most mappings are
** short-lived, so the average search should stay short.
*/
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) { \
		int ret;\
		unsigned int idx;\
		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
		if ((0 == (*res_ptr & mask)) && !ret) { \
			*res_ptr |= mask; \
			res_idx = idx;\
			ioc->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

/* Search from res_hint to the end of the map, then wrap around
** and search again from the beginning. */
#define CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, size)  \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[(ioc)->res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&(ioc)->res_map[(ioc)->res_size]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
	res_ptr = (u##size *)&(ioc)->res_map[0]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size);
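
/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @dev: The device doing the DMA.
 * @size: The size of the DMA mapping, in bytes.
 *
 * This function searches the resource map of the ioc to locate a range
 * of available pages for the given allocation size.  Returns the index
 * of the first pdir entry of the allocated range.
 */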
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %zu pages_needed %u\n",
			__func__, size, pages_needed);

	/*
	** The allocated range may not cross the device's DMA segment
	** boundary; express that boundary in IOVP-sized pages for
	** iommu_is_span_boundary().
	*/
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

	if (pages_needed <= 8) {
		/*
		 * LAN traffic will not thrash the TLB IFF the same NIC
		 * uses 8 adjacent pages to map separate payload data.
		 * ie the same byte in the resource bit map.
		 */
#if 0
		/* A sliding-bit search would pack mappings tighter;
		 * the byte-at-a-time search below effectively allocates
		 * a full byte (8 pages) to each mapping.
		 */
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		      __FILE__, __func__, pages_needed);
	}

	/* The search macros jump to resource_found on success. */
	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __func__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif
	/*
	** return the bit address.
	*/
	return res_idx << 3;
}

#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
	BUG_ON((*res_ptr & mask) != mask); \
	*res_ptr &= ~(mask);
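
/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The number of pages to be freed from the resource map.
 *
 * This function frees the resource map bits that back the given
 * IOVA range.
 */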
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s(): res_idx: %u pages_mapped %lu\n",
		__func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
	ioc->used_pages -= pages_mapped;
#endif

	if (pages_mapped <= 8) {
#if 0
		/* see matching comment in ccio_alloc_range() */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
	} else if (pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
	} else if (pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __func__);
	}
}

typedef unsigned long space_t;
#define KERNEL_SPACE 0
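
/*
** DMA "Page Type" and Hints.
** IOPDIR_VALID marks a pdir entry as live; ccio_mark_invalid()
** clears it to kill a translation.  HINT_SAFE_DMA is forced for
** buffers that aren't cacheline aligned (and used for consistent
** allocations); HINT_PREFETCH lets the IOA read ahead on
** outbound (to-device) pages.
*/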
#define IOPDIR_VALID    0x01UL
#define HINT_SAFE_DMA   0x02UL	/* used for pci_alloc_consistent() pages */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST  0x04UL	/* LSL support */
#else
#define HINT_STOP_MOST  0x00UL	/* only needed for EISA */
#endif
#define HINT_UDPATE_ENB 0x08UL	/* not used/supported by U2 */
#define HINT_PREFETCH   0x10UL	/* for outbound pages which are not SAFE */
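
/*
** Use the direction (ie PCI_DMA_BIDIRECTIONAL, PCI_DMA_TODEVICE, etc)
** of the DMA stream to pick the hint/valid bits for the pdir entries.
*/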
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
};
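
/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir entry for this IOVA.
 * @pdir_ptr: A pointer into the I/O Pdir.
 * @sid: The Space Identifier (always KERNEL_SPACE here).
 * @vba: The virtual address of the buffer page.
 * @hints: The DMA hint set to use for this mapping.
 *
 * Each 64-bit pdir entry holds the physical page number, the CPU
 * coherence index for the virtual page (so the IOA can snoop the CPU
 * cache correctly), and the hint/valid bits.  The low word is the
 * physical page number plus hints; the high word carries the upper
 * physical address bits and the coherence index.
 */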
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci;	/* coherent index */

	/* We currently only support kernel addresses */
	BUG_ON(sid != KERNEL_SPACE);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = lpa(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif

	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;

	/*
	** The I/O PDIR entry must be flushed from the CPU cache and
	** made visible to the IOA before it is used.
	*/
	asm_io_fdc(pdir_ptr);
	asm_io_sync();
}
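
/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The number of bytes to purge.
 *
 * Purge invalid I/O PDIR entries from the I/O TLB, one chain
 * (of 1 << chainid_shift bytes) at a time.
 */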
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */
	byte_cnt += chain_size;

	while (byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}
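
/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The number of bytes to mark invalid.
 *
 * Mark the I/O Pdir entries invalid and blow away the corresponding
 * I/O TLB entries.  Only the VALID bit is cleared; the rest of each
 * pdir entry is left intact.
 */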
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);

	while (byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only VALID bit */

		/*
		** Flush the entry from the CPU cache so the IOA sees
		** the cleared VALID bit.
		*/
		asm_io_fdc(pdir_ptr);

		iovp     += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm_io_sync();
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}
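
/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The device to query.
 * @mask: A bit mask describing the DMA address range of the device.
 *
 * All mappings go through the I/O MMU, so any device that can address
 * 32 bits is supported.
 */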
static int
ccio_dma_supported(struct device *dev, u64 mask)
{
	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only support 32-bit devices (ie PCI/GSC) */
	return (int)(mask >= 0xffffffffUL);
}
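
/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The device doing the DMA.
 * @addr: The kernel virtual address of the buffer.
 * @size: The size of the DMA region, in bytes.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * Allocates a range of pdir entries, fills one in for each page of
 * the buffer, and returns the resulting I/O virtual address.
 */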
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	BUG_ON(size <= 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ALIGN(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() 0x%p -> 0x%lx size: 0x%zx\n",
		__func__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if ((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	while (size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}

static dma_addr_t
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	return ccio_map_single(dev, page_address(page) + offset, size,
			direction);
}
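
/**
 * ccio_unmap_page - Unmap an address range from the IOMMU.
 * @dev: The device doing the DMA.
 * @iova: The I/O Virtual Address returned by ccio_map_page().
 * @size: The size of the DMA region, in bytes.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: DMA attributes (unused).
 */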
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN("%s() iovp 0x%lx/%zx\n",
		__func__, (long)iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}
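
/**
 * ccio_alloc - Allocate a consistent DMA mapping.
 * @dev: The device doing the DMA.
 * @size: The size of the DMA region, in bytes.
 * @dma_handle: On return, the IOVA the device should use.
 * @flag: GFP flags for the page allocation.
 * @attrs: DMA attributes (unused).
 *
 * Allocates zeroed pages and maps them bidirectionally.
 */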
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
		unsigned long attrs)
{
	void *ret;
#if 0
/* dead code from the old pci_alloc_consistent() API;
** 'hwdev' no longer exists in this function.
*/
	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}
#endif
	ret = (void *) __get_free_pages(flag, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
	}

	return ret;
}
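
/**
 * ccio_free - Free a consistent DMA mapping.
 * @dev: The device doing the DMA.
 * @size: The size of the DMA region, in bytes.
 * @cpu_addr: The kernel virtual address returned by ccio_alloc().
 * @dma_handle: The IOVA returned by ccio_alloc().
 * @attrs: DMA attributes (unused).
 */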
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	ccio_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"
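
/**
 * ccio_map_sg - Map a scatter/gather list into the IOMMU.
 * @dev: The device doing the DMA.
 * @sglist: The scatter/gather list to be mapped.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: DMA attributes (unused).
 *
 * Returns the number of DMA chunks actually programmed.
 */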
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				sg_virt(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	for (i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o page/offset contain the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}
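
/**
 * ccio_unmap_sg - Unmap a scatter/gather list from the IOMMU.
 * @dev: The device doing the DMA.
 * @sglist: The scatter/gather list to be unmapped.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: DMA attributes (unused).
 */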
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
	ioc->usg_calls++;
#endif

	while (sg_dma_len(sglist) && nents--) {

#ifdef CCIO_COLLECT_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_page(dev, sg_dma_address(sglist),
				sg_dma_len(sglist), direction, 0);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}

static const struct dma_map_ops ccio_ops = {
	.dma_supported =	ccio_dma_supported,
	.alloc =		ccio_alloc,
	.free =			ccio_free,
	.map_page =		ccio_map_page,
	.unmap_page =		ccio_unmap_page,
	.map_sg =		ccio_map_sg,
	.unmap_sg =		ccio_unmap_sg,
	.get_sgtable =		dma_common_get_sgtable,
};

#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;
#ifdef CCIO_COLLECT_STATS
		unsigned long avg = 0, min, max;
		int j;
#endif

		seq_printf(m, "%s\n", ioc->name);

		seq_printf(m, "Cujo 2.0 bug    : %s\n",
			   (ioc->cujo20_bug ? "yes" : "no"));

		seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
			   total_pages * 8, total_pages);

#ifdef CCIO_COLLECT_STATS
		seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
			   total_pages - ioc->used_pages, ioc->used_pages,
			   (int)(ioc->used_pages * 100 / total_pages));
#endif

		seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
			   ioc->res_size, total_pages);

#ifdef CCIO_COLLECT_STATS
		min = max = ioc->avg_search[0];
		for (j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if (ioc->avg_search[j] > max)
				max = ioc->avg_search[j];
			if (ioc->avg_search[j] < min)
				min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;
		seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			   min, avg, max);

		seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msingle_calls, ioc->msingle_pages,
			   (int)((ioc->msingle_pages * 1000) / ioc->msingle_calls));

		/* KLUGE - unmap_sg calls unmap_page for each mapped page */
		min = ioc->usingle_calls - ioc->usg_calls;
		max = ioc->usingle_pages - ioc->usg_pages;
		seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
			   min, max, (int)((max * 1000) / min));

		seq_printf(m, "pci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msg_calls, ioc->msg_pages,
			   (int)((ioc->msg_pages * 1000) / ioc->msg_calls));

		seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
			   ioc->usg_calls, ioc->usg_pages,
			   (int)((ioc->usg_pages * 1000) / ioc->usg_calls));
#endif

		ioc = ioc->next;
	}

	return 0;
}

static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
			     ioc->res_size, false);
		seq_putc(m, '\n');
		ioc = ioc->next;
		break; /* XXX - only the first ioc is dumped */
	}

	return 0;
}
#endif	/* CONFIG_PROC_FS */
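
/**
 * ccio_find_ioc - Find the ioc in the ioc_list.
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc claiming the
 * given hardware path.
 */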
static struct ioc * ccio_find_ioc(int hw_path)
{
	int i;
	struct ioc *ioc;

	ioc = ioc_list;
	for (i = 0; i < ioc_count; i++) {
		if (ioc->hw_path == hw_path)
			return ioc;

		ioc = ioc->next;
	}

	return NULL;
}
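
/**
 * ccio_get_iommu - Find the iommu which controls this device.
 * @dev: The parisc device.
 *
 * This function searches through the registered IOMMUs and returns
 * the appropriate IOMMU for the device, based on its hardware path.
 */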
void * ccio_get_iommu(const struct parisc_device *dev)
{
	dev = find_pa_parent_type(dev, HPHW_IOA);
	if (!dev)
		return NULL;

	return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP	0x10000000	/* inc upper nibble */
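
/* Cujo 2.0 has a hardware bug on certain DMA addresses.  Mark the
** pdir entries for those pages (every CUJO_20_STEP bytes, starting
** at the given iovp) as permanently in use so they are never
** handed out.
*/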
void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}

#if 0
/*
** Get the size of the I/O TLB for this I/O MMU, by looking it up
** from the hardware via spa_shift.  (Dead code - the fixed chainid
** defines below are used instead.)
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __func__);
	}
	return (1 << dev->spa_shift);
}
#else
/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT	8
#define CCIO_CHAINID_MASK	0xff
#endif /* 0 */

static const struct parisc_device_id ccio_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver __refdata = {
	.name =		"ccio",
	.id_table =	ccio_tbl,
	.probe =	ccio_probe,
};
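
/**
 * ccio_ioc_init - Initialize the I/O Controller.
 * @ioc: The I/O Controller.
 *
 * Initialize the I/O Controller, which includes setting up the
 * I/O Page Directory, the resource map, and initializing the
 * U2/Uturn chip into virtual mode.
 */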
static void __init
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	*/
	iova_space_size = (u32) (totalram_pages() / count_parisc_driver(&ccio_driver));

	/* limit IOVA space size to 1MB-1GB */

	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);   /* max pdir size <= 8MB */

	/* Verify it's a power of two */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__func__, ioc->ioc_regs,
			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if (NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __func__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* resource map size dictated by pdir_size */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if (NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __func__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chainid is the upper most bits of an IOVP used to determine
	** which TLB entry an IOVP will use.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/*
	** Initialize IOA hardware
	*/
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	/*
	** Go to "Virtual Mode"
	*/
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	** Initialize all I/O TLB entries to 0 (Valid bit off).
	*/
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	for (i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}

static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;
	/*
	 * The ((signed) ...) casts are required: we only want to sign
	 * extend the lower 16 bits of the range registers; the upper
	 * 16 bits are hardcoded to 0xffff.
	 */
	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;
	/*
	 * Check if this MMIO range is disabled
	 */
	if (res->end + 1 == res->start)
		return;

	/* On some platforms (e.g. K-Class), we have already registered
	 * resources for devices reported by firmware. Some are children
	 * of ccio.
	 * "insert" ccio ranges in the mmio hierarchy (/proc/iomem).
	 */
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
		       __func__, (unsigned long)res->start, (unsigned long)res->end);
	}
}

static void __init ccio_init_resources(struct ioc *ioc)
{
	struct resource *res = ioc->mmio_region;
	char *name = kmalloc(14, GFP_KERNEL);

	if (unlikely(!name))
		return;
	snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

	ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
	ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
}

static int new_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) &~ (align - 1);
	res->end = res->start + size;

	/* We might be trying to expand the MMIO range to include
	 * a child device that is already registered.  insert_resource()
	 * fails on overlap, in which case retry in the next lower window.
	 */
	if (!insert_resource(&iomem_resource, res))
		return 0;

	return new_ioc_area(res, size, min, max - size, align);
}

static int expand_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	/* first try to grow the window downward... */
	start = (res->start - size) &~ (align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	/* ...then try to grow it upward */
	start = res->start;
	len = ((size + res->end + align) &~ (align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}
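
/*
** ccio_allocate_resource:
**
** Wrapper around allocate_resource() which first tries the ioc's
** "routed" MMIO regions and, if the request doesn't fit, attempts
** to expand one of them before falling back to iomem_resource.
** The io_io_low/high(_hv) range registers are reprogrammed to
** match any expanded window.
*/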
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
			       NULL, NULL))
		return 0;

	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
}

int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res)
{
	struct resource *parent;
	struct ioc *ioc = ccio_get_iommu(dev);

	if (!ioc) {
		parent = &iomem_resource;
	} else if ((ioc->mmio_region->start <= res->start) &&
		   (res->end <= ioc->mmio_region->end)) {
		parent = ioc->mmio_region;
	} else if (((ioc->mmio_region + 1)->start <= res->start) &&
		   (res->end <= (ioc->mmio_region + 1)->end)) {
		parent = ioc->mmio_region + 1;
	} else {
		return -EBUSY;
	}

	/* "transparent" bus bridges need to register MMIO resources
	 * firmware assigned them. e.g. children of hppb.c (e.g. K-class)
	 * registered their resources in the PDC "bus walk" (See
	 * arch/parisc/kernel/inventory.c).
	 */
	return insert_resource(parent, res);
}
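
/**
 * ccio_probe - Claim and initialize a U2/UTurn IOA.
 * @dev: The parisc device which has been found.
 *
 * Allocate an ioc, map the chip's registers, initialize it into
 * virtual mode, and hook it into the ioc_list.
 */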
static int __init ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;
	struct pci_hba_data *hba;

	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return -ENOMEM;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
		(unsigned long)dev->hpa.start);

	for (i = 0; i < ioc_count; i++) {
		ioc_p = &(*ioc_p)->next;
	}
	*ioc_p = ioc;

	ioc->hw_path = dev->hw_path;
	ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
	if (!ioc->ioc_regs) {
		kfree(ioc);
		return -ENOMEM;
	}
	ccio_ioc_init(ioc);
	ccio_init_resources(ioc);
	hppa_dma_ops = &ccio_ops;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	/* if this fails, no I/O cards will work, so may as well BUG */
	BUG_ON(hba == NULL);

	hba->iommu = ioc;
	dev->dev.platform_data = hba;

#ifdef CONFIG_PROC_FS
	if (ioc_count == 0) {
		proc_create_single(MODULE_NAME, 0, proc_runway_root,
				ccio_proc_info);
		proc_create_single(MODULE_NAME"-bitmap", 0, proc_runway_root,
				ccio_proc_bitmap_info);
	}
#endif
	ioc_count++;
	return 0;
}
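
/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver.
 */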
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}