// SPDX-License-Identifier: GPL-2.0
/*
** ccio-dma.c:
**	DMA management routines for first generation cache-coherent machines.
**	Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
**
**	(c) Copyright 2000 Grant Grundler
**	(c) Copyright 2000 Ryan Bradetich
**	(c) Copyright 2000 Hewlett-Packard Company
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/export.h>

#include <asm/byteorder.h>
#include <asm/cache.h>		/* for L1_CACHE_BYTES */
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>

#include "iommu.h"

/*
** Choose "ccio" since that's what HP-UX calls it.
** Make it easier for folks to migrate from one to the other :^)
*/
#define MODULE_NAME "ccio"

#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance. */
#undef CCIO_COLLECT_STATS
#endif

#include <asm/runway.h>

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE	inline
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC     0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC     0x502

#define IOA_NORMAL_MODE      0x00020080 /* IO_CONTROL to turn on CCIO        */
#define CMD_TLB_DIRECT_WRITE 35         /* IO_COMMAND for I/O TLB Writes     */
#define CMD_TLB_PURGE        33         /* IO_COMMAND to Purge I/O TLB entry */

struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t    unused1[12];
	uint32_t   io_command;             /* Offset 12 */
	uint32_t   io_status;              /* Offset 13 */
	uint32_t   io_control;             /* Offset 14 */
	int32_t    unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t   io_err_resp;            /* Offset  0 */
	uint32_t   io_err_info;            /* Offset  1 */
	uint32_t   io_err_req;             /* Offset  2 */
	uint32_t   io_err_resp_hi;         /* Offset  3 */
	uint32_t   io_tlb_entry_m;         /* Offset  4 */
	uint32_t   io_tlb_entry_l;         /* Offset  5 */
	uint32_t   unused3[1];
	uint32_t   io_pdir_base;           /* Offset  7 */
	uint32_t   io_io_low_hv;           /* Offset  8 */
	uint32_t   io_io_high_hv;          /* Offset  9 */
	uint32_t   unused4[1];
	uint32_t   io_chain_id_mask;       /* Offset 11 */
	uint32_t   unused5[2];
	uint32_t   io_io_low;              /* Offset 14 */
	uint32_t   io_io_high;             /* Offset 15 */
};
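
/*
** Per-instance IOC state: one struct ioc per U2/Uturn I/O MMU found,
** chained together on ioc_list.
*/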
struct ioc {
	struct ioa_registers __iomem *ioc_regs;  /* I/O MMU base address */
	u8  *res_map;			/* resource map, bit == pdir entry */
	u64 *pdir_base;			/* physical base address */
	u32 pdir_size;			/* bytes, function of IOV Space size */
	u32 res_hint;			/* next available IOVP -
					   circular search */
	u32 res_size;			/* size of resource map in bytes */
	spinlock_t res_lock;

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;

	/* STUFF We don't need in performance path */
	u32 chainid_shift;		/* specify bit location of chain_id */
	struct ioc *next;		/* Linked list of discovered iocs */
	const char *name;		/* device name from firmware */
	unsigned int hw_path;		/* the hardware path this ioc is associated with */
	struct pci_dev *fake_pci_dev;	/* the fake pci_dev for non-pci devs */
	struct resource mmio_region[2];	/* The "routed" MMIO regions */
};

static struct ioc *ioc_list;
static int ioc_count;

/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK

/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)

#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx)  ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
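
/*
** Worked example (assuming 4 kB pages): I/O virtual address 0x00005123
** splits into IOVP 0x00005000 (CCIO_IOVP) and offset 0x123. Then
** PDIR_INDEX(0x5000) == 5, i.e. the sixth 64-bit I/O Pdir entry, and
** MKIOVP(5) recovers the page address. One res_map bit tracks one Pdir
** entry, so each res_map byte covers eight consecutive I/O pages.
*/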
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
	for (; res_ptr < res_end; ++res_ptr) { \
		int ret;\
		unsigned int idx;\
		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
		if ((0 == (*res_ptr & mask)) && !ret) { \
			*res_ptr |= mask; \
			res_idx = idx;\
			ioc->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, size)  \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[(ioc)->res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&(ioc)->res_map[(ioc)->res_size]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
	res_ptr = (u##size *)&(ioc)->res_map[0]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size);

/*
** Find an available bit range in this ioc's resource map. The search is
** "circular": it starts at res_hint, runs to the end of the map, then
** wraps around to the start; if both passes fail, ccio_alloc_range()
** panics below.
*/

/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @dev: The device for which the pages are being mapped.
 * @size: The requested number of bytes to be mapped into the
 * I/O Pdir.
 *
 * This function searches the resource map of the ioc to locate a range
 * of available pages for the requested size.
 */
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %zu pages_needed %d\n",
			__func__, size, pages_needed);

	boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);

	if (pages_needed <= 8) {
		/*
		 * LAN traffic will not thrash the TLB IFF the same NIC
		 * uses 8 adjacent pages to map separate payload data --
		 * ie the same byte in the resource bit map.
		 */
#if 0
		/* FIXME: bit search should shift its way through
		 * an unsigned long - not byte at a time. As it is now,
		 * we effectively allocate this byte to this mapping.
		 */
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		      __FILE__, __func__, pages_needed);
	}

	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __func__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif
	/*
	** return the bit address.
	*/
	return res_idx << 3;
}
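
/*
** Note: the value returned above is a *bit* index into res_map, which
** callers use directly as an I/O Pdir index, e.g.:
**
**	idx  = ccio_alloc_range(ioc, dev, size);
**	iovp = (dma_addr_t)MKIOVP(idx);    -- first mapped I/O virtual page
*/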

#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
	BUG_ON((*res_ptr & mask) != mask); \
	*res_ptr &= ~(mask);

/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The requested number of pages to be freed from the
 * I/O Pdir.
 *
 * This function frees the resources allocated for the iova.
 */
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s():  res_idx: %d pages_mapped %lu\n",
		__func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
	ioc->used_pages -= pages_mapped;
#endif

	if (pages_mapped <= 8) {
#if 0
		/* see matching comments in alloc_range */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
	} else if (pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
	} else if (pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __func__);
	}
}

/******************************************************************
**
**          CCIO dma_ops support routines
**
*******************************************************************/

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/*
** DMA hint bits for I/O Pdir entries:
** o IOPDIR_VALID marks an entry as valid.
** o HINT_SAFE_DMA tells U2 to do read/modify/write for sub-cacheline
**   transfers; it is required for "consistent" allocations and for any
**   buffer that isn't cacheline aligned.
** o HINT_STOP_MOST provides atomicity across cachelines and is only
**   needed by some EISA devices, hence the CONFIG_EISA conditional.
** o HINT_PREFETCH lets the IOA prefetch when streaming data out to the
**   device; it must not be combined with SAFE_DMA.
*/
#define IOPDIR_VALID    0x01UL
#define HINT_SAFE_DMA   0x02UL	/* used for pci_alloc_consistent() pages */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST  0x04UL	/* LSL support */
#else
#define HINT_STOP_MOST  0x00UL	/* only needed for "some EISA devices" */
#endif
#define HINT_UDPATE_ENB 0x08UL	/* not used/supported by U2 */
#define HINT_PREFETCH   0x10UL	/* for outbound pages which are not SAFE */

/*
** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
** ccio_alloc_consistent() depends on this to get SAFE_DMA
** when it passes in BIDIRECTIONAL flag.
*/
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
};
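
/*
** Example: a DMA_TO_DEVICE (== PCI_DMA_TODEVICE) mapping picks up
** HINT_PREFETCH | IOPDIR_VALID (plus HINT_STOP_MOST on EISA configs),
** so the IOA may prefetch outbound data. A DMA_BIDIRECTIONAL mapping
** instead gets HINT_SAFE_DMA, forcing read/modify/write handling of
** partial cachelines.
*/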

/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir entry.
 * @pdir_ptr: A pointer into I/O Pdir.
 * @sid: The Space Identifier.
 * @vba: The virtual address.
 * @hints: The DMA Hint.
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1),
 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
 * entry consists of 8 bytes as shown below (MSB == bit 0):
 *
 * WORD 0:
 * +------+----------------+-----------------------------------------------+
 * | Phys | Virtual Index  |               Phys                            |
 * | 0:3  |     0:11       |               4:19                            |
 * |4 bits|   12 bits      |              16 bits                          |
 * +------+----------------+-----------------------------------------------+
 * WORD 1:
 * +-----------------------+-----------------------------------------------+
 * |      Phys    |  Rsvd  | Prefetch |Update |Rsvd  |Lock  |Safe  |Valid  |
 * |     20:39    |        | Enable   |Enable |      |Enable|DMA   | Bit   |
 * |    20 bits   | 5 bits | 1 bit    |1 bit  |2 bits|1 bit |1 bit |1 bit  |
 * +-----------------------+-----------------------------------------------+
 *
 * The virtual index field is filled with the results of the LCI
 * (Load Coherence Index) instruction. The 8 bits used for the virtual
 * index are bits 12:19 of the value returned by LCI.
 */
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci;	/* coherent index */

	/* We currently only support kernel addresses */
	BUG_ON(sid != KERNEL_SPACE);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = lpa(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif
	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;

	/*
	** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
	**        PCX-U/U+ do. (eg C200/C240)
	** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
	**
	** "Since PCX-U employs an offset hash that is incompatible with
	** the real mode coherence index generation of U2, the PDIR entry
	** must be flushed to memory to retain coherence."
	*/
	asm_io_fdc(pdir_ptr);
	asm_io_sync();
}

/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Purge invalid I/O PDIR entries from the I/O TLB.
 *
 * FIXME: Can we change the byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */
	byte_cnt += chain_size;

	while (byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}
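
/*
** Example: with chainid_shift == 20, each CMD_TLB_PURGE write covers a
** 1 MB chain, so invalidating a 3 MB range issues three IO_COMMAND
** writes, each advancing iovp by chain_size. The "byte_cnt += chain_size"
** above rounds the purge up so a partially covered chain is flushed too.
*/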

/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
 * TLB entries.
 *
 * FIXME: at some threshold it might be "cheaper" to just blow
 *        away the entire I/O TLB instead of individual entries.
 *
 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
 *        PDIR entry - just once for each possible TLB entry.
 *        (We do need to maintain I/O PDIR coherency.)
 */
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);

	while (byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only the VALID bit */
		/*
		** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
		**        PCX-U/U+ do. (eg C200/C240)
		** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
		*/
		asm_io_fdc(pdir_ptr);

		iovp     += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm_io_sync();
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}

/****************************************************************
**
**          CCIO dma_ops
**
*****************************************************************/

/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The device to verify.
 * @mask: A bit mask describing the DMA address range of the device.
 */
static int
ccio_dma_supported(struct device *dev, u64 mask)
{
	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only support 32-bit or better devices (ie PCI/GSC) */
	return (int)(mask >= 0xffffffffUL);
}

/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The device for which the address range is being mapped.
 * @addr: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_single function.
 */
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	BUG_ON(size <= 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ALIGN(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() %p -> %#lx size: %zu\n",
		__func__, addr, (long)(iovp | offset), size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if ((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	while (size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}
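
/*
** Illustrative call path (no new API): a driver doing
**
**	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
**
** on a bus behind U2/Uturn ends up in ccio_map_page() below, which
** resolves the page to a kernel virtual address and hands it to
** ccio_map_single() above.
*/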

static dma_addr_t
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
		size_t size, enum dma_data_direction direction,
		unsigned long attrs)
{
	return ccio_map_single(dev, page_address(page) + offset, size,
			direction);
}

/**
 * ccio_unmap_page - Unmap an address range from the IOMMU.
 * @dev: The device for which the address range is being unmapped.
 * @iova: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: DMA attributes (unused here).
 */
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN("%s() iovp %#lx/%zx\n",
		__func__, (long)iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}

/**
 * ccio_alloc - Allocate a consistent DMA mapping.
 * @dev: The device for which the mapping is being allocated.
 * @size: The length of the DMA region.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 * @flag: GFP flags for the allocation.
 * @attrs: DMA attributes (unused here).
 *
 * This function implements the pci_alloc_consistent function.
 */
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
		unsigned long attrs)
{
	void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}
#endif
	ret = (void *) __get_free_pages(flag, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
	}

	return ret;
}

/**
 * ccio_free - Free a consistent DMA mapping.
 * @dev: The device for which the mapping was allocated.
 * @size: The length of the DMA region.
 * @cpu_addr: The cpu address returned from the ccio_alloc.
 * @dma_handle: The device address returned from the ccio_alloc.
 * @attrs: DMA attributes (unused here).
 *
 * This function implements the pci_free_consistent function.
 */
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	ccio_unmap_page(dev, dma_handle, size, 0, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The device for which the scatter/gather list is being mapped.
 * @sglist: The scatter/gather list to be mapped.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: DMA attributes (unused here).
 *
 * This function implements the pci_map_sg function.
 */
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				sg_virt(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	for (i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o page/offset contain the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}

/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The device for which the scatter/gather list is being unmapped.
 * @sglist: The scatter/gather list to be unmapped.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 * @attrs: DMA attributes (unused here).
 *
 * This function implements the pci_unmap_sg function.
 */
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
	ioc->usg_calls++;
#endif

	while (sg_dma_len(sglist) && nents--) {

#ifdef CCIO_COLLECT_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_page(dev, sg_dma_address(sglist),
				sg_dma_len(sglist), direction, 0);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}

static const struct dma_map_ops ccio_ops = {
	.dma_supported =	ccio_dma_supported,
	.alloc =		ccio_alloc,
	.free =			ccio_free,
	.map_page =		ccio_map_page,
	.unmap_page =		ccio_unmap_page,
	.map_sg =		ccio_map_sg,
	.unmap_sg =		ccio_unmap_sg,
	.get_sgtable =		dma_common_get_sgtable,
	.alloc_pages =		dma_common_alloc_pages,
	.free_pages =		dma_common_free_pages,
};
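
/*
** These ops are installed as hppa_dma_ops by ccio_probe(); from then on
** the generic DMA API dispatches here: dma_alloc_coherent() reaches
** .alloc (ccio_alloc), dma_map_sg() reaches .map_sg (ccio_map_sg), etc.
*/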

#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;
#ifdef CCIO_COLLECT_STATS
		unsigned long avg = 0, min, max;
		int j;
#endif

		seq_printf(m, "%s\n", ioc->name);

		seq_printf(m, "Cujo 2.0 bug    : %s\n",
			   (ioc->cujo20_bug ? "yes" : "no"));

		seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
			   total_pages * 8, total_pages);

#ifdef CCIO_COLLECT_STATS
		seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
			   total_pages - ioc->used_pages, ioc->used_pages,
			   (int)(ioc->used_pages * 100 / total_pages));
#endif

		seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
			   ioc->res_size, total_pages);

#ifdef CCIO_COLLECT_STATS
		min = max = ioc->avg_search[0];
		for (j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if (ioc->avg_search[j] > max)
				max = ioc->avg_search[j];
			if (ioc->avg_search[j] < min)
				min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;
		seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			   min, avg, max);

		seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msingle_calls, ioc->msingle_pages,
			   (int)((ioc->msingle_pages * 1000) / ioc->msingle_calls));

		/* KLUGE - unmap_sg calls unmap_page for each mapped page */
		min = ioc->usingle_calls - ioc->usg_calls;
		max = ioc->usingle_pages - ioc->usg_pages;
		seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
			   min, max, (int)((max * 1000) / min));

		seq_printf(m, "pci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msg_calls, ioc->msg_pages,
			   (int)((ioc->msg_pages * 1000) / ioc->msg_calls));

		seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
			   ioc->usg_calls, ioc->usg_pages,
			   (int)((ioc->usg_pages * 1000) / ioc->usg_calls));
#endif	/* CCIO_COLLECT_STATS */

		ioc = ioc->next;
	}

	return 0;
}

static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
			     ioc->res_size, false);
		seq_putc(m, '\n');
		ioc = ioc->next;
		break; /* XXX - only the first ioc is dumped */
	}

	return 0;
}
#endif	/* CONFIG_PROC_FS */

/**
 * ccio_find_ioc - Find the ioc in the ioc_list.
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc that matches
 * the provided hardware path.
 */
static struct ioc * ccio_find_ioc(int hw_path)
{
	int i;
	struct ioc *ioc;

	ioc = ioc_list;
	for (i = 0; i < ioc_count; i++) {
		if (ioc->hw_path == hw_path)
			return ioc;

		ioc = ioc->next;
	}

	return NULL;
}

/**
 * ccio_get_iommu - Find the iommu which controls this device.
 * @dev: The parisc device.
 *
 * This function searches through the registered IOMMUs and returns
 * the appropriate IOMMU for the device based on its hardware path.
 */
void * ccio_get_iommu(const struct parisc_device *dev)
{
	dev = find_pa_parent_type(dev, HPHW_IOA);
	if (!dev)
		return NULL;

	return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP       0x10000000	/* inc upper nibble */

/* Cujo 2.0 has a hardware bug which silently corrupts DMA to/from
 * certain I/O virtual pages (one at every CUJO_20_STEP boundary).
 * Mark those pdir entries as in-use so they are never handed out.
 */
void __init ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}

#if 0
/* GRANT -  is this needed for U2 or not? */

/*
** Get the size of the I/O TLB for this I/O MMU.
**
** If spa_shift is non-zero (ie probe_spa is true), then
** o TLB size is determined by SPA size.
** o otherwise use a magic number from PDC (or hard coded).
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __func__);
	}
	return (1 << dev->spa_shift);
}
#else

/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT	8
#define CCIO_CHAINID_MASK	0xff
#endif /* 0 */

/* We *can't* support JAVA (T600). Venture there at your own risk. */
static const struct parisc_device_id ccio_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver __refdata = {
	.name =		"ccio",
	.id_table =	ccio_tbl,
	.probe =	ccio_probe,
};

/**
 * ccio_ioc_init - Initialize the I/O Controller.
 * @ioc: The I/O Controller.
 *
 * Initialize the I/O Controller which includes setting up the
 * I/O Page Directory, the resource map, and initializing the
 * U2/Uturn chip into virtual mode.
 */
static void __init
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own. Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	*/
	iova_space_size = (u32) (totalram_pages() / count_parisc_driver(&ccio_driver));

	/* limit IOVA space size to 1MB-1GB */

	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
	}

	/*
	** iova space must be log2() in size, so pdir/res_map will
	** also be log2().
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);	/* max pdir size <= 8MB */

	/* Verify it's a power of two */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__func__, ioc->ioc_regs,
			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if (NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __func__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* resource map size dictated by pdir_size */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if (NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __func__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chainid is the upper most bits of an IOVP used to determine
	** which TLB entry an IOVP will use.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);
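
	/*
	** Example: with a 256 MB IOVA space and 4 kB pages,
	** get_order(256 MB) + PAGE_SHIFT == 28, so chainid_shift == 20:
	** bits 27:20 of an IOVP select one of the 256 TLB chains and each
	** chain covers 1 MB of I/O virtual space.
	*/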
	/*
	** Initialize IOA hardware
	*/
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	/*
	** Go to "Virtual Mode"
	*/
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	** Initialize all I/O TLB entries to 0 (Valid bit off).
	*/
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	for (i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}

static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;
	/*
	 * bracing ((signed) ...) are required for 64bit kernel because
	 * we only want to sign extend the lower 16 bits of the register.
	 * The upper 16-bits of range registers are hardcoded to 0xffff.
	 */
	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;
	/*
	 * Check if this MMIO range is disabled
	 */
	if (res->end + 1 == res->start)
		return;

	/* On some platforms (e.g. K-Class), we have already registered
	 * resources for devices reported by firmware. Some are children
	 * of ccio.
	 * "insert" ccio ranges in the mmio hierarchy (/proc/iomem).
	 */
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
			__func__, (unsigned long)res->start, (unsigned long)res->end);
	}
}

static void __init ccio_init_resources(struct ioc *ioc)
{
	struct resource *res = ioc->mmio_region;
	char *name = kmalloc(14, GFP_KERNEL);
	if (!name)	/* on OOM, leave the MMIO ranges unregistered */
		return;

	snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

	ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
	ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
}

static int new_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) &~ (align - 1);
	res->end = res->start + size;

	/* We might be trying to expand the MMIO range to include
	 * a child device that is already registered. It may overlap
	 * with the initial range (generated by PDC). Therefore, if
	 * insertion fails, retry with the next window down.
	 */
	if (!insert_resource(&iomem_resource, res))
		return 0;

	return new_ioc_area(res, size, min, max - size, align);
}

static int expand_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	/* first try to grow the range downwards */
	start = (res->start - size) &~ (align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	/* then try to grow it upwards */
	start = res->start;
	len = ((size + res->end + align) &~ (align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}

/*
 * Dino calls this function. Beware that we may get called on systems
 * which have no IOC (725, B180, C160L, etc) but do have a BA.
 * So it's legal to find no parent IOC.
 *
 * Some other issues: one of the resources in the ioc may be unassigned.
 */
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
				NULL, NULL))
		return 0;

	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
}

int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res)
{
	struct resource *parent;
	struct ioc *ioc = ccio_get_iommu(dev);

	if (!ioc) {
		parent = &iomem_resource;
	} else if ((ioc->mmio_region->start <= res->start) &&
			(res->end <= ioc->mmio_region->end)) {
		parent = ioc->mmio_region;
	} else if (((ioc->mmio_region + 1)->start <= res->start) &&
			(res->end <= (ioc->mmio_region + 1)->end)) {
		parent = ioc->mmio_region + 1;
	} else {
		return -EBUSY;
	}

	/* "transparent" bus bridges need to register MMIO resources
	 * firmware assigned them. e.g. children of hppb.c (e.g. K-class)
	 * registered their resources in the PDC "bus walk" (See
	 * arch/parisc/kernel/inventory.c).
	 */
	return insert_resource(parent, res);
}

/**
 * ccio_probe - Determine if ccio should claim this device.
 * @dev: The device which has been found.
 *
 * Determine if ccio should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have work to do.
 */
static int __init ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;
	struct pci_hba_data *hba;

	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return -ENOMEM;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
		(unsigned long)dev->hpa.start);

	for (i = 0; i < ioc_count; i++) {
		ioc_p = &(*ioc_p)->next;
	}
	*ioc_p = ioc;

	ioc->hw_path = dev->hw_path;
	ioc->ioc_regs = ioremap(dev->hpa.start, 4096);
	if (!ioc->ioc_regs) {
		kfree(ioc);
		return -ENOMEM;
	}
	ccio_ioc_init(ioc);
	ccio_init_resources(ioc);
	hppa_dma_ops = &ccio_ops;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	/* if this fails, no I/O cards will work, so may as well bug */
	BUG_ON(hba == NULL);

	hba->iommu = ioc;
	dev->dev.platform_data = hba;

#ifdef CONFIG_PROC_FS
	if (ioc_count == 0) {
		proc_create_single(MODULE_NAME, 0, proc_runway_root,
				ccio_proc_info);
		proc_create_single(MODULE_NAME"-bitmap", 0, proc_runway_root,
				ccio_proc_bitmap_info);
	}
#endif
	ioc_count++;
	return 0;
}

/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver.
 */
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}