1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/types.h>
35#include <linux/kernel.h>
36#include <linux/init.h>
37#include <linux/mm.h>
38#include <linux/spinlock.h>
39#include <linux/slab.h>
40#include <linux/string.h>
41#include <linux/pci.h>
42#include <linux/reboot.h>
43#include <linux/proc_fs.h>
44#include <linux/seq_file.h>
45#include <linux/scatterlist.h>
46#include <linux/iommu-helper.h>
47#include <linux/export.h>
48
49#include <asm/byteorder.h>
50#include <asm/cache.h>
51#include <linux/uaccess.h>
52#include <asm/page.h>
53#include <asm/dma.h>
54#include <asm/io.h>
55#include <asm/hardware.h>
56#include <asm/parisc-device.h>
57
58
59
60
61
#define MODULE_NAME "ccio"

/* Per-path debug printk switches; all compiled out by default. */
#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/* Runtime statistics (bitmap search times, per-call page counters) are
 * compiled out; define CCIO_COLLECT_STATS to include them in /proc output. */
#undef CCIO_COLLECT_STATS
#endif

#include <asm/runway.h>

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...) printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE inline
/* Raw (non-byteswapped) MMIO accessors for the IOA register file. */
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

/* Hardware version IDs of the supported I/O adapters / bus converters. */
#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC 0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC 0x502

#define IOA_NORMAL_MODE 0x00020080	/* io_control value for normal operation */
#define CMD_TLB_DIRECT_WRITE 35		/* io_command opcode: write I/O TLB entry */
#define CMD_TLB_PURGE 33		/* io_command opcode: purge I/O TLB entry */

/* Sentinel DMA address returned when a mapping cannot be established. */
#define CCIO_MAPPING_ERROR (~(dma_addr_t)0)
114
/*
 * Memory-mapped register layout of the U2/UTurn I/O adapter.
 * Accessed only through READ_U32/WRITE_U32 (__raw_* accessors).
 */
struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t unused1[12];
	uint32_t io_command;		/* Offset 12: TLB purge / direct-write commands */
	uint32_t io_status;		/* Offset 13 */
	uint32_t io_control;		/* Offset 14: mode (see IOA_NORMAL_MODE) */
	int32_t unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t io_err_resp;		/* Offset 17: error response */
	uint32_t io_err_info;		/* Offset 18 */
	uint32_t io_err_req;		/* Offset 19: error requestor */
	uint32_t io_err_resp_hi;	/* Offset 20 */
	uint32_t io_tlb_entry_m;	/* Offset 21: I/O TLB entry (most sig.) */
	uint32_t io_tlb_entry_l;	/* Offset 22: I/O TLB entry (least sig.) */
	uint32_t unused3[1];
	uint32_t io_pdir_base;		/* Offset 24: physical base of the I/O pdir */
	uint32_t io_io_low_hv;		/* Offset 25: 2nd MMIO window, low */
	uint32_t io_io_high_hv;		/* Offset 26: 2nd MMIO window, high */
	uint32_t unused4[1];
	uint32_t io_chain_id_mask;	/* Offset 28: TLB chain-id mask */
	uint32_t unused5[2];
	uint32_t io_io_low;		/* Offset 31: 1st MMIO window, low */
	uint32_t io_io_high;		/* Offset 32: 1st MMIO window, high */
};
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
/*
 * Per-IOC (I/O controller) state: register mapping, the I/O page
 * directory (pdir), and the allocation bitmap that tracks pdir slots.
 */
struct ioc {
	struct ioa_registers __iomem *ioc_regs;	/* mapped IOA register file */
	u8 *res_map;		/* resource bitmap; 1 bit per pdir entry */
	u64 *pdir_base;		/* I/O page directory (one u64 per IO page) */
	u32 pdir_size;		/* pdir size in bytes */
	u32 res_hint;		/* byte index in res_map to start next search */
	/* res_size is pdir entry count / 8 (bitmap bytes) */
	u32 res_size;
	spinlock_t res_lock;	/* protects res_map/res_hint and the pdir */

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];	/* CR16 cycles per alloc */
	unsigned long avg_idx;		/* ring index into avg_search[] */
	unsigned long used_pages;	/* currently mapped IO pages */
	unsigned long msingle_calls;	/* map_single call/page counters */
	unsigned long msingle_pages;
	unsigned long msg_calls;	/* map_sg call/page counters */
	unsigned long msg_pages;
	unsigned long usingle_calls;	/* unmap_single call/page counters */
	unsigned long usingle_pages;
	unsigned long usg_calls;	/* unmap_sg call/page counters */
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;	/* non-zero: Cujo 2.0 erratum workaround active */

	/* STUFF We don't need in performance path */
	u32 chainid_shift;		/* log2 of bytes covered per TLB chain */
	struct ioc *next;		/* singly-linked list of all IOCs */
	const char *name;		/* "U2" or "UTurn" */
	unsigned int hw_path;		/* hardware path; key for ccio_find_ioc() */
	struct pci_dev *fake_pci_dev;	/* the fake pci dev for non-pci devs */
	struct resource mmio_region[2];	/* the two IOA MMIO windows */
};

/* Head and length of the global IOC list (built at probe time). */
static struct ioc *ioc_list;
static int ioc_count;
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
/* An IO virtual page (IOVP) is the same size as a CPU page. */
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK

/* Compose/decompose an IO virtual address from page and in-page offset. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)

/* Convert between pdir slot index and IO virtual page/address. */
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
291
292
293
294
295
296
/*
 * Scan the resource bitmap "size" bits at a time for a free, boundary-legal
 * run and claim it.
 *
 * NOTE: these macros have hidden dependencies on the caller's scope: they
 * read `pages_needed` and `boundary_size`, and jump to the caller's
 * `resource_found` label on success (only ccio_alloc_range() uses them).
 *
 * Fix vs. original: the parameter was named `ioa` but the bodies referenced
 * the caller's variable `ioc` directly (and mixed `ioa`/`ioc`), so the
 * macros only worked because every call site happened to pass a variable
 * literally named `ioc`.  Use the parameter consistently and parenthesize
 * macro arguments.  Expansion is token-identical for the existing callers.
 */
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) { \
		int ret;\
		unsigned int idx;\
		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)(ioc)->res_map); \
		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size);\
		if ((0 == (*res_ptr & (mask))) && !ret) { \
			*res_ptr |= (mask); \
			res_idx = idx;\
			(ioc)->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

/* Search from the hint to the end of the map, then wrap to the start. */
#define CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, size)  \
       u##size *res_ptr = (u##size *)&((ioc)->res_map[(ioc)->res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&(ioc)->res_map[(ioc)->res_size]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
	res_ptr = (u##size *)&(ioc)->res_map[0]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size);
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
/**
 * ccio_alloc_range - reserve pdir slots for a mapping of @size bytes.
 * @ioc: the I/O controller whose resource map is searched.
 * @dev: the device the mapping is for (supplies the DMA segment boundary).
 * @size: number of bytes to map; must already be IOVP_SIZE-aligned.
 *
 * Returns the pdir index (bit index into res_map) of the first reserved
 * slot.  Panics — does not fail — if the map is exhausted or the request
 * exceeds DMA_CHUNK_SIZE.  Caller must hold ioc->res_lock.
 */
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
	unsigned long cr_start = mfctl(16);	/* CR16 interval timer: time the search */
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %d pages_needed %d\n",
			__func__, size, pages_needed);

	/*
	 * A mapping must not cross the device's DMA segment boundary;
	 * express that boundary in IO pages for iommu_is_span_boundary().
	 */
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

	/* Pick the widest bitmap word that covers the request; the search
	 * macros jump to resource_found on success. */
	if (pages_needed <= 8) {
#if 0
		/* Dead alternative: a tighter mask claiming only
		 * pages_needed bits instead of a full byte. */
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		       __FILE__,  __func__, pages_needed);
	}

	/* Falling through means both search passes (hint→end, start→hint)
	 * found nothing free. */
	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __func__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over — CR16 wraps */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif

	/* res_idx is a byte offset into res_map; <<3 converts it to the
	 * bit (== pdir entry) index. */
	return res_idx << 3;
}
418
/* Release "size" bits at byte res_idx of the resource map.  Declares
 * res_ptr in the caller's scope; mask must exactly match the bits set by
 * the corresponding CCIO_FIND_FREE_MAPPING() (BUG otherwise). */
#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
        u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
        BUG_ON((*res_ptr & mask) != mask); \
        *res_ptr &= ~(mask);
423
424
425
426
427
428
429
430
431
432
/**
 * ccio_free_range - release previously reserved pdir slots.
 * @ioc: the I/O controller the mapping belongs to.
 * @iova: the IO virtual address whose slots are freed.
 * @pages_mapped: number of IO pages that were reserved.
 *
 * Inverse of ccio_alloc_range().  Caller must hold ioc->res_lock and must
 * have already invalidated the pdir entries (ccio_mark_invalid()).
 */
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;	/* bit index -> byte index */

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s():  res_idx: %d pages_mapped %d\n",
		__func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
	ioc->used_pages -= pages_mapped;
#endif

	/* Mirror of the size ladder in ccio_alloc_range(); the masks must
	 * match the ones used at allocation time. */
	if(pages_mapped <= 8) {
#if 0
		/* Dead alternative matching the tighter allocation mask. */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
	} else if(pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
	} else if(pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if(pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __func__);
	}
}
471
472
473
474
475
476
477
/* PA-RISC address space identifier; the kernel runs in space 0. */
typedef unsigned long space_t;
#define KERNEL_SPACE 0
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
/* Hint bits stored in the low byte of each IO pdir entry. */
#define IOPDIR_VALID    0x01UL		/* entry maps a page */
#define HINT_SAFE_DMA   0x02UL		/* no reordering/prefetch for this page */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST  0x04UL		/* stop-most ordering (EISA needs it) */
#else
#define HINT_STOP_MOST  0x00UL		/* no-op on non-EISA configs */
#endif
#define HINT_UDPATE_ENB 0x08UL		/* update enable (spelling sic) — unused here */
#define HINT_PREFETCH   0x10UL		/* device will look ahead; prefetch OK */

/*
 * Hint bits per DMA direction, indexed by the PCI_DMA_* value.
 * NOTE(review): there is no entry for PCI_DMA_NONE (value 3); indexing
 * with that direction reads past the initialized entries — callers are
 * assumed never to pass it.
 */
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
};
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
/**
 * ccio_io_pdir_entry - build one 64-bit IO pdir entry for a kernel page.
 * @pdir_ptr: the pdir slot to fill.
 * @sid: space id; must be KERNEL_SPACE.
 * @vba: kernel virtual address of the page being mapped.
 * @hints: IOPDIR_VALID plus HINT_* bits for this mapping.
 *
 * The entry encodes the physical page number, coherence index and hint
 * bits in the hardware's layout, then flushes the entry's cache line so
 * the IOA observes it.  Caller must hold ioc->res_lock.
 */
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci;	/* coherence index, from the lci insn */

	/* We currently only support kernel addresses */
	BUG_ON(sid != KERNEL_SPACE);

	/* lci below reads through %sr1; point it at the given space. */
	mtsp(sid,1);

	/* Low word: physical address with the hint bits deposited into
	 * the low 12 bits (the in-page offset is not needed). */
	pa = virt_to_phys(vba);
	asm volatile("depw  %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

#ifdef __LP64__
	/* High word (64-bit kernels): bits 32..47 of the physical address
	 * rearranged into the hardware's field layout. */
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd  %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;	/* 32-bit kernels: no high physical address bits */
#endif

	/* Ask the CPU for the page's coherence index and deposit it into
	 * the high word of the entry. */
	asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw  %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;

	/* Flush the cache line holding the entry, then sync so the IOA
	 * sees the update before any DMA uses it. */
	asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
	asm volatile("sync");
}
621
622
623
624
625
626
627
628
629
630
631
632static CCIO_INLINE void
633ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
634{
635 u32 chain_size = 1 << ioc->chainid_shift;
636
637 iovp &= IOVP_MASK;
638 byte_cnt += chain_size;
639
640 while(byte_cnt > chain_size) {
641 WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
642 iovp += chain_size;
643 byte_cnt -= chain_size;
644 }
645}
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665static CCIO_INLINE void
666ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
667{
668 u32 iovp = (u32)CCIO_IOVP(iova);
669 size_t saved_byte_cnt;
670
671
672 saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);
673
674 while(byte_cnt > 0) {
675
676 unsigned int idx = PDIR_INDEX(iovp);
677 char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);
678
679 BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
680 pdir_ptr[7] = 0;
681
682
683
684
685
686
687
688
689 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr[7]));
690
691 iovp += IOVP_SIZE;
692 byte_cnt -= IOVP_SIZE;
693 }
694
695 asm volatile("sync");
696 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
697}
698
699
700
701
702
703
704
705
706
707
708
709
710static int
711ccio_dma_supported(struct device *dev, u64 mask)
712{
713 if(dev == NULL) {
714 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
715 BUG();
716 return 0;
717 }
718
719
720 return (int)(mask == 0xffffffffUL);
721}
722
723
724
725
726
727
728
729
730
731
/**
 * ccio_map_single - map a contiguous kernel buffer for DMA.
 * @dev: the device the mapping is for.
 * @addr: kernel virtual address of the buffer.
 * @size: buffer length in bytes (must be > 0).
 * @direction: DMA direction; selects the pdir hint bits.
 *
 * Reserves pdir slots, fills one entry per IO page and returns the IO
 * virtual address (page | original in-page offset).  Returns
 * CCIO_MAPPING_ERROR only when no IOC is associated with @dev.
 */
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return CCIO_MAPPING_ERROR;

	BUG_ON(size <= 0);

	/* save offset bits — re-applied to the returned IOVA */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ALIGN(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	/* NOTE(review): "%0x%x" in the format below looks like a typo for
	 * "0x%x"; harmless since DBG_RUN is compiled out by default. */
	DBG_RUN("%s() 0x%p -> 0x%lx size: %0x%x\n",
		__func__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	/* One pdir entry per IO page of the buffer. */
	while(size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}
792
793
794static dma_addr_t
795ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
796 size_t size, enum dma_data_direction direction,
797 unsigned long attrs)
798{
799 return ccio_map_single(dev, page_address(page) + offset, size,
800 direction);
801}
802
803
804
805
806
807
808
809
810
/**
 * ccio_unmap_page - tear down a mapping made by ccio_map_single/map_page.
 * @dev: the device the mapping was for.
 * @iova: the IO virtual address returned by the map call.
 * @size: length passed to the map call.
 * @direction: unused here (invalidation is direction-independent).
 * @attrs: unused.
 */
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__func__, (long)iova, size);

	/* Round the request back out to whole IO pages, mirroring the
	 * rounding done at map time. */
	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	/* Invalidate pdir entries + purge TLB, then release the bitmap bits. */
	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}
844
845
846
847
848
849
850
851
852
853static void *
854ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
855 unsigned long attrs)
856{
857 void *ret;
858#if 0
859
860
861
862 if(!hwdev) {
863
864 *dma_handle = 0;
865 return 0;
866 }
867#endif
868 ret = (void *) __get_free_pages(flag, get_order(size));
869
870 if (ret) {
871 memset(ret, 0, size);
872 *dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
873 }
874
875 return ret;
876}
877
878
879
880
881
882
883
884
885
886
/**
 * ccio_free - dma_map_ops .free hook: release a buffer from ccio_alloc().
 * @dev: device the buffer was for.
 * @size: size passed to ccio_alloc().
 * @cpu_addr: kernel virtual address returned by ccio_alloc().
 * @dma_handle: DMA address returned by ccio_alloc().
 * @attrs: unused.
 *
 * The IOMMU mapping must be torn down before the pages are returned.
 */
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	ccio_unmap_page(dev, dma_handle, size, 0, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}
894
895
896
897
898
899
/* Flag OR'd into sg_dma_address() by the shared sg-coalescing helpers to
 * mark entries that carry a pdir index rather than a final address. */
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS		/* let iommu-helpers.h update msg_pages etc. */
#endif
#include "iommu-helpers.h"
906
907
908
909
910
911
912
913
914
915
/**
 * ccio_map_sg - map a scatter/gather list for DMA.
 * @dev: the device the mappings are for.
 * @sglist: the list to map.
 * @nents: number of entries in @sglist.
 * @direction: DMA direction; selects pdir hint bits.
 * @attrs: unused.
 *
 * Returns the number of DMA chunks produced (<= @nents after coalescing),
 * or 0 if @dev has no IOC.
 */
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				sg_virt(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	/* Total input length, used only for the sanity check below. */
	for(i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/* Pass 1: merge physically adjacent entries into DMA chunks and
	 * reserve pdir ranges for them (shared helper). */
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

	/* Pass 2: write the pdir entries for every page of every chunk
	 * (shared helper, using our per-entry writer). */
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	/* Sanity check: the mapped length must equal the input length. */
	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}
985
986
987
988
989
990
991
992
993
994
/**
 * ccio_unmap_sg - tear down mappings made by ccio_map_sg().
 * @dev: the device the mappings were for.
 * @sglist: the mapped list.
 * @nents: entry count originally passed to ccio_map_sg().
 * @direction: forwarded to ccio_unmap_page() (unused there).
 * @attrs: unused.
 *
 * Walks the list until a zero sg_dma_len() marks the end of the chunks
 * produced by coalescing (which may be fewer than @nents).
 */
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
	ioc->usg_calls++;
#endif

	while(sg_dma_len(sglist) && nents--) {

#ifdef CCIO_COLLECT_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_page(dev, sg_dma_address(sglist),
				  sg_dma_len(sglist), direction, 0);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}
1027
/* dma_map_ops .mapping_error hook: true iff @dma_addr is the sentinel
 * returned when mapping failed. */
static int ccio_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == CCIO_MAPPING_ERROR;
}
1032
/* DMA operations installed as hppa_dma_ops when a CCIO IOC is probed. */
static const struct dma_map_ops ccio_ops = {
	.dma_supported =	ccio_dma_supported,
	.alloc =		ccio_alloc,
	.free =			ccio_free,
	.map_page =		ccio_map_page,
	.unmap_page =		ccio_unmap_page,
	.map_sg =		ccio_map_sg,
	.unmap_sg =		ccio_unmap_sg,
	.mapping_error =	ccio_mapping_error,
};
1043
#ifdef CONFIG_PROC_FS
/* seq_file show routine for /proc/runway/ccio: per-IOC configuration and,
 * when CCIO_COLLECT_STATS is enabled, usage statistics. */
static int ccio_proc_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;	/* bytes -> bits (pdir entries) */
#ifdef CCIO_COLLECT_STATS
		unsigned long avg = 0, min, max;
		int j;
#endif

		seq_printf(m, "%s\n", ioc->name);

		seq_printf(m, "Cujo 2.0 bug    : %s\n",
			   (ioc->cujo20_bug ? "yes" : "no"));

		seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
			   total_pages * 8, total_pages);

#ifdef CCIO_COLLECT_STATS
		seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
			   total_pages - ioc->used_pages, ioc->used_pages,
			   (int)(ioc->used_pages * 100 / total_pages));
#endif

		seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
			   ioc->res_size, total_pages);

#ifdef CCIO_COLLECT_STATS
		/* min/avg/max of the CR16 cycle samples from ccio_alloc_range(). */
		min = max = ioc->avg_search[0];
		for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if(ioc->avg_search[j] > max)
				max = ioc->avg_search[j];
			if(ioc->avg_search[j] < min)
				min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;
		seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
			   min, avg, max);

		/* NOTE(review): the averages below divide by call counters
		 * that are zero until the first map/unmap — reading this
		 * file before any DMA would divide by zero (stats are
		 * compiled out by default). */
		seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msingle_calls, ioc->msingle_pages,
			   (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

		/* KLUGE - unmap_sg calls unmap_page for each mapped page */
		min = ioc->usingle_calls - ioc->usg_calls;
		max = ioc->usingle_pages - ioc->usg_pages;
		seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
			   min, max, (int)((max * 1000)/min));

		seq_printf(m, "pci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
			   ioc->msg_calls, ioc->msg_pages,
			   (int)((ioc->msg_pages * 1000)/ioc->msg_calls));

		seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
			   ioc->usg_calls, ioc->usg_pages,
			   (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

		ioc = ioc->next;
	}

	return 0;
}
1110
1111static int ccio_proc_info_open(struct inode *inode, struct file *file)
1112{
1113 return single_open(file, &ccio_proc_info, NULL);
1114}
1115
/* File operations for /proc/runway/ccio (standard seq_file plumbing). */
static const struct file_operations ccio_proc_info_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1123
/* seq_file show routine for /proc/runway/ccio-bitmap: hex dump of the
 * resource bitmap.  NOTE(review): the unconditional break means only the
 * FIRST IOC's bitmap is ever dumped despite the loop — kept as-is since
 * the /proc output format may be relied upon. */
static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
			     ioc->res_size, false);
		seq_putc(m, '\n');
		ioc = ioc->next;
		break; /* XXX - remove me */
	}

	return 0;
}
1138
1139static int ccio_proc_bitmap_open(struct inode *inode, struct file *file)
1140{
1141 return single_open(file, &ccio_proc_bitmap_info, NULL);
1142}
1143
/* File operations for /proc/runway/ccio-bitmap (standard seq_file plumbing). */
static const struct file_operations ccio_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1151#endif
1152
1153
1154
1155
1156
1157
1158
1159
1160static struct ioc * ccio_find_ioc(int hw_path)
1161{
1162 int i;
1163 struct ioc *ioc;
1164
1165 ioc = ioc_list;
1166 for (i = 0; i < ioc_count; i++) {
1167 if (ioc->hw_path == hw_path)
1168 return ioc;
1169
1170 ioc = ioc->next;
1171 }
1172
1173 return NULL;
1174}
1175
1176
1177
1178
1179
1180
1181
1182
1183void * ccio_get_iommu(const struct parisc_device *dev)
1184{
1185 dev = find_pa_parent_type(dev, HPHW_IOA);
1186 if (!dev)
1187 return NULL;
1188
1189 return ccio_find_ioc(dev->hw_path);
1190}
1191
/* Cujo 2.0 erratum: every CUJO_20_STEP of IO virtual address space,
 * starting at @iovp, is unusable. */
#define CUJO_20_STEP       0x10000000	/* inc upper nibble */

/* Cujo 2.0 erratum workaround: permanently mark the affected pdir slots
 * as in-use in the resource map so they are never handed out.  Called at
 * boot when a buggy Cujo bus converter is detected. */
void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;	/* bit index -> byte index */

	while (idx < ioc->res_size) {
		/* claim 8 pdir entries (one bitmap byte) at a time */
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}
1214
#if 0
/* Disabled alternative: derive the I/O TLB size from firmware (the
 * device's SPA shift) instead of assuming the U2/UTurn fixed geometry. */
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __func__);
	}
	return (1 << dev->spa_shift);
}
#else
/* U2/UTurn have a fixed 256-entry I/O TLB: 8 chain-id bits. */
#define CCIO_CHAINID_SHIFT 8
#define CCIO_CHAINID_MASK 0xff
#endif
1242
1243
/* Devices this driver binds to: U2 and UTurn Runway I/O adapters. */
static const struct parisc_device_id ccio_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver __refdata = {
	.name =		"ccio",
	.id_table =	ccio_tbl,
	.probe =	ccio_probe,
};
1257
1258
1259
1260
1261
1262
1263
1264
1265
/**
 * ccio_ioc_init - size and initialize one IOC.
 * @ioc: the IOC to bring up (ioc_regs must already be mapped).
 *
 * Sizes the IO virtual address space from system RAM, allocates the IO
 * pdir and resource bitmap, then programs the IOA registers and zaps the
 * I/O TLB.  Panics on allocation failure (boot-time, no recovery).
 */
static void
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;

	/*
	 * Heuristic: give each IOC an IO virtual space proportional to
	 * RAM, split evenly across all CCIO instances.
	 */
	iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size =  1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size =  1 << (30 - PAGE_SHIFT);
#endif
	}

	/* Round up (and possibly grow) to a power-of-two page count. */
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);   /* max pdir size <= 8MB */

	/* Verify it's a power of two */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__func__, ioc->ioc_regs,
			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if(NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __func__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	/* The hardware requires a page-aligned pdir base. */
	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* resource map: one bit per pdir entry */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if(NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __func__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 (byte index; skips the low slots) */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	 * Chainid is the upper most bits of an IOVP used to determine
	 * which TLB entry an IOVP will use.
	 */
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/*
	 * Initialize IOA hardware
	 */
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	/*
	 * Go to "Virtual Mode"
	 */
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	 * Initialize all I/O TLB entries to 0 (Valid bit off).
	 */
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	/* Write the zeroed entry into every TLB chain. */
	for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}
1386
/**
 * ccio_init_resource - read one MMIO window from the IOA and register it.
 * @res: the resource to fill in.
 * @name: resource name (shared between both windows).
 * @ioaddr: address of the window's "low" register; "high" is at +4.
 *
 * The IOA stores window bounds as address>>16; the signed cast sign-
 * extends the shifted value on 64-bit kernels.  An empty window
 * (end + 1 == start) is silently skipped.
 */
static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;
	/*
	 * bracing ((signed) ...) are required for 64bit kernel because
	 * we only want to sign extend the lower 16 bits of the register.
	 * The upper 16-bits of range registers are hardcoded to 0xffff.
	 */
	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;
	/*
	 * Check if this MMIO range is disable
	 */
	if (res->end + 1 == res->start)
		return;

	/* On some platforms (e.g. K-Class), we have already registered
	 * resources for devices reported by firmware. Some are children
	 * of ccio.
	 * "insert" ccio ranges in the mmio hierarchy (/proc/iomem).
	 */
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
			__func__, (unsigned long)res->start, (unsigned long)res->end);
	}
}
1419
1420static void __init ccio_init_resources(struct ioc *ioc)
1421{
1422 struct resource *res = ioc->mmio_region;
1423 char *name = kmalloc(14, GFP_KERNEL);
1424
1425 snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);
1426
1427 ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
1428 ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
1429}
1430
/*
 * Claim a fresh, aligned range of iomem for an IOA window, searching
 * downward from @max by tail recursion until insert_resource() succeeds
 * or the range drops below @min.  Returns 0 on success, -EBUSY otherwise.
 *
 * NOTE(review): res->end is set to start + size (not start + size - 1),
 * so the registered window spans size + 1 bytes — preserved as-is since
 * the hardware programming in ccio_allocate_resource() mirrors it.
 */
static int new_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) &~ (align - 1);
	res->end = res->start + size;

	/* We might be trying to expand the MMIO region to include
	 * a child device that has already registered it's MMIO space.
	 * Use "insert" instead of request_resource().
	 */
	if (!insert_resource(&iomem_resource, res))
		return 0;

	return new_ioc_area(res, size, min, max - size, align);
}
1449
/*
 * Grow an existing IOA window by @size bytes, trying to extend downward
 * first, then upward; falls back to allocating a brand-new area when the
 * resource was never registered.  Returns 0 on success, -EBUSY otherwise.
 */
static int expand_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	/* Try extending downward (aligned) first... */
	start = (res->start - size) &~ (align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	/* ...then upward. */
	start = res->start;
	len = ((size + res->end + align) &~ (align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}
1474
1475
1476
1477
1478
1479
1480
1481
/*
 * ccio_allocate_resource - allocate MMIO space for a device behind CCIO.
 *
 * Tries both registered IOA windows first; if neither fits, grows one of
 * them and reprograms the corresponding io_io_low/high registers so the
 * IOA forwards the new range (values are address>>16 with the hardwired
 * 0xffff upper bits).  Falls back to iomem_resource when the device has
 * no IOC.  Returns 0 on success or a negative errno.
 */
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
			       NULL, NULL))
		return 0;

	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL,NULL);
}
1519
1520int ccio_request_resource(const struct parisc_device *dev,
1521 struct resource *res)
1522{
1523 struct resource *parent;
1524 struct ioc *ioc = ccio_get_iommu(dev);
1525
1526 if (!ioc) {
1527 parent = &iomem_resource;
1528 } else if ((ioc->mmio_region->start <= res->start) &&
1529 (res->end <= ioc->mmio_region->end)) {
1530 parent = ioc->mmio_region;
1531 } else if (((ioc->mmio_region + 1)->start <= res->start) &&
1532 (res->end <= (ioc->mmio_region + 1)->end)) {
1533 parent = ioc->mmio_region + 1;
1534 } else {
1535 return -EBUSY;
1536 }
1537
1538
1539
1540
1541
1542
1543 return insert_resource(parent, res);
1544}
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554static int __init ccio_probe(struct parisc_device *dev)
1555{
1556 int i;
1557 struct ioc *ioc, **ioc_p = &ioc_list;
1558
1559 ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
1560 if (ioc == NULL) {
1561 printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
1562 return -ENOMEM;
1563 }
1564
1565 ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";
1566
1567 printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
1568 (unsigned long)dev->hpa.start);
1569
1570 for (i = 0; i < ioc_count; i++) {
1571 ioc_p = &(*ioc_p)->next;
1572 }
1573 *ioc_p = ioc;
1574
1575 ioc->hw_path = dev->hw_path;
1576 ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
1577 if (!ioc->ioc_regs) {
1578 kfree(ioc);
1579 return -ENOMEM;
1580 }
1581 ccio_ioc_init(ioc);
1582 ccio_init_resources(ioc);
1583 hppa_dma_ops = &ccio_ops;
1584 dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);
1585
1586
1587 BUG_ON(dev->dev.platform_data == NULL);
1588 HBA_DATA(dev->dev.platform_data)->iommu = ioc;
1589
1590#ifdef CONFIG_PROC_FS
1591 if (ioc_count == 0) {
1592 proc_create(MODULE_NAME, 0, proc_runway_root,
1593 &ccio_proc_info_fops);
1594 proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
1595 &ccio_proc_bitmap_fops);
1596 }
1597#endif
1598 ioc_count++;
1599
1600 parisc_has_iommu();
1601 return 0;
1602}
1603
1604
1605
1606
1607
1608
/* Register the CCIO driver; ccio_probe() runs for each matching IOA. */
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}
1613
1614