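/*
** ccio-dma.c:
**	DMA management routines for first generation cache-coherent machines.
**	Runs the U2 or Uturn chip.  The IOA sits between the Runway
**	processor bus and the GSC I/O bus and translates 32-bit "IO virtual"
**	DMA addresses to physical memory through an I/O page directory.
*/
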
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/export.h>

#include <asm/byteorder.h>
#include <asm/cache.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/parisc-device.h>

#define MODULE_NAME "ccio"

#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/* depends on proc fs support. But costs CPU performance. */
#undef CCIO_COLLECT_STATS
#endif

#include <asm/runway.h>

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...)  printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...)   printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...)   printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE	inline
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC     0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC     0x502

#define IOA_NORMAL_MODE      0x00020080	/* IO_CONTROL to turn on CCIO        */
#define CMD_TLB_DIRECT_WRITE 35		/* IO_COMMAND for I/O TLB Writes     */
#define CMD_TLB_PURGE        33		/* IO_COMMAND to Purge I/O TLB entry */

struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t    unused1[12];
	uint32_t   io_command;		/* Offset 12 */
	uint32_t   io_status;		/* Offset 13 */
	uint32_t   io_control;		/* Offset 14 */
	int32_t    unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t   io_err_resp;		/* Offset  0 */
	uint32_t   io_err_info;		/* Offset  1 */
	uint32_t   io_err_req;		/* Offset  2 */
	uint32_t   io_err_resp_hi;	/* Offset  3 */
	uint32_t   io_tlb_entry_m;	/* Offset  4 */
	uint32_t   io_tlb_entry_l;	/* Offset  5 */
	uint32_t   unused3[1];
	uint32_t   io_pdir_base;	/* Offset  7 */
	uint32_t   io_io_low_hv;	/* Offset  8 */
	uint32_t   io_io_high_hv;	/* Offset  9 */
	uint32_t   unused4[1];
	uint32_t   io_chain_id_mask;	/* Offset 11 */
	uint32_t   unused5[2];
	uint32_t   io_io_low;		/* Offset 14 */
	uint32_t   io_io_high;		/* Offset 15 */
};
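
/*
** Per-IOA state.  One struct ioc is allocated for each U2/Uturn found.
** pdir_base points at the I/O page directory (one u64 entry per IO
** virtual page); res_map is a bitmap with one bit per pdir entry,
** used to allocate and free mapping ranges.
*/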
struct ioc {
	struct ioa_registers __iomem *ioc_regs;	/* I/O MMU base address */
	u8  *res_map;		/* resource map, bit == pdir entry */
	u64 *pdir_base;		/* physical base address */
	u32 pdir_size;		/* bytes, function of IOV Space size */
	u32 res_hint;		/* next available IOVP - circular search */
	u32 res_size;		/* size of resource map in bytes */
	spinlock_t res_lock;

#ifdef CCIO_COLLECT_STATS
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
	unsigned long avg_idx;	/* current index into avg_search */
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;	/* needs the Cujo 2.0 IOVA workaround */

	/* STUFF We don't need in performance path */
	u32 chainid_shift;	/* specify bit location of chain_id */
	struct ioc *next;	/* Linked list of discovered iocs */
	const char *name;	/* device name from firmware */
	unsigned int hw_path;	/* the hardware path this ioc is associated with */
	struct pci_dev *fake_pci_dev;	/* the fake pci_dev for non-pci devs */
	struct resource mmio_region[2];	/* The "routed" MMIO regions */
};

static struct ioc *ioc_list;
static int ioc_count;
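
/*
** An IO virtual address (IOVA) is split into an IO virtual page
** number (IOVP, which indexes the pdir) and a byte offset within
** that page.
*/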
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK

/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)

#define PDIR_INDEX(iovp)    ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx)    ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
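
/*
** Resource map search helpers.  CCIO_SEARCH_LOOP scans the bitmap a
** byte/halfword/word/doubleword at a time, looking for "mask" worth of
** free bits that don't cross the device's DMA segment boundary.
** CCIO_FIND_FREE_MAPPING starts at the rotating res_hint and, failing
** that, retries from the start of the map.
*/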
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
	for (; res_ptr < res_end; ++res_ptr) { \
		int ret; \
		unsigned int idx; \
		idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
		ret = iommu_is_span_boundary(idx << 3, pages_needed, 0, boundary_size); \
		if ((0 == (*res_ptr & mask)) && !ret) { \
			*res_ptr |= mask; \
			res_idx = idx; \
			ioc->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define CCIO_FIND_FREE_MAPPING(ioa, res_idx, mask, size)  \
	u##size *res_ptr = (u##size *)&((ioa)->res_map[(ioa)->res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&(ioa)->res_map[(ioa)->res_size]; \
	CCIO_SEARCH_LOOP(ioa, res_idx, mask, size); \
	res_ptr = (u##size *)&(ioa)->res_map[0]; \
	CCIO_SEARCH_LOOP(ioa, res_idx, mask, size);
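
/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @dev: The device requesting the mapping.
 * @size: The size of the mapping in bytes (a multiple of IOVP_SIZE).
 *
 * This function searches the resource map of the ioc to locate a range
 * of available pages for the given size.  Returns the pdir index of the
 * first page of the range; panics if the map is exhausted.
 */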
static int
ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
	unsigned long boundary_size;
#ifdef CCIO_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %zu pages_needed %d\n",
		__func__, size, pages_needed);

	/*
	** The search must not span the device's DMA segment boundary,
	** so compute that boundary in units of IO virtual pages.
	*/
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

	if (pages_needed <= 8) {
		/*
		 * LAN traffic will not thrash the TLB IFF the same NIC
		 * uses 8 adjacent pages to map separate payload data.
		 * ie the same byte in the resource bit map.
		 */
#if 0
		/* FIXME: bit search should shift its way through
		 * an unsigned long - not byte at a time. As it is now,
		 * we effectively allocate this byte to this mapping.
		 */
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		      __FILE__, __func__, pages_needed);
	}

	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __func__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__func__, res_idx, ioc->res_hint);

#ifdef CCIO_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
	ioc->used_pages += pages_needed;
#endif
	/*
	** return the bit address.
	*/
	return res_idx << 3;
}

#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
	BUG_ON((*res_ptr & mask) != mask); \
	*res_ptr &= ~(mask);
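
/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The requested number of pages to be freed from the
 *	resource map.
 *
 * This function frees the resource map bits allocated by
 * ccio_alloc_range().
 */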
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
		__func__, res_idx, pages_mapped);

#ifdef CCIO_COLLECT_STATS
	ioc->used_pages -= pages_mapped;
#endif

	if (pages_mapped <= 8) {
#if 0
		/* see matching comments in alloc_range */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffUL, 8);
#endif
	} else if (pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffffUL, 16);
	} else if (pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __func__);
	}
}

typedef unsigned long space_t;
#define KERNEL_SPACE 0
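
/*
** GET_IOC is reconstructed from its callers: ccio_probe() below stores
** each chip's ioc in HBA_DATA(...)->iommu, and parisc_walk_tree() finds
** that platform data for any device below the IOA.
*/
#define GET_IOC(dev) ((struct ioc *)(HBA_DATA(parisc_walk_tree(dev))->iommu))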
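
/*
** DMA hint bits for the low byte of an IO pdir entry.  IOPDIR_VALID
** marks the entry usable; the remaining bits tell the IOA how
** aggressively it may prefetch or combine data for this page.
*/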
#define IOPDIR_VALID    0x01UL
#define HINT_SAFE_DMA   0x02UL	/* used for pci_alloc_consistent() pages */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST  0x04UL	/* LSL support */
#else
#define HINT_STOP_MOST  0x00UL	/* only needed for "some EISA devices" */
#endif
#define HINT_UPDATE_ENB 0x08UL	/* not used/supported by U2 */
#define HINT_PREFETCH   0x10UL	/* for outbound pages which are not SAFE */

/*
** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
** ccio_alloc() depends on this to get SAFE_DMA
** when it passes in BIDIRECTIONAL flag.
*/
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
};
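
/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir entry for this iova.
 * @pdir_ptr: A pointer to the I/O Pdir entry to be filled in.
 * @sid: The Space Identifier.
 * @vba: The virtual address of the page to be mapped.
 * @hints: The DMA Hint set to use for this mapping.
 *
 * The entry holds the physical page address, the DMA hints, and the
 * coherence index (CI) the chip needs to keep CPU caches coherent with
 * DMA; the CI is obtained with the "lci" (load coherence index)
 * instruction.
 */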
static void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci;	/* coherent index */

	/* We currently only support kernel addresses */
	BUG_ON(sid != KERNEL_SPACE);

	mtsp(sid, 1);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = virt_to_phys(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif
	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;

	/*
	** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
	**        PCX-U/U+ do. (eg C200/C240)
	** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
	**
	** "Since PCX-U employs an offset hash that is incompatible with
	** the real mode coherence index generation of U2, the PDIR entry
	** must be flushed to memory to retain coherence."
	*/
	asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
	asm volatile("sync");
}
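
/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The number of bytes covered by the purge.
 *
 * Purges the I/O TLB one chain at a time until the requested range
 * has been covered.
 */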
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */
	byte_cnt += chain_size;

	while (byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}
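
/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The number of bytes to be unmapped.
 *
 * Clears the VALID bit in each affected pdir entry, flushes the
 * entries to memory, and then purges them from the I/O TLB.
 */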
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);

	while (byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only VALID bit */

		/*
		** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
		**        PCX-U/U+ do. (eg C200/C240)
		**
		** Flush the cache line holding this pdir entry; note the
		** fdc operand must be the entry's *address*, not its value.
		*/
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

		iovp += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm volatile("sync");
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}
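
/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The device for which the mask is requested.
 * @mask: A bit mask describing the DMA address range of the device.
 *
 * The U2/Uturn only translates 32-bit addresses, so only a full
 * 32-bit mask is supported.
 */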
static int
ccio_dma_supported(struct device *dev, u64 mask)
{
	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only support 32-bit devices (ie PCI/GSC) */
	return (int)(mask == 0xffffffffUL);
}
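
/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The device mapping the range.
 * @addr: The kernel virtual start address of the range.
 * @size: The length of the range in bytes.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * Allocates resource map space, fills in one pdir entry per page, and
 * returns the IO virtual address (IO virtual page | page offset).
 */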
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	BUG_ON(size <= 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ALIGN(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() 0x%p -> 0x%lx size: 0x%zx\n",
		__func__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if ((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	while (size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}

static dma_addr_t
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
	      size_t size, enum dma_data_direction direction,
	      unsigned long attrs)
{
	return ccio_map_single(dev, page_address(page) + offset, size,
			       direction);
}
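
/**
 * ccio_unmap_page - Unmap an address range from the IOMMU.
 * @dev: The device owning the mapping.
 * @iova: The IO virtual start address of the range.
 * @size: The length of the range in bytes.
 * @direction: The direction of the DMA transaction (to/from device).
 */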
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN("%s() iovp 0x%lx/%zx\n",
		__func__, (long)iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}
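
/**
 * ccio_alloc - Allocate a consistent DMA mapping.
 * @dev: The device requesting the mapping.
 * @size: The length of the mapping in bytes.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 *
 * Grabs free pages, zeroes them, and maps them through the IOMMU.
 */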
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
	   unsigned long attrs)
{
	void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}
#endif
	ret = (void *) __get_free_pages(flag, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
	}

	return ret;
}
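
/**
 * ccio_free - Free a consistent DMA mapping.
 * @dev: The device owning the mapping.
 * @size: The length of the mapping in bytes.
 * @cpu_addr: The cpu address returned from the ccio_alloc() call.
 * @dma_handle: The device address returned from the ccio_alloc() call.
 */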
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
	  dma_addr_t dma_handle, unsigned long attrs)
{
	ccio_unmap_page(dev, dma_handle, size, 0, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef CCIO_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"
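
/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The device mapping the list.
 * @sglist: The scatter/gather list to be mapped in the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * Coalesces virtually contiguous chunks, allocates pdir space for
 * them, and then programs the pdir entries.
 */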
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				sg_virt(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	for (i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, ccio_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o page/offset contain the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}
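
/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The device owning the mappings.
 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 */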
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sg_virt(sglist), sglist->length);

#ifdef CCIO_COLLECT_STATS
	ioc->usg_calls++;
#endif

	while (sg_dma_len(sglist) && nents--) {

#ifdef CCIO_COLLECT_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_page(dev, sg_dma_address(sglist),
				sg_dma_len(sglist), direction, 0);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
}

static struct dma_map_ops ccio_ops = {
	.dma_supported =	ccio_dma_supported,
	.alloc =		ccio_alloc,
	.free =			ccio_free,
	.map_page =		ccio_map_page,
	.unmap_page =		ccio_unmap_page,
	.map_sg =		ccio_map_sg,
	.unmap_sg =		ccio_unmap_sg,
};
1023
1024#ifdef CONFIG_PROC_FS
1025static int ccio_proc_info(struct seq_file *m, void *p)
1026{
1027 struct ioc *ioc = ioc_list;
1028
1029 while (ioc != NULL) {
1030 unsigned int total_pages = ioc->res_size << 3;
1031#ifdef CCIO_COLLECT_STATS
1032 unsigned long avg = 0, min, max;
1033 int j;
1034#endif
1035
1036 seq_printf(m, "%s\n", ioc->name);
1037
1038 seq_printf(m, "Cujo 2.0 bug : %s\n",
1039 (ioc->cujo20_bug ? "yes" : "no"));
1040
1041 seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1042 total_pages * 8, total_pages);
1043
1044#ifdef CCIO_COLLECT_STATS
1045 seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1046 total_pages - ioc->used_pages, ioc->used_pages,
1047 (int)(ioc->used_pages * 100 / total_pages));
1048#endif
1049
1050 seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1051 ioc->res_size, total_pages);
1052
1053#ifdef CCIO_COLLECT_STATS
1054 min = max = ioc->avg_search[0];
1055 for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
1056 avg += ioc->avg_search[j];
1057 if(ioc->avg_search[j] > max)
1058 max = ioc->avg_search[j];
1059 if(ioc->avg_search[j] < min)
1060 min = ioc->avg_search[j];
1061 }
1062 avg /= CCIO_SEARCH_SAMPLE;
1063 seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1064 min, avg, max);
1065
1066 seq_printf(m, "pci_map_single(): %8ld calls %8ld pages (avg %d/1000)\n",
1067 ioc->msingle_calls, ioc->msingle_pages,
1068 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1069
1070
1071 min = ioc->usingle_calls - ioc->usg_calls;
1072 max = ioc->usingle_pages - ioc->usg_pages;
1073 seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
1074 min, max, (int)((max * 1000)/min));
1075
1076 seq_printf(m, "pci_map_sg() : %8ld calls %8ld pages (avg %d/1000)\n",
1077 ioc->msg_calls, ioc->msg_pages,
1078 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1079
1080 seq_printf(m, "pci_unmap_sg() : %8ld calls %8ld pages (avg %d/1000)\n\n\n",
1081 ioc->usg_calls, ioc->usg_pages,
1082 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1083#endif
1084
1085 ioc = ioc->next;
1086 }
1087
1088 return 0;
1089}
1090
1091static int ccio_proc_info_open(struct inode *inode, struct file *file)
1092{
1093 return single_open(file, &ccio_proc_info, NULL);
1094}
1095
1096static const struct file_operations ccio_proc_info_fops = {
1097 .owner = THIS_MODULE,
1098 .open = ccio_proc_info_open,
1099 .read = seq_read,
1100 .llseek = seq_lseek,
1101 .release = single_release,
1102};
1103
1104static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
1105{
1106 struct ioc *ioc = ioc_list;
1107
1108 while (ioc != NULL) {
1109 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
1110 ioc->res_size, false);
1111 seq_putc(m, '\n');
1112 ioc = ioc->next;
1113 break;
1114 }
1115
1116 return 0;
1117}
1118
1119static int ccio_proc_bitmap_open(struct inode *inode, struct file *file)
1120{
1121 return single_open(file, &ccio_proc_bitmap_info, NULL);
1122}
1123
1124static const struct file_operations ccio_proc_bitmap_fops = {
1125 .owner = THIS_MODULE,
1126 .open = ccio_proc_bitmap_open,
1127 .read = seq_read,
1128 .llseek = seq_lseek,
1129 .release = single_release,
1130};
1131#endif
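
/**
 * ccio_find_ioc - Find the ioc in the ioc_list.
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc that matches
 * the provided hardware path.
 */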
static struct ioc * ccio_find_ioc(int hw_path)
{
	int i;
	struct ioc *ioc;

	ioc = ioc_list;
	for (i = 0; i < ioc_count; i++) {
		if (ioc->hw_path == hw_path)
			return ioc;

		ioc = ioc->next;
	}

	return NULL;
}
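
/**
 * ccio_get_iommu - Find the iommu which controls this device.
 * @dev: The parisc device.
 *
 * This function searches through the registered IOMMUs and returns
 * the appropriate IOMMU for the device based upon the device's
 * hardware path.
 */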
void * ccio_get_iommu(const struct parisc_device *dev)
{
	dev = find_pa_parent_type(dev, HPHW_IOA);
	if (!dev)
		return NULL;

	return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP	0x10000000	/* inc upper nibble */

/* Cujo 2.0 has a bug which will silently corrupt data being transferred
 * to/from certain pages.  To avoid this happening, we mark these pages
 * as `used', and ensure that nothing will try to allocate from them.
 */
void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}

#if 0
/* GRANT - is this needed for U2 or not? */

/*
** Get the size of the I/O TLB from this I/O Adapter's firmware
** register description (spa_shift).
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __func__);
	}
	return (1 << dev->spa_shift);
}
#else
/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT	8
#define CCIO_CHAINID_MASK	0xff
#endif /* 0 */

static const struct parisc_device_id ccio_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb },		/* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb },	/* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver = {
	.name = "ccio",
	.id_table = ccio_tbl,
	.probe = ccio_probe,
};
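
/**
 * ccio_ioc_init - Initialize the I/O Controller.
 * @ioc: The I/O Controller.
 *
 * Sizes and allocates the I/O page directory and resource map, then
 * programs the chip and puts it into "virtual mode".
 */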
static void
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	*/
	iova_space_size = (u32) (totalram_pages / count_parisc_driver(&ccio_driver));

	/* limit IOVA space size to 1MB-1GB */

	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
	}

	/*
	** iova space must be log2() in size,
	** thus pdir/res_map will also be log2().
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);	/* max pdir size <= 8MB */

	/* Verify it's a power of two */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__func__, ioc->ioc_regs,
			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if (NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __func__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* resource map size dictated by pdir_size */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if (NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __func__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chainid is the upper most bits of an IOVP used to determine
	** which TLB entry an IOVP will use.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/*
	** Initialize IOA hardware
	*/
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	/*
	** Go to "Virtual Mode"
	*/
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	** Initialize all I/O TLB entries to 0 (Valid bit off).
	*/
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	for (i = 1 << CCIO_CHAINID_SHIFT; i; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}

static void __init
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;
	/*
	 * The (signed) casts are required on 64-bit kernels because we
	 * only want to sign extend the lower 16 bits of each range
	 * register; the upper 16 bits are hardwired to 0xffff.
	 */
	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;
	/*
	 * Check if this MMIO range is disabled.
	 */
	if (res->end + 1 == res->start)
		return;

	/* On some platforms (e.g. K-Class), we have already registered
	 * resources for devices reported by firmware. Some are children
	 * of ccio.  "insert" the ccio ranges into the MMIO hierarchy
	 * (/proc/iomem).
	 */
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
		       __func__, (unsigned long)res->start, (unsigned long)res->end);
	}
}

static void __init ccio_init_resources(struct ioc *ioc)
{
	struct resource *res = ioc->mmio_region;
	char *name = kmalloc(14, GFP_KERNEL);
	if (!name)	/* unlikely, but don't dereference NULL on OOM */
		return;

	snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

	ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
	ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
}

static int new_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) &~ (align - 1);
	res->end = res->start + size;

	/* We might be trying to expand the MMIO range to include
	 * a child device that has already registered its MMIO space.
	 * Use "insert" instead of request_resource().
	 */
	if (!insert_resource(&iomem_resource, res))
		return 0;

	/* Failed; try again, lower in the address space. */
	return new_ioc_area(res, size, min, max - size, align);
}

static int expand_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	/* First try to grow the range downwards... */
	start = (res->start - size) &~ (align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	/* ...then upwards. */
	start = res->start;
	len = ((size + res->end + align) &~ (align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}
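
/*
** ccio_allocate_resource:
**	Allocate MMIO space for a device from one of this IOA's "routed"
**	MMIO ranges, growing the corresponding range registers when the
**	request doesn't fit in the current window.
*/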
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
			       NULL, NULL))
		return 0;

	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
}

int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res)
{
	struct resource *parent;
	struct ioc *ioc = ccio_get_iommu(dev);

	if (!ioc) {
		parent = &iomem_resource;
	} else if ((ioc->mmio_region->start <= res->start) &&
		   (res->end <= ioc->mmio_region->end)) {
		parent = ioc->mmio_region;
	} else if (((ioc->mmio_region + 1)->start <= res->start) &&
		   (res->end <= (ioc->mmio_region + 1)->end)) {
		parent = ioc->mmio_region + 1;
	} else {
		return -EBUSY;
	}

	/* "transparent" bus bridges need to register MMIO resources
	 * firmware assigned them. e.g. children of hppb.c (e.g. K-class)
	 * registered their resources in the PDC "bus walk" (See
	 * arch/parisc/kernel/inventory.c).
	 */
	return insert_resource(parent, res);
}
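
/**
 * ccio_probe - Determine if ccio should claim this device.
 * @dev: The device which has been found.
 *
 * Determine if ccio should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have a nice new friend.
 */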
static int __init ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;

	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return 1;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name,
	       (unsigned long)dev->hpa.start);

	for (i = 0; i < ioc_count; i++) {
		ioc_p = &(*ioc_p)->next;
	}
	*ioc_p = ioc;

	ioc->hw_path = dev->hw_path;
	ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
	ccio_ioc_init(ioc);
	ccio_init_resources(ioc);
	hppa_dma_ops = &ccio_ops;
	dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);

	/* if this fails, no I/O cards will work, so may as well bug */
	BUG_ON(dev->dev.platform_data == NULL);
	HBA_DATA(dev->dev.platform_data)->iommu = ioc;

#ifdef CONFIG_PROC_FS
	if (ioc_count == 0) {
		proc_create(MODULE_NAME, 0, proc_runway_root,
			    &ccio_proc_info_fops);
		proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
			    &ccio_proc_bitmap_fops);
	}
#endif
	ioc_count++;

	parisc_has_iommu();
	return 0;
}
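
/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver.
 */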
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}