1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/spinlock.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27
28#include <linux/mm.h>
29#include <linux/string.h>
30#include <linux/pci.h>
31#include <linux/scatterlist.h>
32#include <linux/iommu-helper.h>
33
34#include <asm/byteorder.h>
35#include <asm/io.h>
36#include <asm/dma.h>
37
38#include <asm/hardware.h>
39
40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
42#include <linux/module.h>
43
44#include <asm/ropes.h>
45#include <asm/mckinley.h>
46#include <asm/runway.h>
47#include <asm/page.h>
48#include <asm/pdc.h>
49#include <asm/pdcpat.h>
50#include <asm/parisc-device.h>
51
52#define MODULE_NAME "SBA"
53
54
55
56
57
58
59#undef DEBUG_SBA_INIT
60#undef DEBUG_SBA_RUN
61#undef DEBUG_SBA_RUN_SG
62#undef DEBUG_SBA_RESOURCE
63#undef ASSERT_PDIR_SANITY
64#undef DEBUG_LARGE_SG_ENTRIES
65#undef DEBUG_DMB_TRAP
66
67#ifdef DEBUG_SBA_INIT
68#define DBG_INIT(x...) printk(x)
69#else
70#define DBG_INIT(x...)
71#endif
72
73#ifdef DEBUG_SBA_RUN
74#define DBG_RUN(x...) printk(x)
75#else
76#define DBG_RUN(x...)
77#endif
78
79#ifdef DEBUG_SBA_RUN_SG
80#define DBG_RUN_SG(x...) printk(x)
81#else
82#define DBG_RUN_SG(x...)
83#endif
84
85
86#ifdef DEBUG_SBA_RESOURCE
87#define DBG_RES(x...) printk(x)
88#else
89#define DBG_RES(x...)
90#endif
91
92#define SBA_INLINE __inline__
93
94#define DEFAULT_DMA_HINT_REG 0
95
/* Global linked list of all SBA devices probed in this system (head). */
struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

/* Non-zero when the IO pdir must be flushed with "fdc" after each update
 * (see sba_io_pdir_entry()/sba_mark_invalid()). */
static unsigned long ioc_needs_fdc = 0;

/* Count of IOCs found in the system; used to partition memory-scaled
 * IOVA space between them in sba_ioc_init(). */
static unsigned int global_ioc_cnt = 0;

/* Set by sba_alloc_pdir() when 128k of the pdir had to be sacrificed
 * to dodge a Piranha CPU prefetch problem; sba_common_init() shrinks
 * the resource map accordingly. */
static unsigned long piranha_bad_128k = 0;

/* Convert an opaque device cookie to our sba_device. */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif

#ifdef SBA_AGP_SUPPORT
/* When set (default), half of the IO pdir is reserved for AGPGART
 * on Quicksilver parts (see sba_ioc_init_pluto()). */
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
119
120
121
122
123
124
125
126
127
128
129#define READ_REG32(addr) readl(addr)
130#define READ_REG64(addr) readq(addr)
131#define WRITE_REG32(val, addr) writel((val), (addr))
132#define WRITE_REG64(val, addr) writeq((val), (addr))
133
134#ifdef CONFIG_64BIT
135#define READ_REG(addr) READ_REG64(addr)
136#define WRITE_REG(value, addr) WRITE_REG64(value, addr)
137#else
138#define READ_REG(addr) READ_REG32(addr)
139#define WRITE_REG(value, addr) WRITE_REG32(value, addr)
140#endif
141
142#ifdef DEBUG_SBA_INIT
143
144
145
146
147
148
149
150
151
152
/**
 * sba_dump_ranges - debugging only - print IO address ranges
 * @hpa: base address of the SBA (self-mapped I/O addresses)
 *
 * Dump the distributed- and direct-range routing registers.
 * Only compiled in when DEBUG_SBA_INIT is defined.
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}
165
166
167
168
169
170
171
/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Dump the base/mask/config registers and the IO pdir base of one IOC.
 * Only compiled in when DEBUG_SBA_INIT is defined.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
181#else
182#define sba_dump_ranges(x)
183#define sba_dump_tlb(x)
184#endif
185
186
187#ifdef ASSERT_PDIR_SANITY
188
189
190
191
192
193
194
195
196
/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print the full BITS_PER_LONG-entry group of IO pdir entries that
 * contains @pide, next to the resource-map word covering them.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* ~0U * BITS_PER_LONG relies on unsigned wraparound to act as the
	 * mask ~(BITS_PER_LONG - 1): round pide down to the start of its
	 * BITS_PER_LONG-entry group. */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	/* matching resource-map word (byte index = pide/8, long-aligned) */
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		 msg,
		 rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}
220
221
222
223
224
225
226
227
228
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and IO pdir state are consistent: every bit
 * set in the resource map must correspond to a valid pdir entry and
 * vice versa.  Returns 1 (and dumps the offender) on mismatch, 0 if OK.
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get the "valid" byte (byte 7 of the LE-stored
			 * entry) aligned so its top bit lines up with the
			 * current resource-map bit in rval. */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				 * BUMMER!  -- res_map != pdir --
				 * Dump rval and matching pdir entries.
				 */
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}
263
264
265
266
267
268
269
270
271
272
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * Print DMA address/length and CPU address/length of each SG entry.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg),
				sg_dma_len(startsg),
				sg_virt_addr(startsg), startsg->length);
		startsg++;
	}
}
285
286#endif
287
288
289
290
291
292
293
294
295
296
297
298
299
/* Looks nice and keeps the compiler happy */
#define PAGES_PER_RANGE 1

/* Convert from IOVP (IO virtual page) to IOVA (IO virtual address)
 * and back.  The ZX1 variant folds in the IOC's base address. */
#ifdef ZX1_SUPPORT
/* on the ZX1, the virtual ranges don't start at 0 */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* Ike/Astro: the IOVA space starts at 0 */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

/* IO virtual page -> index into the IO pdir */
#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

/* n contiguous bits, left-justified in an unsigned long */
#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
/* low bits of a res_map byte index that must be cleared for long access */
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
318
319static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
320 unsigned int bitshiftcnt)
321{
322 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
323 + bitshiftcnt;
324}
325
326
327
328
329
330
331
332
333
334
/**
 * sba_search_bitmap - find free space in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device for which the mapping is made (segment-boundary source)
 * @bits_wanted: number of entries we need.
 *
 * Find @bits_wanted consecutive free bits in the resource bitmap.
 * Each bit represents one entry in the IO pdir.  Searching starts
 * from ioc->res_hint (rotating-hint allocator).  Candidate ranges
 * that would cross the device's DMA segment boundary are rejected
 * via iommu_is_span_boundary().  Returns the starting pdir index,
 * or ~0UL if no space was found before the end of the map (the
 * caller retries once after the hint is reset — see sba_alloc_range()).
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	/* segment boundary expressed in IO pages */
	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

#if defined(ZX1_SUPPORT)
	/* boundary checks are relative to the start of IOVA space */
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		 * Search the resource map on a "naturally aligned"
		 * boundary: round the request up to the next power of
		 * two (o) and step the mask through each word o bits
		 * at a time.
		 */
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while(res_ptr < res_end)
		{ 
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;     /* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				/* exhausted this word, move to the next */
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt=0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? Restart the hint at the beginning of the bitmap */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}
424
425
426
427
428
429
430
431
432
433
/**
 * sba_alloc_range - find free bits and mark them in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is for
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked and then mark those bits in the
 * resource bit map.  Returns the starting pdir index.  Panics if the
 * IOVA space is exhausted even after a second (post-wrap) search.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		/* first search wrapped/failed; the hint was reset, retry
		 * once from the beginning of the map */
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}
478
479
480
481
482
483
484
485
486
487
/**
 * sba_free_range - unmark bits in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to mark free in IO pdir resource bitmap.
 *
 * Clears the resource-map bits that sba_alloc_range() set for this
 * mapping.  Note: assumes the range does not straddle an unsigned
 * long boundary in the map (allocations are naturally aligned).
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}
511
512
513
514
515
516
517
518
519#ifdef SBA_HINT_SUPPORT
520#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
521#endif
522
523typedef unsigned long space_t;
524#define KERNEL_SPACE 0
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
/**
 * sba_io_pdir_entry - fill in one IO pdir entry
 * @pdir_ptr: pointer to IO pdir entry
 * @sid: process Space ID - currently only supports KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping (unused here)
 *
 * Loads the physical page frame of @vba plus its coherence index
 * (obtained with the PA-RISC "lci" instruction for the given
 * space/virtual address) into *pdir_ptr and marks the entry valid.
 * Entries are stored little-endian (hardware format).
 */
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;

	mtsp(sid,1);	/* set sr1 so lci uses the right space */
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> PAGE_SHIFT) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the IOC cannot snoop pdir writes (ioc_needs_fdc set at init
	 * time), flush the cache line holding this entry so the hardware
	 * sees the update.
	 */
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
/**
 * sba_mark_invalid - invalidate one or more IO pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
 * Clears the "valid" byte of each pdir entry covering [iova,
 * iova+byte_cnt), flushing cache lines if the IOC requires it, then
 * purges the IO TLB via the PCOM register.  For multi-page ranges the
 * purge size is encoded in the low bits of the IOVP written to PCOM.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	 *
	 * Even though this is a big-endian machine, the entries
	 * in the iopdir are little endian. That's why we look at
	 * the byte at +7 instead of at +0.
	 */
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			if (ioc_needs_fdc) {
				asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;     /* set "size" field for PCOM */

	/*
	 * clear I/O PDIR entry "valid" bit.
	 * We have to R/M/W the cacheline regardless how much of the
	 * pdir entry that we clobber.
	 * The rest of the entry would be useful for debugging if we
	 * could dump core on HPMC.
	 */
	((u8 *) pdir_ptr)[7] = 0;
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
667
668
669
670
671
672
673
674
/**
 * sba_dma_supported - PCI driver support routine
 * @dev: instance of PCI owned by the driver that's asking
 * @mask: number of address bits this PCI device can handle
 *
 * Returns 1 if the device's DMA mask can address the whole mapped
 * IOVA range of its IOC, 0 otherwise.  Masks wider than 32 bits are
 * rejected — all DMA goes through the 32-bit IOVA window anyway.
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
	 * first, then fall back to 32-bit if that fails.
	 * We are just "encouraging" 32-bit DMA masks here since we can
	 * never allow IOMMU bypass unless we add special support for ZX1.
	 */
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);

	/*
	 * check if mask is >= than the current max IO Virt Address
	 * The max IO Virt address will *always* < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}
702
703
704
705
706
707
708
709
710
711
712
/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr: driver buffer to map.
 * @size: number of bytes to map in driver buffer.
 * @direction: DMA direction (unused — pdir entries are direction-agnostic)
 *
 * Allocates a naturally aligned run of IO pdir entries covering the
 * buffer, fills them in, and returns the bus (IO virtual) address with
 * the sub-page offset folded back in.
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags; 
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);
	/* fill one pdir entry per IO page */
	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
781
782
783
784
785
786
787
788
789
790
791
/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova: IOVA of driver buffer previously mapped.
 * @size: number of bytes mapped in driver buffer.
 * @direction: DMA direction (unused)
 *
 * Invalidates the pdir entries for the mapping, purges the IO TLB,
 * and returns the pdir range to the resource map — either immediately
 * or (when DELAYED_RESOURCE_CNT > 0) in batches to amortize the TLB
 * purge read-back.
 */
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		 enum dma_data_direction direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags; 
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	/* strip the sub-page offset and round size up to whole IO pages */
	offset = iova & ~IOVP_MASK;
	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use a IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX: pcxl2 - "Need way to know if IOMMU was used for streaming"
	 * For Astro based systems this isn't a big deal WRT performance.
	 * As long as 2.4 kernels copyin/copyout data from/to userspace,
	 * we don't need the syncdma. The issue here is I/O MMU cachelines
	 * are *not* coherent in all cases.  May be hwrev dependent.
	 * Need to investigate more. -- TODO(review): confirm; this note
	 * describes a suspected, not verified, limitation.
	 */
}
858
859
860
861
862
863
864
865
866
867
868static void *sba_alloc_consistent(struct device *hwdev, size_t size,
869 dma_addr_t *dma_handle, gfp_t gfp)
870{
871 void *ret;
872
873 if (!hwdev) {
874
875 *dma_handle = 0;
876 return NULL;
877 }
878
879 ret = (void *) __get_free_pages(gfp, get_order(size));
880
881 if (ret) {
882 memset(ret, 0, size);
883 *dma_handle = sba_map_single(hwdev, ret, size, 0);
884 }
885
886 return ret;
887}
888
889
890
891
892
893
894
895
896
897
898
/**
 * sba_free_consistent - free/unmap a consistent DMA buffer
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @vaddr: CPU virtual address returned by sba_alloc_consistent().
 * @dma_handle: IOVA of driver buffer previously mapped.
 *
 * Unmap first, then release the pages.
 */
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}
906
907
908
909
910
911
912
913#define PIDE_FLAG 0x80000000UL
914
915#ifdef SBA_COLLECT_STATS
916#define IOMMU_MAP_STATS
917#endif
918#include "iommu-helpers.h"
919
920#ifdef DEBUG_LARGE_SG_ENTRIES
921int dump_run_sg = 0;
922#endif
923
924
925
926
927
928
929
930
931
932
933
/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: DMA direction (unused)
 *
 * Coalesces virtually contiguous chunks into single DMA mappings
 * (iommu_coalesce_chunks), then fills in the IO pdir for each
 * (iommu_fill_pdir).  Returns the number of DMA mappings produced.
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev,
						(void *)sg_virt_addr(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map 
	** o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: DMA direction (unused)
 *
 * Walks the list, unmapping each coalesced DMA chunk (entries with a
 * zero dma_len terminate the walk early).
 */
static void 
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		__func__, nents, sg_virt_addr(sglist), sglist->length);

	ioc = GET_IOC(dev);

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}
1061
/* DMA operations exported to the parisc DMA layer.  The sync hooks are
 * NULL: this IOMMU's mappings are coherent, no explicit sync needed. */
static struct hppa_dma_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc_consistent =	sba_alloc_consistent,
	.alloc_noncoherent =	sba_alloc_consistent,
	.free_consistent =	sba_free_consistent,
	.map_single =		sba_map_single,
	.unmap_single =		sba_unmap_single,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.dma_sync_single_for_cpu =	NULL,
	.dma_sync_single_for_device =	NULL,
	.dma_sync_sg_for_cpu =		NULL,
	.dma_sync_sg_for_device =	NULL,
};
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
/**
 * sba_get_pat_resources - discover IOC configuration via PDC PAT
 * @sba_dev: the SBA device instance
 *
 * Placeholder — the PAT query code is stubbed out (#if 0) and this
 * function currently does nothing.
 */
static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**      PAT PDC to query the IOC's resources.
*/
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}
1105
1106
1107
1108
1109
1110
1111
/* Physical address pattern that triggers the Piranha CPU problem
 * worked around below — NOTE(review): exact erratum semantics are
 * hardware lore; the code only shows the address test itself. */
#define PIRANHA_ADDR_MASK	0x00160000UL /* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL /* bit 17,18 on */

/**
 * sba_alloc_pdir - allocate the IO pdir, dodging bad Piranha addresses
 * @pdir_size: number of bytes of pdir to allocate
 *
 * Allocates page-aligned memory for the IO pdir.  On certain PA8700
 * (cpuid 0x13) revisions (versions <= 0x202 without capability bit
 * 0x08), memory whose physical address matches PIRANHA_ADDR_MASK/VAL
 * must not be used: the allocation is redone with extra alignment or
 * the offending 128k is sacrificed (piranha_bad_128k, compensated for
 * later in sba_common_init()).  Panics on allocation failure.
 */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base)	{
		panic("%s() could not allocate I/O Page Table\n",
			__func__);
	}

	/* If this system isn't an affected Piranha revision, any
	 * allocation is fine — return it as-is. */
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * PA8700 (Piranha 2.2) bug workaround:
	 * for small pdirs (<= 512k) re-allocate at 512k alignment so the
	 * pdir cannot land on a bad address, then free the unused tail.
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a naturally aligned 512k chunk instead */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));

			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess pages beyond what was asked for */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order +=1;
				pdir_size <<=1;
			}
		}
	} else {
		/*
		 * Larger pdir: over-allocate by 2x, skip the first 1MB,
		 * and free the pieces we don't keep.
		 */
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1);

		/* release original allocation */
		free_pages( pdir_base, pdir_order);

		/* release the first 1MB of the over-allocation */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			 * 2MB or larger pdir: can't fully dodge the bad
			 * range; give up 128k of pdir instead and record
			 * it so the resource map is shrunk to match.
			 */
			piranha_bad_128k = 1;

			/* 1st 1MB already released above; release the
			 * 4th 1MB chunk and the 128k just before it. */
			new_pdir += 3*1024*1024;

			free_pages(new_pdir, 20-12);

			/* "fix" the pdir_size to hide the sacrificed 128k */
			free_pages(new_pdir - 128*1024 , 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}
1210
/* Context passed through device_for_each_child() to the callback below. */
struct ibase_data_struct {
	struct ioc *ioc;	/* IOC whose ibase/imask to program */
	int ioc_num;		/* which IOC (ropes 0-7 -> 0, 8-15 -> 1) */
};

/*
 * Push this IOC's ibase/imask into each LBA child that hangs off one
 * of its ropes.  The rope number is encoded in bits [16:13] of the
 * LBA's HPA; ropes 0-7 belong to IOC 0, ropes 8-15 to IOC 1.
 */
static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	/* lba_set_iregs() is provided by the LBA driver */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;
	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}
1227
1228
1229static void
1230setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1231{
1232 struct ibase_data_struct ibase_data = {
1233 .ioc = ioc,
1234 .ioc_num = ioc_num,
1235 };
1236
1237 device_for_each_child(&sba->dev, &ibase_data,
1238 setup_ibase_imask_callback);
1239}
1240
#ifdef SBA_AGP_SUPPORT
/*
 * device_for_each_child() callback: set *data (an int flag) if any
 * child of the SBA is a Quicksilver LBA (AGP-capable part).
 */
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif
1253
/**
 * sba_ioc_init_pluto - initialize one IOC on a Pluto-based SBA
 * @sba: the parisc SBA device
 * @ioc: the IOC to initialize
 * @ioc_num: index of this IOC within the SBA
 *
 * Unlike Ike/Astro (see sba_ioc_init()), firmware has already chosen
 * the IOVA window: read ibase/imask back from the hardware, allocate
 * and program a matching IO pdir, enable translation, and optionally
 * reserve half the pdir for AGPGART when a Quicksilver LBA is present.
 */
static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__func__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT
	/*
	** If an AGP device (Quicksilver) is present, reserve the upper
	** half of the IO pdir for use by the AGPGART driver and plant
	** a cookie there so agpgart can find it.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /*SBA_AGP_SUPPORT*/
}
1374
/**
 * sba_ioc_init - initialize one IOC on an Ike/Astro SBA
 * @sba: the parisc SBA device
 * @ioc: the IOC to initialize
 * @ioc_num: index of this IOC within the SBA
 *
 * Unlike Pluto (where firmware picks the window), here we size the
 * IOVA space ourselves: roughly 1/4 of physical memory per IOC,
 * clamped to [1MB, 1GB] and rounded to a power of two.  Then allocate
 * the pdir (with Piranha workaround), program base/mask/page-size
 * registers, enable translation, and flush the IO TLB.
 */
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing is to guess how much DMA-able memory
	** will be needed; here: total pages / number of IOCs.
	** NOTE(review): with PAGE_SHIFT==12 and IOVP_SHIFT==12 this
	** yields 1 IO page of IOVA per page of RAM — verify intent
	** for other page sizes.
	*/
	iova_space_size = (u32) (totalram_pages/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
			__func__,
			ioc->ioc_hpa,
			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	**     ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__func__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	/* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __func__);
}
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
1510{
1511 return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
1512}
1513
/**
 * sba_hw_init - initialize the hardware for one SBA
 * @sba_dev: the SBA device to initialize
 *
 * Identifies the SBA flavor (Astro / Pluto / Ike), maps its IOC
 * register sets, claims flavor-specific MMIO resource ranges, enables
 * hardware-fail on each rope, and finally calls the per-IOC init
 * routine (sba_ioc_init_pluto() or sba_ioc_init()).
 */
static void sba_hw_init(struct sba_device *sba_dev)
{ 
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/* Shutdown the USB controller on Astro-based workstations.
		** Once we reprogram the IOMMU, the next DMA performed by
		** PDC's USB keyboard driver could HPMC the box.
		**
		** With serial console, j6k v5.0 firmware says:
		**    mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
		**
		** FIXME: Using GFX+USB console at power up but direct
		**	console to SERIAL_1 port will deadlock on the
		**	serial port when the kernel boots — TODO(review):
		**	confirm; this is the historical reason for gating
		**	on the keyboard console class below.
		*/
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}


#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
	PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	/*
	** Need to deal with DMA from LAN.
	**	Maybe use page zero boot device as a handle to talk
	**	to PDC about which device to shutdown.
	**
	** page zero contains the boot device handle (console or network).
	*/
	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
			pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		/* flip the error-detection/TOC control bits in IOC_CTRL */
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
			/* j6700 v1.6 firmware sets 0x294f */
			/* A500 firmware sets 0x4d */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end   = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* IKE, REO: two IOCs per SBA */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}
	/* XXX: What about Reo Grande? */

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {

			/*
			 * Clear ROPE(N)_CONFIG AO bit on Pluto.
			 * Overrides bit 1 in DMA Hint Sets.
			 * Improves netbench perf on A500 with one BCM
			 * BCM5700 per rope — NOTE(review): perf rationale
			 * inherited, not re-measured.
			 */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Make sure the box crashes on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
				i,
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL 0x%Lx  FLUSH_CTRL 0x%Lx\n",
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}
1668
/*
 * sba_common_init - initialization shared by all SBA chip variants.
 * @sba_dev: SBA instance whose IOCs were already set up by the HW init path.
 *
 * Links the device into the global sba_list, allocates and primes the
 * per-IOC resource bitmap (one bit per IO pdir entry), applies the
 * Piranha bad-128k workaround, and records whether the CPU requires
 * FDC/SYNC instructions to flush IO pdir writes to memory.
 */
static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* Add this instance to the head of the global chain; lookup order
	 * does not matter.
	 */
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					void (*)(pte_t * , unsigned long),
					unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* Number of IO pdir entries: pdir bytes / 8 bytes per entry. */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64);

		/* Piranha workaround: exclude the last 128k worth of pdir
		 * entries from the usable pool. */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;  /* one bit per entry -> bitmap size in bytes */
		DBG_INIT("%s() res_size 0x%x\n",
			__func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		/* Cannot run without the resource map: fatal at boot. */
		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__ );
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);

		/* Start the circular bitmap search one cache line into the
		 * map rather than at byte 0. */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark the first pdir entry busy and poison it so stray use
		 * of IOVA 0 is detectable. */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Piranha workaround, part two: permanently reserve the
		 * bitmap range covering pdir bytes 1408k..1536k. */
		if (piranha_bad_128k) {
			/* bitmap byte indices: bytes / 8 per entry / 8 per bit */
			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* Set every bit in the range (all-ones = in use). */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	/* Does this CPU need FDC/SYNC to make IO pdir writes visible? */
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities flag is set, the IO pdir is not
	 * cache coherent and every pdir update needs a flush/sync.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}
1767
1768#ifdef CONFIG_PROC_FS
1769static int sba_proc_info(struct seq_file *m, void *p)
1770{
1771 struct sba_device *sba_dev = sba_list;
1772 struct ioc *ioc = &sba_dev->ioc[0];
1773 int total_pages = (int) (ioc->res_size << 3);
1774#ifdef SBA_COLLECT_STATS
1775 unsigned long avg = 0, min, max;
1776#endif
1777 int i, len = 0;
1778
1779 len += seq_printf(m, "%s rev %d.%d\n",
1780 sba_dev->name,
1781 (sba_dev->hw_rev & 0x7) + 1,
1782 (sba_dev->hw_rev & 0x18) >> 3
1783 );
1784 len += seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1785 (int) ((ioc->res_size << 3) * sizeof(u64)),
1786 total_pages);
1787
1788 len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1789 ioc->res_size, ioc->res_size << 3);
1790
1791 len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
1792 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
1793 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
1794 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
1795 );
1796
1797 for (i=0; i<4; i++)
1798 len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
1799 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
1800 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
1801 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
1802 );
1803
1804#ifdef SBA_COLLECT_STATS
1805 len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1806 total_pages - ioc->used_pages, ioc->used_pages,
1807 (int) (ioc->used_pages * 100 / total_pages));
1808
1809 min = max = ioc->avg_search[0];
1810 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1811 avg += ioc->avg_search[i];
1812 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1813 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1814 }
1815 avg /= SBA_SEARCH_SAMPLE;
1816 len += seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1817 min, avg, max);
1818
1819 len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1820 ioc->msingle_calls, ioc->msingle_pages,
1821 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1822
1823
1824 min = ioc->usingle_calls;
1825 max = ioc->usingle_pages - ioc->usg_pages;
1826 len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1827 min, max, (int) ((max * 1000)/min));
1828
1829 len += seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1830 ioc->msg_calls, ioc->msg_pages,
1831 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
1832
1833 len += seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1834 ioc->usg_calls, ioc->usg_pages,
1835 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
1836#endif
1837
1838 return 0;
1839}
1840
1841static int
1842sba_proc_open(struct inode *i, struct file *f)
1843{
1844 return single_open(f, &sba_proc_info, NULL);
1845}
1846
/* File operations for the "sba_iommu" proc entry (seq_file plumbing). */
static const struct file_operations sba_proc_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1854
1855static int
1856sba_proc_bitmap_info(struct seq_file *m, void *p)
1857{
1858 struct sba_device *sba_dev = sba_list;
1859 struct ioc *ioc = &sba_dev->ioc[0];
1860 unsigned int *res_ptr = (unsigned int *)ioc->res_map;
1861 int i, len = 0;
1862
1863 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
1864 if ((i & 7) == 0)
1865 len += seq_printf(m, "\n ");
1866 len += seq_printf(m, " %08x", *res_ptr);
1867 }
1868 len += seq_printf(m, "\n");
1869
1870 return 0;
1871}
1872
1873static int
1874sba_proc_bitmap_open(struct inode *i, struct file *f)
1875{
1876 return single_open(f, &sba_proc_bitmap_info, NULL);
1877}
1878
/* File operations for the "sba_iommu-bitmap" proc entry. */
static const struct file_operations sba_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1886#endif
1887
/* Device IDs this driver binds to: Astro and Pluto I/O adapters plus the
 * Ike/REO/REOG Merced bus-converter ports.
 */
static struct parisc_device_id sba_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};
1896
static int sba_driver_callback(struct parisc_device *);

/* parisc bus glue: probe every device matching sba_tbl via
 * sba_driver_callback().
 */
static struct parisc_driver sba_driver = {
	.name = MODULE_NAME,
	.id_table = sba_tbl,
	.probe = sba_driver_callback,
};
1904
1905
1906
1907
1908
1909
1910static int sba_driver_callback(struct parisc_device *dev)
1911{
1912 struct sba_device *sba_dev;
1913 u32 func_class;
1914 int i;
1915 char *version;
1916 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
1917#ifdef CONFIG_PROC_FS
1918 struct proc_dir_entry *root;
1919#endif
1920
1921 sba_dump_ranges(sba_addr);
1922
1923
1924 func_class = READ_REG(sba_addr + SBA_FCLASS);
1925
1926 if (IS_ASTRO(dev)) {
1927 unsigned long fclass;
1928 static char astro_rev[]="Astro ?.?";
1929
1930
1931 fclass = READ_REG(sba_addr);
1932
1933 astro_rev[6] = '1' + (char) (fclass & 0x7);
1934 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
1935 version = astro_rev;
1936
1937 } else if (IS_IKE(dev)) {
1938 static char ike_rev[] = "Ike rev ?";
1939 ike_rev[8] = '0' + (char) (func_class & 0xff);
1940 version = ike_rev;
1941 } else if (IS_PLUTO(dev)) {
1942 static char pluto_rev[]="Pluto ?.?";
1943 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
1944 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
1945 version = pluto_rev;
1946 } else {
1947 static char reo_rev[] = "REO rev ?";
1948 reo_rev[8] = '0' + (char) (func_class & 0xff);
1949 version = reo_rev;
1950 }
1951
1952 if (!global_ioc_cnt) {
1953 global_ioc_cnt = count_parisc_driver(&sba_driver);
1954
1955
1956 if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
1957 global_ioc_cnt *= 2;
1958 }
1959
1960 printk(KERN_INFO "%s found %s at 0x%llx\n",
1961 MODULE_NAME, version, (unsigned long long)dev->hpa.start);
1962
1963 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
1964 if (!sba_dev) {
1965 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
1966 return -ENOMEM;
1967 }
1968
1969 parisc_set_drvdata(dev, sba_dev);
1970
1971 for(i=0; i<MAX_IOC; i++)
1972 spin_lock_init(&(sba_dev->ioc[i].res_lock));
1973
1974 sba_dev->dev = dev;
1975 sba_dev->hw_rev = func_class;
1976 sba_dev->name = dev->name;
1977 sba_dev->sba_hpa = sba_addr;
1978
1979 sba_get_pat_resources(sba_dev);
1980 sba_hw_init(sba_dev);
1981 sba_common_init(sba_dev);
1982
1983 hppa_dma_ops = &sba_ops;
1984
1985#ifdef CONFIG_PROC_FS
1986 switch (dev->id.hversion) {
1987 case PLUTO_MCKINLEY_PORT:
1988 root = proc_mckinley_root;
1989 break;
1990 case ASTRO_RUNWAY_PORT:
1991 case IKE_MERCED_PORT:
1992 default:
1993 root = proc_runway_root;
1994 break;
1995 }
1996
1997 proc_create("sba_iommu", 0, root, &sba_proc_fops);
1998 proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
1999#endif
2000
2001 parisc_has_iommu();
2002 return 0;
2003}
2004
2005
2006
2007
2008
2009
/*
 * sba_init - boot-time entry point: register the SBA driver so the
 * parisc bus walk probes matching devices via sba_driver_callback().
 */
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023void * sba_get_iommu(struct parisc_device *pci_hba)
2024{
2025 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2026 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2027 char t = sba_dev->id.hw_type;
2028 int iocnum = (pci_hba->hw_path >> 3);
2029
2030 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
2031
2032 return &(sba->ioc[iocnum]);
2033}
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
/*
 * sba_directed_lmmio - find the directed LMMIO range routed to a rope.
 * @pci_hba: PCI host bridge hanging off one of this SBA's ropes.
 * @r: resource to fill in; left as start = end = 0 when no enabled
 *     directed range targets this rope.
 *
 * Scans the four LMMIO_DIRECT register sets (BASE/MASK/ROUTE, spaced
 * 0x18 bytes apart) and reports the range whose ROUTE register points
 * at the bridge's rope.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));

	/* Parent of a PCI rope must be an SBA. */
	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	/* Check each of the four directed ranges. */
	for (i=0; i<4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* range not enabled (bit 0 clear) */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed at a different rope */

		/* Strip the enable bit and extend to a full physical addr. */
		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}
2076
2077
2078
2079
2080
2081
2082
2083
2084
2085
2086
/*
 * sba_distributed_lmmio - compute a rope's slice of distributed LMMIO.
 * @pci_hba: PCI host bridge hanging off one of this SBA's ropes.
 * @r: resource to fill in.
 *
 * Reads LMMIO_DIST_BASE/MASK, divides the distributed range evenly
 * among ROPES_PER_IOC ropes and offsets the base by this rope's index.
 * BUGs if the parent is not an SBA or the distributed range is not
 * enabled (base register bit 0 clear).
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));

	/* Parent of a PCI rope must be an SBA. */
	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* distributed LMMIO range not enabled */
		return;
	}

	/* Strip the enable bit and extend to a full physical address. */
	r->start = (base & ~1UL) | PCI_F_EXTEND;

	/* MASK complement is the total span; split it evenly per rope. */
	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* offset base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}
2112