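/*
 * System Bus Adapter (SBA) I/O MMU manager for HP PA-RISC platforms
 * (Astro, Ike/REO and Pluto Runway/Merced bus adapters).
 *
 * The SBA contains an I/O controller ("IOC") that translates 32-bit PCI
 * bus addresses into system physical addresses through an I/O page table
 * (the "pdir").  This file manages the pdir, the IOVA resource bitmap,
 * and the DMA mapping operations exported through hppa_dma_ops.
 */
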
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/hardware.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>
#include <asm/runway.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/parisc-device.h>

#define MODULE_NAME "SBA"

#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

static unsigned long ioc_needs_fdc = 0;

/* count of IOCs managed by this driver (set in sba_driver_callback) */
static unsigned int global_ioc_cnt = 0;

/* set by sba_alloc_pdir(): tells sba_common_init() to reserve a 128k
 * hole in the resource map (CPU prefetch bug workaround). */
static unsigned long piranha_bad_128k = 0;

#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif

#ifdef DEBUG_SBA_INIT
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif

#ifdef ASSERT_PDIR_SANITY
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >> 3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg,
		rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? " -->" : "    ",
			rcnt, ptr, *ptr);
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/*
 * Verify that the resource bitmap and the "valid" bits in the pdir
 * agree: every allocated bitmap bit must have a valid pdir entry and
 * vice versa.  Returns 1 (and dumps the offending entry) on mismatch.
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;
	u64 *pptr = ioc->pdir_base;
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* get the "valid" bit from byte 7 of the pdir entry */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000) {
				/* res_map and pdir disagree */
				sba_dump_pdir_entry(ioc, msg, pide);
				return 1;
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	return 0;
}


/* print the SG list so it can be verified by hand */
static void
sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
			nents,
			(unsigned long) sg_dma_address(startsg),
			sg_dma_len(startsg),
			sg_virt_addr(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */


/* I/O pdir resource management: one bit in ioc->res_map per IOVP page. */

#define PAGES_PER_RANGE 1

/* Convert between IOVP (pdir page offset) and IOVA (bus address). */
#ifdef ZX1_SUPPORT
/* IOVA space sits above ioc->ibase, so fold it in / mask it out. */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
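
/*
 * Illustration (not from the original source): RESMAP_MASK(n) builds a
 * left-justified run of n set bits.  On a 64-bit kernel, for example,
 * RESMAP_MASK(3) == 0xE000000000000000UL; sba_search_bitmap() shifts it
 * right by the current bit offset to test/claim a free run of three
 * consecutive IOVP pages within one word of the resource map.
 */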

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}

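/*
 * sba_search_bitmap - find a contiguous run of free IOVP pages.
 *
 * Two strategies, chosen by request size:
 *  - requests larger than BITS_PER_LONG/2 pages are satisfied only from
 *    completely free resource-map words (cheap, but can waste space);
 *  - smaller requests slide a RESMAP_MASK() window through each word,
 *    naturally aligned to the request's order.
 * Either way, iommu_is_span_boundary() rejects candidates that would
 * cross the device's DMA segment boundary.  Returns the pdir index
 * ("pide") of the first page, or ~0UL if nothing was found.
 */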
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for (; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on the next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		 * Search the resource map word by word, starting at the
		 * next available/aligned bit offset.
		 */
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while (res_ptr < res_end) {
			DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;	/* mark resources busy */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}

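/*
 * sba_alloc_range - reserve "size" bytes worth of IOVP pages.
 * Retries the bitmap search once (the first pass may fail because it
 * starts at ioc->res_hint rather than at the beginning of the map) and
 * panics if the IOVA space is truly exhausted.
 */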
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if (0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}

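/*
 * sba_free_range - return a run of IOVP pages to the resource map.
 * Clears the bits that sba_alloc_range() set; the matching pdir
 * entries have already been invalidated by sba_mark_invalid().
 */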
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}


/*
 * "Dynamic DMA Mapping" support (aka "Coherent I/O").
 */

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

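/*
 * sba_io_pdir_entry - fill in one 64-bit I/O pdir entry.
 *
 * The entry holds the page's physical address, the coherence index
 * obtained with the LCI instruction, and the "valid" bit (which ends up
 * in byte 7 of the stored little-endian entry).  If the IOC cannot snoop
 * the pdir in memory (ioc_needs_fdc), the cache line holding the entry
 * is flushed with FDC so the IOC sees the update.
 */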
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa;			/* physical address */
	register unsigned ci;	/* coherent index */

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;

	mtsp(sid,1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> 12) & 0xff;	/* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O pdir */

	/*
	 * If the IOC cannot snoop the pdir (PDC reported a non-coherent
	 * IO-PDIR), flush the cache line so the IOC sees the update.
	 */
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}

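/*
 * sba_mark_invalid - clear the "valid" bit of the pdir entries backing
 * an IOVA range and purge the matching I/O TLB entries.
 *
 * Clearing byte 7 (the byte holding the valid bit) is enough to make an
 * entry invalid.  The I/O TLB purge goes through the PCOM register; the
 * low bits of the address written there encode log2 of the range size.
 */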
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			if (ioc_needs_fdc) {
				asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/* clear "valid" bit of the last (or only) entry */
	((u8 *) pdir_ptr)[7] = 0;
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

	WRITE_REG(SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}


/*
 * sba_dma_supported - can this device DMA with the given mask?
 */
static int sba_dma_supported(struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only 32-bit PCI bus addresses are supported */
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);

	/* the mask must cover the highest IOVA this IOC will hand out */
	return (int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE)));
}

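/*
 * sba_map_single - map a kernel-virtual buffer for streaming DMA.
 *
 * Rounds the request out to whole IOVP pages, allocates a run of pdir
 * entries under ioc->res_lock, fills one pdir entry per page, and
 * returns the bus address (the new IOVA plus the buffer's offset into
 * its first page).  When the pdir is not snooped by the IOC, the FDC
 * flushes in sba_io_pdir_entry() plus the trailing "sync" make the
 * updates visible before the device can use the translation.
 */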
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);

	/* save offset below IOVP_SIZE alignment */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}

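/*
 * sba_unmap_single - tear down a mapping created by sba_map_single().
 *
 * Invalidates the pdir entries and purges the I/O TLB, then returns the
 * IOVP pages to the resource map.  With DELAYED_RESOURCE_CNT > 0 the
 * bitmap release is batched: freed ranges are parked in ioc->saved[]
 * and only handed back (with a single PCOM read-back to flush the
 * purges) once the array fills up.
 */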
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		 enum dma_data_direction direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	offset = iova & ~IOVP_MASK;
	iova ^= offset;		/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use an IO pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);
}


/*
 * sba_alloc_consistent - allocate and map memory for coherent DMA.
 */
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/*
 * sba_free_consistent - unmap and free memory from sba_alloc_consistent().
 */
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
 * Since 0 is a valid pdir index, it can't be used to flag "no index".
 * The iommu helpers set this bit in dma_address to mark SG entries that
 * carry a valid pdir index.
 */
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif

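/*
 * sba_map_sg - map a scatter/gather list for DMA.
 *
 * Single-entry lists are handed straight to sba_map_single().  Larger
 * lists are processed in two passes by the shared iommu helpers:
 * iommu_coalesce_chunks() merges virtually contiguous entries and
 * reserves IOVA ranges (via sba_alloc_range), then iommu_fill_pdir()
 * writes the pdir entries (via sba_io_pdir_entry) and fixes up the
 * dma_address/dma_len fields.  Returns the number of DMA chunks.
 */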
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev,
						(void *)sg_virt_addr(sglist),
						sglist->length, direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/* First pass: coalesce chunks and allocate the IOVA space. */
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	 * Second pass: fill in the pdir entries and set the
	 * dma_address/dma_len fields of the coalesced chunks.
	 */
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}

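/*
 * sba_unmap_sg - tear down a scatter/gather mapping.
 * Each coalesced DMA chunk is released with sba_unmap_single() until an
 * entry with a zero dma length is reached (the end of the mapped list).
 */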
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sg_virt_addr(sglist), sglist->length);

	ioc = GET_IOC(dev);

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static struct hppa_dma_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc_consistent =	sba_alloc_consistent,
	.alloc_noncoherent =	sba_alloc_consistent,
	.free_consistent =	sba_free_consistent,
	.map_single =		sba_map_single,
	.unmap_single =		sba_unmap_single,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.dma_sync_single_for_cpu =	NULL,
	.dma_sync_single_for_device =	NULL,
	.dma_sync_sg_for_cpu =		NULL,
	.dma_sync_sg_for_device =	NULL,
};


/*
 * Initialization and claim.
 */

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


#define PIRANHA_ADDR_MASK	0x00160000UL
#define PIRANHA_ADDR_VAL	0x00060000UL
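
/*
 * sba_alloc_pdir - allocate the I/O page table (pdir).
 *
 * Normally this is a plain __get_free_pages() allocation.  For one
 * affected CPU revision (the cpuid/versions/capabilities check below,
 * the "Piranha" workaround), the pdir must not overlap a particular
 * 128k address pattern (PIRANHA_ADDR_MASK/VAL), so the allocation is
 * redone or over-allocated and trimmed; in the worst case a 128k hole
 * remains and piranha_bad_128k tells sba_common_init() to mark the
 * corresponding resource-map bits busy.
 */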
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base) {
		panic("%s() could not allocate I/O Page Table\n",
			__func__);
	}

	/*
	 * Only one specific CPU model/revision (checked via PDC data
	 * below) suffers from the pdir placement bug; everything else
	 * can use the allocation as-is.
	 */
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * Affected CPUs must not see the pdir end at the bad address
	 * pattern.  Small pdirs are re-allocated as a full 128k block
	 * (returning the unused tail); large pdirs are over-allocated
	 * and the offending pieces handed back.
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a 128k-sized chunk instead */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));

			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release the unused tail of the 128k chunk */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order += 1;
				pdir_size <<= 1;
			}
		}
	} else {
		/*
		 * 1MB or 2MB pdir: over-allocate, then keep only the part
		 * that avoids the bad address pattern.
		 */
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1);

		/* release the original allocation */
		free_pages(pdir_base, pdir_order);

		/* release the first 1MB of the doubled allocation */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			 * 2MB pdir: a 128k hole near the end can't be
			 * avoided; give it back and remember
			 * (piranha_bad_128k) to mark it busy in the
			 * resource map later.
			 */
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release the last 1MB of the doubled allocation */
			free_pages(new_pdir, 20-12);

			/* release the last 128k of the usable range */
			free_pages(new_pdir - 128*1024, 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}

struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	/* lba_set_iregs() lives in the LBA (PCI host bridge) driver */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;

	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}

/* propagate the IOC's IBASE/IMASK to each LBA below it */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc = ioc,
		.ioc_num = ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}

#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif

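/*
 * sba_ioc_init_pluto - per-IOC initialization for Pluto (McKinley) SBAs.
 *
 * On Pluto the IOVA window is read back from the hardware (as set up by
 * firmware) rather than sized from memory.  The routine then allocates
 * and clears the pdir, programs PDIR_BASE, IMASK, TCNFG (system page
 * size) and IBASE, purges the I/O TLB, and optionally reserves the
 * upper half of the pdir for AGPGART when a Quicksilver AGP LBA is
 * present.
 */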
static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif

	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/* iov_order is log2(number of IOVP pages) of the IOVA space */
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__func__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
		__func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for the IOC */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/* extend the in-memory copy of IMASK to cover 64-bit addresses */
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O pdir page size to the system page size */
	switch (PAGE_SHIFT) {
	case 12: tcnfg = 0; break;	/*  4K */
	case 13: tcnfg = 1; break;	/*  8K */
	case 14: tcnfg = 2; break;	/* 16K */
	case 16: tcnfg = 3; break;	/* 64K */
	default:
		panic(__FILE__ "Unsupported system page size %d",
			1 << PAGE_SHIFT);
		break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	 * Program the IOC's ibase and enable IOVA translation.
	 * Bit zero == enable bit.
	 */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/* purge any stale I/O TLB entries (size field 31 == everything) */
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT
	/*
	 * If an AGP device (a "Quicksilver" LBA) is present, optionally
	 * claim the upper half of the IOVA space for the AGP GART.
	 */
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /* SBA_AGP_SUPPORT */
}

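/*
 * sba_ioc_init - per-IOC initialization for Astro/Ike/REO SBAs.
 *
 * Unlike Pluto, the IOVA window is sized by the driver: roughly the
 * amount of system RAM divided by the number of IOCs, clamped to the
 * 1MB..1GB range and rounded to a power of two.  The pdir is then
 * allocated (with the Piranha workaround), PDIR_BASE/IBASE/IMASK/TCNFG
 * are programmed, and the I/O TLB is purged.  IOVA space starts at bus
 * address 0 (ioc->ibase = 0).
 */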
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order;

	/* determine IOVA space size from memory size (in pages) */
	iova_space_size = (u32) (totalram_pages/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	 * IOVA space must be a power of two in size, so pdir and res_map
	 * will be too.
	 */
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
			__func__,
			ioc->ioc_hpa,
			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for the IOC */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	ioc->ibase = 0;
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__func__, ioc->ibase, ioc->imask);

	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	 * Program the IOC's ibase and enable IOVA translation.
	 */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O pdir page size to 4K (TCNFG == 0) */
	WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);

	/* purge any stale I/O TLB entries (size field 31 == everything) */
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __func__);
}


static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}

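/*
 * sba_hw_init - chip-level initialization of one SBA.
 *
 * On pre-PAT machines with a console keyboard, asks PDC to reset the
 * I/O devices first, sets the IOC_CTRL mode bits on Astro/Ike/REO,
 * maps the per-IOC register sets (one IOC on Astro and Pluto, two on
 * Ike/REO), claims the chip's fixed MMIO ranges as resources, enables
 * hard-fail mode on every rope, and finally runs the per-IOC init
 * routine for each IOC.
 */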
static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/*
		 * Pre-PAT firmware may leave boot-console devices active.
		 * If the console is a keyboard device, ask PDC to reset
		 * the I/O devices before the IOMMU is reprogrammed
		 * underneath them.
		 */
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}

#if 0
	printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
		PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	/*
	 * Also reset the boot device unless it is a disk (random-access)
	 * or tape (sequential-access) class device.
	 */
	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
		pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* Ike/REO: two IOCs per SBA */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;
	}

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j = 0; j < sizeof(u64) * ROPES_PER_IOC; j += sizeof(u64)) {

			/* Pluto only: clear the IOC_ROPE_AO bit in each
			 * rope's config register. */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/* enable Hard Fail on each rope so errors are not
			 * silently dropped */
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
			i,
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

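/*
 * sba_common_init - software-side initialization shared by all SBAs.
 *
 * Links the device into the global sba_list, then sizes and allocates
 * each IOC's resource bitmap (one bit per pdir entry), reserving the
 * Piranha 128k hole when required, seeds the search hint just past the
 * first cache line, and records whether the CPU requires FDC/SYNC to
 * keep the pdir visible to the IOC (PDC_MODEL_IOPDIR_FDC capability).
 */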
static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter) */
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for (i = 0; i < sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long, unsigned long,
					  void (*)(pte_t *, unsigned long),
					  unsigned long);
		void set_data_memory_break(pte_t *, unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size / sizeof(u64);

		/* Second part of the Piranha workaround */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;	/* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			 __func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__);
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* start searching one cache line into the map */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* mark the first bit busy so IOVA 0 is never handed out */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of the Piranha workaround */
		if (piranha_bad_128k) {
			/* mark the bitmap bits covering pdir bytes
			 * 1408K..1536K (the Piranha hole) busy */
			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			 __func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/* report whether pdir updates need explicit FDC/SYNC */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];
	int total_pages = (int) (ioc->res_size << 3);
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i, len = 0;

	len += seq_printf(m, "%s rev %d.%d\n",
		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3
		);
	len += seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		(int) ((ioc->res_size << 3) * sizeof(u64)),
		total_pages);

	len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		ioc->res_size, ioc->res_size << 3);

	len += seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE)
		);

	for (i = 0; i < 4; i++)
		len += seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n", i,
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18)
			);

#ifdef SBA_COLLECT_STATS
	len += seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
		total_pages - ioc->used_pages, ioc->used_pages,
		(int) (ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	len += seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		min, avg, max);

	len += seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msingle_calls, ioc->msingle_pages,
		(int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	len += seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
		min, max, (int) ((max * 1000)/min));

	len += seq_printf(m, "pci_map_sg()    : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msg_calls, ioc->msg_pages,
		(int) ((ioc->msg_pages * 1000)/ioc->msg_calls));

	len += seq_printf(m, "pci_unmap_sg()  : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->usg_calls, ioc->usg_pages,
		(int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_open(struct inode *i, struct file *f)
{
	return single_open(f, &sba_proc_info, NULL);
}

static const struct file_operations sba_proc_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];
	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
	int i, len = 0;

	for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			len += seq_printf(m, "\n ");
		len += seq_printf(m, " %08x", *res_ptr);
	}
	len += seq_printf(m, "\n");

	return 0;
}

static int
sba_proc_bitmap_open(struct inode *i, struct file *f)
{
	return single_open(f, &sba_proc_bitmap_info, NULL);
}

static const struct file_operations sba_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* CONFIG_PROC_FS */

static struct parisc_device_id sba_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

static int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver = {
	.name =		MODULE_NAME,
	.id_table =	sba_tbl,
	.probe =	sba_driver_callback,
};

/*
 * sba_driver_callback - probe routine: identify the chip, allocate and
 * initialize the sba_device, install sba_ops as the DMA API backend,
 * and create the /proc entries.
 */
static int sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *root;
#endif

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[] = "Astro ?.?";

		/* Astro reports the revision at offset 0, not SBA_FCLASS */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[] = "Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%llx\n",
		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for (i = 0; i < MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		root = proc_runway_root;
		break;
	}

	proc_create("sba_iommu", 0, root, &sba_proc_fops);
	proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
#endif

	parisc_has_iommu();
	return 0;
}

/*
 * sba_init - register the SBA driver with the parisc device framework.
 */
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}

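/*
 * sba_get_iommu - return the struct ioc that serves a given PCI host
 * bridge.  The parent of the LBA is the SBA; the hardware path encodes
 * which of the (up to two) IOCs the LBA's rope is attached to
 * (ropes 0-7 vs 8-15).
 */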
void *sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* rope # -> IOC # */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}


/*
 * sba_directed_lmmio - return the directed (per-rope) LMMIO range routed
 * to the rope a PCI host bridge sits on, if one is configured.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	/* the hardware supports 4 directed LMMIO ranges */
	for (i = 0; i < 4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* routed to a different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}


/*
 * sba_distributed_lmmio - return this rope's slice of the distributed
 * LMMIO range.  The distributed range is split evenly across all
 * ROPES_PER_IOC ropes.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* distributed range is not enabled */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}