/*
** SBA I/O MMU driver for PA-RISC systems.
**
** Drives the System Bus Adapter (SBA) found in the Astro, Ike/REO and
** Pluto/McKinley chipsets.  The SBA's I/O MMU (IOC) translates 32-bit
** PCI bus addresses into physical memory addresses through an I/O page
** directory (pdir), so legacy 32-bit PCI devices can DMA anywhere in a
** larger physical address space.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/hardware.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>
#include <asm/runway.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/parisc-device.h>

#define MODULE_NAME "SBA"

/*
** Compile-time debug switches.  Enabling any of these makes the mapping
** paths considerably more verbose (and slower).
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

#define SBA_MAPPING_ERROR	(~(dma_addr_t)0)

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

/* Set if the IOC's pdir is not cache coherent and needs fdc/sync after updates. */
static unsigned long ioc_needs_fdc = 0;

/* Total number of IOCs in the system; used to split up the IOVA space. */
static unsigned int global_ioc_cnt = 0;

/* Set when the Piranha (PA8700) 128k pdir workaround is in effect. */
static unsigned long piranha_bad_128k = 0;

#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

/*
** The SBA is a 64-bit wide device; use 64-bit register accesses where
** the kernel supports them, 32-bit accesses otherwise.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO address ranges forwarded by an Astro/Ike/Pluto
 * I/O Adapter.
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU pdir.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IO pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU pdir in human readable form.
 */
199static void
200sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
201{
202
203 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
204 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
205 uint rcnt;
206
207 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
208 msg,
209 rptr, pide & (BITS_PER_LONG - 1), *rptr);
210
211 rcnt = 0;
212 while (rcnt < BITS_PER_LONG) {
213 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
214 (rcnt == (pide & (BITS_PER_LONG - 1)))
215 ? " -->" : " ",
216 rcnt, ptr, *ptr );
217 rcnt++;
218 ptr++;
219 }
220 printk(KERN_DEBUG "%s", msg);
221}

/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify that the resource map and the pdir agree about which entries
 * are in use.
 */
231static int
232sba_check_pdir(struct ioc *ioc, char *msg)
233{
234 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
235 u32 *rptr = (u32 *) ioc->res_map;
236 u64 *pptr = ioc->pdir_base;
237 uint pide = 0;
238
239 while (rptr < rptr_end) {
240 u32 rval = *rptr;
241 int rcnt = 32;
242
243 while (rcnt) {
244
245 u32 pde = ((u32) (((char *)pptr)[7])) << 24;
246 if ((rval ^ pde) & 0x80000000)
247 {
248
249
250
251
252 sba_dump_pdir_entry(ioc, msg, pide);
253 return(1);
254 }
255 rcnt--;
256 rval <<= 1;
257 pptr++;
258 pide++;
259 }
260 rptr++;
261 }
262
263 return 0;
264}

/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * Print the SG list so it can be verified by hand.
 */
275static void
276sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
277{
278 while (nents-- > 0) {
279 printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
280 nents,
281 (unsigned long) sg_dma_address(startsg),
282 sg_dma_len(startsg),
283 sg_virt(startsg), startsg->length);
284 startsg++;
285 }
286}
287
288#endif

/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boards need to set/clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}

/**
 * sba_search_bitmap - find free space in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device for which the mapping is requested
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in the resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Perf optimization: search for log2(size)-aligned runs at a time.
 */
337static SBA_INLINE unsigned long
338sba_search_bitmap(struct ioc *ioc, struct device *dev,
339 unsigned long bits_wanted)
340{
341 unsigned long *res_ptr = ioc->res_hint;
342 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
343 unsigned long pide = ~0UL, tpide;
344 unsigned long boundary_size;
345 unsigned long shift;
346 int ret;
347
348 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
349 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
350
351#if defined(ZX1_SUPPORT)
352 BUG_ON(ioc->ibase & ~IOVP_MASK);
353 shift = ioc->ibase >> IOVP_SHIFT;
354#else
355 shift = 0;
356#endif
357
358 if (bits_wanted > (BITS_PER_LONG/2)) {
359
360 for(; res_ptr < res_end; ++res_ptr) {
361 tpide = ptr_to_pide(ioc, res_ptr, 0);
362 ret = iommu_is_span_boundary(tpide, bits_wanted,
363 shift,
364 boundary_size);
365 if ((*res_ptr == 0) && !ret) {
366 *res_ptr = RESMAP_MASK(bits_wanted);
367 pide = tpide;
368 break;
369 }
370 }
371
372 res_ptr++;
373 ioc->res_bitshift = 0;
374 } else {
375
376
377
378
379
380
381 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
382 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
383 unsigned long mask;
384
385 if (bitshiftcnt >= BITS_PER_LONG) {
386 bitshiftcnt = 0;
387 res_ptr++;
388 }
389 mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
390
391 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
392 while(res_ptr < res_end)
393 {
394 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
395 WARN_ON(mask == 0);
396 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
397 ret = iommu_is_span_boundary(tpide, bits_wanted,
398 shift,
399 boundary_size);
400 if ((((*res_ptr) & mask) == 0) && !ret) {
401 *res_ptr |= mask;
402 pide = tpide;
403 break;
404 }
405 mask >>= o;
406 bitshiftcnt += o;
407 if (mask == 0) {
408 mask = RESMAP_MASK(bits_wanted);
409 bitshiftcnt=0;
410 res_ptr++;
411 }
412 }
413
414 ioc->res_bitshift = bitshiftcnt + bits_wanted;
415 }
416
417
418 if (res_end <= res_ptr) {
419 ioc->res_hint = (unsigned long *) ioc->res_map;
420 ioc->res_bitshift = 0;
421 } else {
422 ioc->res_hint = res_ptr;
423 }
424 return (pide);
425}

/**
 * sba_alloc_range - find free bits and mark them in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device for which the mapping is requested
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits
 * in the resource bit map.
 */
436static int
437sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
438{
439 unsigned int pages_needed = size >> IOVP_SHIFT;
440#ifdef SBA_COLLECT_STATS
441 unsigned long cr_start = mfctl(16);
442#endif
443 unsigned long pide;
444
445 pide = sba_search_bitmap(ioc, dev, pages_needed);
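	/*
	** First pass searched from res_hint to the end of the map and found
	** nothing; sba_search_bitmap() has reset the hint, so try once more
	** from the beginning before declaring the pdir full.
	*/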
446 if (pide >= (ioc->res_size << 3)) {
447 pide = sba_search_bitmap(ioc, dev, pages_needed);
448 if (pide >= (ioc->res_size << 3))
449 panic("%s: I/O MMU @ %p is out of mapping resources\n",
450 __FILE__, ioc->ioc_hpa);
451 }
452
453#ifdef ASSERT_PDIR_SANITY
454
455 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
456 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
457 }
458#endif
459
460 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
461 __func__, size, pages_needed, pide,
462 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
463 ioc->res_bitshift );
464
465#ifdef SBA_COLLECT_STATS
466 {
467 unsigned long cr_end = mfctl(16);
468 unsigned long tmp = cr_end - cr_start;
469
470 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
471 }
472 ioc->avg_search[ioc->avg_idx++] = cr_start;
473 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
474
475 ioc->used_pages += pages_needed;
476#endif
477
478 return (pide);
479}

/**
 * sba_free_range - unmark bits in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * Clear bits in the ioc's resource map.
 */
490static SBA_INLINE void
491sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
492{
493 unsigned long iovp = SBA_IOVP(ioc, iova);
494 unsigned int pide = PDIR_INDEX(iovp);
495 unsigned int ridx = pide >> 3;
496 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
497
498 int bits_not_wanted = size >> IOVP_SHIFT;
499
500
501 unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
502
503 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
504 __func__, (uint) iova, size,
505 bits_not_wanted, m, pide, res_ptr, *res_ptr);
506
507#ifdef SBA_COLLECT_STATS
508 ioc->used_pages -= bits_not_wanted;
509#endif
510
511 *res_ptr &= ~m;
512}

/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO Pdir entry
 * @pdir_ptr:  pointer to IO Pdir entry
 * @sid: process Space ID - currently only supports KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba) and a space id (sid), load the I/O PDIR
 * entry pointed to by pdir_ptr.  Each IO Pdir entry is 8 bytes: the
 * Valid bit lives in the most significant byte, the physical page
 * number (the masked physical address) sits in the middle, and the low
 * byte holds the Virtual Index ("coherence index") returned by the LCI
 * instruction.  The coherence index is what lets the IOC keep DMA
 * coherent with the CPU caches.
 */
568static void SBA_INLINE
569sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
570 unsigned long hint)
571{
572 u64 pa;
573 register unsigned ci;
574
575 pa = virt_to_phys(vba);
576 pa &= IOVP_MASK;
577
578 mtsp(sid,1);
579 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
580 pa |= (ci >> PAGE_SHIFT) & 0xff;
581
582 pa |= SBA_PDIR_VALID_BIT;
583 *pdir_ptr = cpu_to_le64(pa);

	/*
	 * If the PDC_MODEL capabilities have the Non-coherent IO-PDIR bit
	 * set, we have to flush (fdc) and sync every time the IO-PDIR is
	 * changed so the IOC sees the update.
	 */
590 if (ioc_needs_fdc)
591 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
592}

/**
 * sba_mark_invalid - invalidate one or more IO Pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO Pdir entry(ies) as Invalid and purge the corresponding
 * IO TLB entry via the PCOM (Purge Command) register.  PCOM purges a
 * power-of-two sized range, so for mappings larger than one page the
 * purge size is encoded in the low bits of the address written to it.
 */
611static SBA_INLINE void
612sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
613{
614 u32 iovp = (u32) SBA_IOVP(ioc,iova);
615 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
616
617#ifdef ASSERT_PDIR_SANITY
618
619
620
621
622
623
624 if (0x80 != (((u8 *) pdir_ptr)[7])) {
625 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
626 }
627#endif
628
629 if (byte_cnt > IOVP_SIZE)
630 {
631#if 0
632 unsigned long entries_per_cacheline = ioc_needs_fdc ?
633 L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
634 - (unsigned long) pdir_ptr;
635 : 262144;
636#endif
637
638
639 iovp |= get_order(byte_cnt) + PAGE_SHIFT;
640
641 do {
642
643 ((u8 *) pdir_ptr)[7] = 0;
644 if (ioc_needs_fdc) {
645 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
646#if 0
647 entries_per_cacheline = L1_CACHE_SHIFT - 3;
648#endif
649 }
650 pdir_ptr++;
651 byte_cnt -= IOVP_SIZE;
652 } while (byte_cnt > IOVP_SIZE);
653 } else
654 iovp |= IOVP_SHIFT;
655
656
657
658
659
660
661
662
663 ((u8 *) pdir_ptr)[7] = 0;
664 if (ioc_needs_fdc)
665 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
666
667 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
668}

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
677static int sba_dma_supported( struct device *dev, u64 mask)
678{
679 struct ioc *ioc;
680
681 if (dev == NULL) {
682 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
683 BUG();
684 return(0);
685 }

	/* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
	 * first, then fall back to 32-bit if that fails.
	 * We are just "encouraging" 32-bit DMA masks here since we can
	 * never allow IOMMU bypass unless we add special support for ZX1.
	 */
692 if (mask > ~0U)
693 return 0;
694
695 ioc = GET_IOC(dev);
696 if (!ioc)
697 return 0;

	/*
	 * check if mask is >= than the current max IO Virt Address
	 * The max IO Virt address will *always* < 30 bits.
	 */
703 return((int)(mask >= (ioc->ibase - 1 +
704 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
705}

/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
717static dma_addr_t
718sba_map_single(struct device *dev, void *addr, size_t size,
719 enum dma_data_direction direction)
720{
721 struct ioc *ioc;
722 unsigned long flags;
723 dma_addr_t iovp;
724 dma_addr_t offset;
725 u64 *pdir_start;
726 int pide;
727
728 ioc = GET_IOC(dev);
729 if (!ioc)
730 return SBA_MAPPING_ERROR;

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

738 spin_lock_irqsave(&ioc->res_lock, flags);
739#ifdef ASSERT_PDIR_SANITY
740 sba_check_pdir(ioc,"Check before sba_map_single()");
741#endif
742
743#ifdef SBA_COLLECT_STATS
744 ioc->msingle_calls++;
745 ioc->msingle_pages += size >> IOVP_SHIFT;
746#endif
747 pide = sba_alloc_range(ioc, dev, size);
748 iovp = (dma_addr_t) pide << IOVP_SHIFT;
749
750 DBG_RUN("%s() 0x%p -> 0x%lx\n",
751 __func__, addr, (long) iovp | offset);
752
753 pdir_start = &(ioc->pdir_base[pide]);
754
755 while (size > 0) {
756 sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
757
758 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
759 pdir_start,
760 (u8) (((u8 *) pdir_start)[7]),
761 (u8) (((u8 *) pdir_start)[6]),
762 (u8) (((u8 *) pdir_start)[5]),
763 (u8) (((u8 *) pdir_start)[4]),
764 (u8) (((u8 *) pdir_start)[3]),
765 (u8) (((u8 *) pdir_start)[2]),
766 (u8) (((u8 *) pdir_start)[1]),
767 (u8) (((u8 *) pdir_start)[0])
768 );
769
770 addr += IOVP_SIZE;
771 size -= IOVP_SIZE;
772 pdir_start++;
773 }
774
775
776 if (ioc_needs_fdc)
777 asm volatile("sync" : : );
778
779#ifdef ASSERT_PDIR_SANITY
780 sba_check_pdir(ioc,"Check after sba_map_single()");
781#endif
782 spin_unlock_irqrestore(&ioc->res_lock, flags);
783
784
785 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
786}
787
788
789static dma_addr_t
790sba_map_page(struct device *dev, struct page *page, unsigned long offset,
791 size_t size, enum dma_data_direction direction,
792 unsigned long attrs)
793{
794 return sba_map_single(dev, page_address(page) + offset, size,
795 direction);
796}

/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 * @attrs: DMA attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
808static void
809sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
810 enum dma_data_direction direction, unsigned long attrs)
811{
812 struct ioc *ioc;
813#if DELAYED_RESOURCE_CNT > 0
814 struct sba_dma_pair *d;
815#endif
816 unsigned long flags;
817 dma_addr_t offset;
818
819 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
820
821 ioc = GET_IOC(dev);
822 if (!ioc) {
823 WARN_ON(!ioc);
824 return;
825 }
826 offset = iova & ~IOVP_MASK;
827 iova ^= offset;
828 size += offset;
829 size = ALIGN(size, IOVP_SIZE);
830
831 spin_lock_irqsave(&ioc->res_lock, flags);
832
833#ifdef SBA_COLLECT_STATS
834 ioc->usingle_calls++;
835 ioc->usingle_pages += size >> IOVP_SHIFT;
836#endif
837
838 sba_mark_invalid(ioc, iova, size);
839
840#if DELAYED_RESOURCE_CNT > 0
841
842
843
844 d = &(ioc->saved[ioc->saved_cnt]);
845 d->iova = iova;
846 d->size = size;
847 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
848 int cnt = ioc->saved_cnt;
849 while (cnt--) {
850 sba_free_range(ioc, d->iova, d->size);
851 d--;
852 }
853 ioc->saved_cnt = 0;
854
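		/* Reading PCOM flushes the posted purge writes out to the IOC. */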
855 READ_REG(ioc->ioc_hpa+IOC_PCOM);
856 }
857#else
858 sba_free_range(ioc, iova, size);
859
860
861 if (ioc_needs_fdc)
862 asm volatile("sync" : : );
863
864 READ_REG(ioc->ioc_hpa+IOC_PCOM);
865#endif
866
867 spin_unlock_irqrestore(&ioc->res_lock, flags);
868
869
870
871
872
873
874
875
876
877}

/**
 * sba_alloc - allocate/map shared memory for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
888static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
889 gfp_t gfp, unsigned long attrs)
890{
891 void *ret;
892
893 if (!hwdev) {
894
895 *dma_handle = 0;
896 return NULL;
897 }
898
899 ret = (void *) __get_free_pages(gfp, get_order(size));
900
901 if (ret) {
902 memset(ret, 0, size);
903 *dma_handle = sba_map_single(hwdev, ret, size, 0);
904 }
905
906 return ret;
907}

/**
 * sba_free - free/unmap shared memory for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address of the buffer to free.
 * @dma_handle:  IO virtual address of the buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
919static void
920sba_free(struct device *hwdev, size_t size, void *vaddr,
921 dma_addr_t dma_handle, unsigned long attrs)
922{
923 sba_unmap_page(hwdev, dma_handle, size, 0, 0);
924 free_pages((unsigned long) vaddr, get_order(size));
925}

/*
** Since 0 is a valid pdir_base index value, we can't use that
** to determine if a value is valid or not. Use a flag to indicate
** that the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

935#ifdef SBA_COLLECT_STATS
936#define IOMMU_MAP_STATS
937#endif
938#include "iommu-helpers.h"
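
/*
** sba_map_sg() uses the generic PA-RISC iommu helpers above:
** iommu_coalesce_chunks() merges adjacent SG entries and reserves pdir
** space through sba_alloc_range(), then iommu_fill_pdir() writes the
** entries via sba_io_pdir_entry().
*/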
939
940#ifdef DEBUG_LARGE_SG_ENTRIES
941int dump_run_sg = 0;
942#endif

/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: DMA attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
954static int
955sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
956 enum dma_data_direction direction, unsigned long attrs)
957{
958 struct ioc *ioc;
959 int coalesced, filled = 0;
960 unsigned long flags;
961
962 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
963
964 ioc = GET_IOC(dev);
965 if (!ioc)
966 return 0;
967
968
969 if (nents == 1) {
970 sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
971 sglist->length, direction);
972 sg_dma_len(sglist) = sglist->length;
973 return 1;
974 }
975
976 spin_lock_irqsave(&ioc->res_lock, flags);
977
978#ifdef ASSERT_PDIR_SANITY
979 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
980 {
981 sba_dump_sg(ioc, sglist, nents);
982 panic("Check before sba_map_sg()");
983 }
984#endif
985
986#ifdef SBA_COLLECT_STATS
987 ioc->msg_calls++;
988#endif
	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
1009
1010
1011 if (ioc_needs_fdc)
1012 asm volatile("sync" : : );
1013
1014#ifdef ASSERT_PDIR_SANITY
1015 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1016 {
1017 sba_dump_sg(ioc, sglist, nents);
1018 panic("Check after sba_map_sg()\n");
1019 }
1020#endif
1021
1022 spin_unlock_irqrestore(&ioc->res_lock, flags);
1023
1024 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1025
1026 return filled;
1027}

/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: DMA attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
1039static void
1040sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
1041 enum dma_data_direction direction, unsigned long attrs)
1042{
1043 struct ioc *ioc;
1044#ifdef ASSERT_PDIR_SANITY
1045 unsigned long flags;
1046#endif
1047
1048 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1049 __func__, nents, sg_virt(sglist), sglist->length);
1050
1051 ioc = GET_IOC(dev);
1052 if (!ioc) {
1053 WARN_ON(!ioc);
1054 return;
1055 }
1056
1057#ifdef SBA_COLLECT_STATS
1058 ioc->usg_calls++;
1059#endif
1060
1061#ifdef ASSERT_PDIR_SANITY
1062 spin_lock_irqsave(&ioc->res_lock, flags);
1063 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1064 spin_unlock_irqrestore(&ioc->res_lock, flags);
1065#endif
1066
1067 while (sg_dma_len(sglist) && nents--) {
1068
1069 sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
1070 direction, 0);
1071#ifdef SBA_COLLECT_STATS
1072 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1073 ioc->usingle_calls--;
1074#endif
1075 ++sglist;
1076 }
1077
1078 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1079
1080#ifdef ASSERT_PDIR_SANITY
1081 spin_lock_irqsave(&ioc->res_lock, flags);
1082 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1083 spin_unlock_irqrestore(&ioc->res_lock, flags);
1084#endif
1085
1086}
1087
1088static int sba_mapping_error(struct device *dev, dma_addr_t dma_addr)
1089{
1090 return dma_addr == SBA_MAPPING_ERROR;
1091}
1092
1093static const struct dma_map_ops sba_ops = {
1094 .dma_supported = sba_dma_supported,
1095 .alloc = sba_alloc,
1096 .free = sba_free,
1097 .map_page = sba_map_page,
1098 .unmap_page = sba_unmap_page,
1099 .map_sg = sba_map_sg,
1100 .unmap_sg = sba_unmap_sg,
1101 .mapping_error = sba_mapping_error,
1102};
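
/*
** Once an SBA is found, sba_driver_callback() installs these ops as
** hppa_dma_ops, so dma_map_*()/dma_alloc_*() calls on this machine are
** routed through the SBA's IO pdir.
*/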
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114static void
1115sba_get_pat_resources(struct sba_device *sba_dev)
1116{
1117#if 0
1118
1119
1120
1121
1122
1123
1124PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
1125 FIXME : ???
1126PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
1127 Tells where the dvi bits are located in the address.
1128PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1129 FIXME : ???
1130#endif
1131}

/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
1139#define PIRANHA_ADDR_MASK 0x00160000UL
1140#define PIRANHA_ADDR_VAL 0x00060000UL
1141static void *
1142sba_alloc_pdir(unsigned int pdir_size)
1143{
1144 unsigned long pdir_base;
1145 unsigned long pdir_order = get_order(pdir_size);
1146
1147 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
1148 if (NULL == (void *) pdir_base) {
1149 panic("%s() could not allocate I/O Page Table\n",
1150 __func__);
1151 }

	/*
	** If this CPU's model/revision/capabilities say it is not affected
	** by the Piranha IO-pdir bug, the freshly allocated pdir is fine
	** as-is and can be returned directly.
	*/
1159 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
1160 || (boot_cpu_data.pdc.versions > 0x202)
1161 || (boot_cpu_data.pdc.capabilities & 0x08L) )
1162 return (void *) pdir_base;

	/*
	** Piranha (PA8700/PCX-W2) pdir workaround: the IO pdir must not
	** occupy a 128KB region whose physical address matches
	** PIRANHA_ADDR_VAL under PIRANHA_ADDR_MASK.  Depending on the pdir
	** size, either reallocate the pdir so it avoids that window, or
	** (for very large pdirs) give back the offending 128KB and flag it
	** via piranha_bad_128k so the resource map marks it as in use.
	*/
1182 if (pdir_order <= (19-12)) {
1183 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
1184
1185 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
1186
1187 free_pages(pdir_base, pdir_order);
1188
1189 pdir_base = new_pdir;
1190
1191
1192 while (pdir_order < (19-12)) {
1193 new_pdir += pdir_size;
1194 free_pages(new_pdir, pdir_order);
1195 pdir_order +=1;
1196 pdir_size <<=1;
1197 }
1198 }
1199 } else {
1200
1201
1202
1203
1204 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1);
1205
1206
1207 free_pages( pdir_base, pdir_order);
1208
1209
1210 free_pages(new_pdir, 20-12);
1211
1212 pdir_base = new_pdir + 1024*1024;
1213
1214 if (pdir_order > (20-12)) {
1215
1216
1217
1218
1219
1220
1221 piranha_bad_128k = 1;
1222
1223 new_pdir += 3*1024*1024;
1224
1225 free_pages(new_pdir, 20-12);
1226
1227
1228 free_pages(new_pdir - 128*1024 , 17-12);
1229
1230 pdir_size -= 128*1024;
1231 }
1232 }
1233
1234 memset((void *) pdir_base, 0, pdir_size);
1235 return (void *) pdir_base;
1236}
1237
1238struct ibase_data_struct {
1239 struct ioc *ioc;
1240 int ioc_num;
1241};
1242
1243static int setup_ibase_imask_callback(struct device *dev, void *data)
1244{
1245
1246 extern void lba_set_iregs(struct parisc_device *, u32, u32);
1247 struct parisc_device *lba = to_parisc_device(dev);
1248 struct ibase_data_struct *ibd = data;
1249 int rope_num = (lba->hpa.start >> 13) & 0xf;
1250 if (rope_num >> 3 == ibd->ioc_num)
1251 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1252 return 0;
1253}
1254
1255
1256static void
1257setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1258{
1259 struct ibase_data_struct ibase_data = {
1260 .ioc = ioc,
1261 .ioc_num = ioc_num,
1262 };
1263
1264 device_for_each_child(&sba->dev, &ibase_data,
1265 setup_ibase_imask_callback);
1266}
1267
1268#ifdef SBA_AGP_SUPPORT
1269static int
1270sba_ioc_find_quicksilver(struct device *dev, void *data)
1271{
1272 int *agp_found = data;
1273 struct parisc_device *lba = to_parisc_device(dev);
1274
1275 if (IS_QUICKSILVER(lba))
1276 *agp_found = 1;
1277 return 0;
1278}
1279#endif
1280
1281static void
1282sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1283{
1284 u32 iova_space_mask;
1285 u32 iova_space_size;
1286 int iov_order, tcnfg;
1287#ifdef SBA_AGP_SUPPORT
1288 int agp_found = 0;
1289#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

1298 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1299 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
1300 iova_space_size /= 2;
1301 }
1302
1303
1304
1305
1306
1307 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
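	/* One u64 IO pdir entry is needed per IOVP_SIZE page of IOVA space. */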
1308 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1309
1310 DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
1311 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1312 iov_order + PAGE_SHIFT);
1313
1314 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1315 get_order(ioc->pdir_size));
1316 if (!ioc->pdir_base)
1317 panic("Couldn't allocate I/O Page Table\n");
1318
1319 memset(ioc->pdir_base, 0, ioc->pdir_size);
1320
1321 DBG_INIT("%s() pdir %p size %x\n",
1322 __func__, ioc->pdir_base, ioc->pdir_size);
1323
1324#ifdef SBA_HINT_SUPPORT
1325 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1326 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1327
1328 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1329 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1330#endif
1331
1332 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1333 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1334
1335
1336 iova_space_mask = 0xffffffff;
1337 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1338 ioc->imask = iova_space_mask;
1339#ifdef ZX1_SUPPORT
1340 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1341#endif
1342 sba_dump_tlb(ioc->ioc_hpa);
1343
1344 setup_ibase_imask(sba, ioc, ioc_num);
1345
1346 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1347
#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
1357 switch (PAGE_SHIFT) {
1358 case 12: tcnfg = 0; break;
1359 case 13: tcnfg = 1; break;
1360 case 14: tcnfg = 2; break;
1361 case 16: tcnfg = 3; break;
1362 default:
1363 panic(__FILE__ "Unsupported system page size %d",
1364 1 << PAGE_SHIFT);
1365 break;
1366 }
1367 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation.
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear the I/O TLB of any possible stale entries.
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1380
1381#ifdef SBA_AGP_SUPPORT
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391 device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);
1392
1393 if (agp_found && sba_reserve_agpgart) {
1394 printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
1395 __func__, (iova_space_size/2) >> 20);
1396 ioc->pdir_size /= 2;
1397 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1398 }
1399#endif
1400}
1401
1402static void
1403sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1404{
1405 u32 iova_space_size, iova_space_mask;
1406 unsigned int pdir_size, iov_order, tcnfg;
1407

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards.
	**
	** While we have 32-bits of "IOVA" space, the top two bits
	** are used for DMA hints - ergo only 30 bits max.
	*/
1422 iova_space_size = (u32) (totalram_pages/global_ioc_cnt);
1423
1424
1425 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1426 iova_space_size = 1 << (20 - PAGE_SHIFT);
1427 }
1428 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1429 iova_space_size = 1 << (30 - PAGE_SHIFT);
1430 }

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced).
	*/
1437 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1438
1439
1440 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1441
1442 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1443
1444 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1445 __func__,
1446 ioc->ioc_hpa,
1447 (unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
1448 iova_space_size>>20,
1449 iov_order + PAGE_SHIFT);
1450
1451 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1452
1453 DBG_INIT("%s() pdir %p size %x\n",
1454 __func__, ioc->pdir_base, pdir_size);
1455
1456#ifdef SBA_HINT_SUPPORT
1457
1458 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1459 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1460
1461 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1462 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1463#endif
1464
1465 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1466
1467
1468 iova_space_mask = 0xffffffff;
1469 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1470
1471
1472
1473
1474
1475 ioc->ibase = 0;
1476 ioc->imask = iova_space_mask;
1477#ifdef ZX1_SUPPORT
1478 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1479#endif
1480
1481 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1482 __func__, ioc->ibase, ioc->imask);
1483
1484
1485
1486
1487
1488
1489
1490 setup_ibase_imask(sba, ioc, ioc_num);
1491
1492
1493
1494
1495 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1496 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1497
1498
1499 switch (PAGE_SHIFT) {
1500 case 12: tcnfg = 0; break;
1501 case 13: tcnfg = 1; break;
1502 case 14: tcnfg = 2; break;
1503 case 16: tcnfg = 3; break;
1504 default:
1505 panic(__FILE__ "Unsupported system page size %d",
1506 1 << PAGE_SHIFT);
1507 break;
1508 }
1509
1510 WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
1511
1512
1513
1514
1515
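	/*
	** Clear the I/O TLB of any stale entries; the low bits of the PCOM
	** write encode the (log2) size of the purge.
	*/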
1516 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1517
1518 ioc->ibase = 0;
1519
1520 DBG_INIT("%s() DONE\n", __func__);
1521}
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
1537{
1538 return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
1539}
1540
1541static void sba_hw_init(struct sba_device *sba_dev)
1542{
1543 int i;
1544 int num_ioc;
1545 u64 ioc_ctl;
1546
1547 if (!is_pdc_pat()) {
		/*
		** On non-PAT boxes, ask firmware to quiesce any I/O devices
		** it may have left running (e.g. a built-in PS/2 keyboard
		** controller) so nothing is still doing DMA while the IOC
		** gets reprogrammed below.
		*/
1565 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
1566 pdc_io_reset_devices();
1567 }
1568
1569 }
1570
1571
1572#if 0
1573printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1574 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585 if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
1586 && (PAGE0->mem_boot.cl_class != CL_SEQU)) {
1587 pdc_io_reset();
1588 }
1589#endif
1590
1591 if (!IS_PLUTO(sba_dev->dev)) {
1592 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1593 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1594 __func__, sba_dev->sba_hpa, ioc_ctl);
1595 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
1596 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
1597
1598
1599
1600 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
1601
1602#ifdef DEBUG_SBA_INIT
1603 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
1604 DBG_INIT(" 0x%Lx\n", ioc_ctl);
1605#endif
1606 }
1607
1608 if (IS_ASTRO(sba_dev->dev)) {
1609 int err;
1610 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1611 num_ioc = 1;
1612
1613 sba_dev->chip_resv.name = "Astro Intr Ack";
1614 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
1615 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
1616 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1617 BUG_ON(err < 0);
1618
1619 } else if (IS_PLUTO(sba_dev->dev)) {
1620 int err;
1621
1622 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1623 num_ioc = 1;
1624
1625 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
1626 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
1627 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
1628 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1629 WARN_ON(err < 0);
1630
1631 sba_dev->iommu_resv.name = "IOVA Space";
1632 sba_dev->iommu_resv.start = 0x40000000UL;
1633 sba_dev->iommu_resv.end = 0x50000000UL - 1;
1634 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1635 WARN_ON(err < 0);
1636 } else {
1637
1638 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1639 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1640 num_ioc = 2;
1641
1642
1643 }
1644
1645
1646 sba_dev->num_ioc = num_ioc;
1647 for (i = 0; i < num_ioc; i++) {
1648 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
1649 unsigned int j;
1650
1651 for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {
1652
1653
1654
1655
1656
1657
1658
1659 if (IS_PLUTO(sba_dev->dev)) {
1660 void __iomem *rope_cfg;
1661 unsigned long cfg_val;
1662
1663 rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
1664 cfg_val = READ_REG(rope_cfg);
1665 cfg_val &= ~IOC_ROPE_AO;
1666 WRITE_REG(cfg_val, rope_cfg);
1667 }
1668
1669
1670
1671
1672 WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
1673 }
1674
1675
1676 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1677
1678 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1679 i,
1680 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1681 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1682 );
1683 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
1684 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1685 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1686 );
1687
1688 if (IS_PLUTO(sba_dev->dev)) {
1689 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1690 } else {
1691 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1692 }
1693 }
1694}
1695
1696static void
1697sba_common_init(struct sba_device *sba_dev)
1698{
1699 int i;
1700
1701
1702
1703
1704 sba_dev->next = sba_list;
1705 sba_list = sba_dev;
1706
1707 for(i=0; i< sba_dev->num_ioc; i++) {
1708 int res_size;
1709#ifdef DEBUG_DMB_TRAP
1710 extern void iterate_pages(unsigned long , unsigned long ,
1711 void (*)(pte_t * , unsigned long),
1712 unsigned long );
1713 void set_data_memory_break(pte_t * , unsigned long);
1714#endif
1715
1716 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64);
1717
1718
1719 if (piranha_bad_128k) {
1720 res_size -= (128*1024)/sizeof(u64);
1721 }
1722
1723 res_size >>= 3;
1724 DBG_INIT("%s() res_size 0x%x\n",
1725 __func__, res_size);
1726
1727 sba_dev->ioc[i].res_size = res_size;
1728 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1729
1730#ifdef DEBUG_DMB_TRAP
1731 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1732 set_data_memory_break, 0);
1733#endif
1734
1735 if (NULL == sba_dev->ioc[i].res_map)
1736 {
1737 panic("%s:%s() could not allocate resource map\n",
1738 __FILE__, __func__ );
1739 }
1740
1741 memset(sba_dev->ioc[i].res_map, 0, res_size);
1742
1743 sba_dev->ioc[i].res_hint = (unsigned long *)
1744 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1745
1746#ifdef ASSERT_PDIR_SANITY
1747
1748 sba_dev->ioc[i].res_map[0] = 0x80;
1749 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1750#endif
1751
1752
1753 if (piranha_bad_128k) {
1754
1755
1756 int idx_start = (1408*1024/sizeof(u64)) >> 3;
1757 int idx_end = (1536*1024/sizeof(u64)) >> 3;
1758 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1759 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1760
1761
1762 while (p_start < p_end)
1763 *p_start++ = -1;
1764
1765 }
1766
1767#ifdef DEBUG_DMB_TRAP
1768 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1769 set_data_memory_break, 0);
1770 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1771 set_data_memory_break, 0);
1772#endif
1773
1774 DBG_INIT("%s() %d res_map %x %p\n",
1775 __func__, i, res_size, sba_dev->ioc[i].res_map);
1776 }
1777
1778 spin_lock_init(&sba_dev->sba_lock);
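	/*
	** If PDC says the IO pdir is not coherent with the CPU caches, every
	** pdir update must be pushed out with fdc/sync (see
	** sba_io_pdir_entry() and sba_mark_invalid()).
	*/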
1779 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
1780
1781#ifdef DEBUG_SBA_INIT
1782
1783
1784
1785
1786
1787 if (ioc_needs_fdc) {
1788 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
1789 } else {
1790 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
1791 }
1792#endif
1793}
1794
1795#ifdef CONFIG_PROC_FS
1796static int sba_proc_info(struct seq_file *m, void *p)
1797{
1798 struct sba_device *sba_dev = sba_list;
1799 struct ioc *ioc = &sba_dev->ioc[0];
1800 int total_pages = (int) (ioc->res_size << 3);
1801#ifdef SBA_COLLECT_STATS
1802 unsigned long avg = 0, min, max;
1803#endif
1804 int i;
1805
1806 seq_printf(m, "%s rev %d.%d\n",
1807 sba_dev->name,
1808 (sba_dev->hw_rev & 0x7) + 1,
1809 (sba_dev->hw_rev & 0x18) >> 3);
1810 seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1811 (int)((ioc->res_size << 3) * sizeof(u64)),
1812 total_pages);
1813
1814 seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1815 ioc->res_size, ioc->res_size << 3);
1816
1817 seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
1818 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
1819 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
1820 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));
1821
1822 for (i=0; i<4; i++)
1823 seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
1824 i,
1825 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
1826 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
1827 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));
1828
1829#ifdef SBA_COLLECT_STATS
1830 seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1831 total_pages - ioc->used_pages, ioc->used_pages,
1832 (int)(ioc->used_pages * 100 / total_pages));
1833
1834 min = max = ioc->avg_search[0];
1835 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1836 avg += ioc->avg_search[i];
1837 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1838 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1839 }
1840 avg /= SBA_SEARCH_SAMPLE;
1841 seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1842 min, avg, max);
1843
1844 seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1845 ioc->msingle_calls, ioc->msingle_pages,
1846 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1847
1848
1849 min = ioc->usingle_calls;
1850 max = ioc->usingle_pages - ioc->usg_pages;
1851 seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1852 min, max, (int)((max * 1000)/min));
1853
1854 seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1855 ioc->msg_calls, ioc->msg_pages,
1856 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1857
1858 seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1859 ioc->usg_calls, ioc->usg_pages,
1860 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1861#endif
1862
1863 return 0;
1864}
1865
1866static int
1867sba_proc_open(struct inode *i, struct file *f)
1868{
1869 return single_open(f, &sba_proc_info, NULL);
1870}
1871
1872static const struct file_operations sba_proc_fops = {
1873 .owner = THIS_MODULE,
1874 .open = sba_proc_open,
1875 .read = seq_read,
1876 .llseek = seq_lseek,
1877 .release = single_release,
1878};
1879
1880static int
1881sba_proc_bitmap_info(struct seq_file *m, void *p)
1882{
1883 struct sba_device *sba_dev = sba_list;
1884 struct ioc *ioc = &sba_dev->ioc[0];
1885
1886 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
1887 ioc->res_size, false);
1888 seq_putc(m, '\n');
1889
1890 return 0;
1891}
1892
1893static int
1894sba_proc_bitmap_open(struct inode *i, struct file *f)
1895{
1896 return single_open(f, &sba_proc_bitmap_info, NULL);
1897}
1898
1899static const struct file_operations sba_proc_bitmap_fops = {
1900 .owner = THIS_MODULE,
1901 .open = sba_proc_bitmap_open,
1902 .read = seq_read,
1903 .llseek = seq_lseek,
1904 .release = single_release,
1905};
1906#endif
1907
1908static const struct parisc_device_id sba_tbl[] __initconst = {
1909 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
1910 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
1911 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
1912 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
1913 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
1914 { 0, }
1915};
1916
1917static int sba_driver_callback(struct parisc_device *);
1918
1919static struct parisc_driver sba_driver __refdata = {
1920 .name = MODULE_NAME,
1921 .id_table = sba_tbl,
1922 .probe = sba_driver_callback,
1923};
1924

/*
** Probe routine: determine which SBA variant this is (Astro/Ike/REO/Pluto),
** set up its IOC(s), hook up the DMA ops and create the /proc entries.
*/
1930static int __init sba_driver_callback(struct parisc_device *dev)
1931{
1932 struct sba_device *sba_dev;
1933 u32 func_class;
1934 int i;
1935 char *version;
1936 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
1937#ifdef CONFIG_PROC_FS
1938 struct proc_dir_entry *root;
1939#endif
1940
1941 sba_dump_ranges(sba_addr);
1942
1943
1944 func_class = READ_REG(sba_addr + SBA_FCLASS);
1945
1946 if (IS_ASTRO(dev)) {
1947 unsigned long fclass;
1948 static char astro_rev[]="Astro ?.?";
1949
1950
1951 fclass = READ_REG(sba_addr);
1952
1953 astro_rev[6] = '1' + (char) (fclass & 0x7);
1954 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
1955 version = astro_rev;
1956
1957 } else if (IS_IKE(dev)) {
1958 static char ike_rev[] = "Ike rev ?";
1959 ike_rev[8] = '0' + (char) (func_class & 0xff);
1960 version = ike_rev;
1961 } else if (IS_PLUTO(dev)) {
1962 static char pluto_rev[]="Pluto ?.?";
1963 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
1964 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
1965 version = pluto_rev;
1966 } else {
1967 static char reo_rev[] = "REO rev ?";
1968 reo_rev[8] = '0' + (char) (func_class & 0xff);
1969 version = reo_rev;
1970 }
1971
1972 if (!global_ioc_cnt) {
1973 global_ioc_cnt = count_parisc_driver(&sba_driver);
1974
1975
1976 if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
1977 global_ioc_cnt *= 2;
1978 }
1979
1980 printk(KERN_INFO "%s found %s at 0x%llx\n",
1981 MODULE_NAME, version, (unsigned long long)dev->hpa.start);
1982
1983 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
1984 if (!sba_dev) {
1985 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
1986 return -ENOMEM;
1987 }
1988
1989 parisc_set_drvdata(dev, sba_dev);
1990
1991 for(i=0; i<MAX_IOC; i++)
1992 spin_lock_init(&(sba_dev->ioc[i].res_lock));
1993
1994 sba_dev->dev = dev;
1995 sba_dev->hw_rev = func_class;
1996 sba_dev->name = dev->name;
1997 sba_dev->sba_hpa = sba_addr;
1998
1999 sba_get_pat_resources(sba_dev);
2000 sba_hw_init(sba_dev);
2001 sba_common_init(sba_dev);
2002
2003 hppa_dma_ops = &sba_ops;
2004
2005#ifdef CONFIG_PROC_FS
2006 switch (dev->id.hversion) {
2007 case PLUTO_MCKINLEY_PORT:
2008 root = proc_mckinley_root;
2009 break;
2010 case ASTRO_RUNWAY_PORT:
2011 case IKE_MERCED_PORT:
2012 default:
2013 root = proc_runway_root;
2014 break;
2015 }
2016
2017 proc_create("sba_iommu", 0, root, &sba_proc_fops);
2018 proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
2019#endif
2020
2021 parisc_has_iommu();
2022 return 0;
2023}

/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
2030void __init sba_init(void)
2031{
2032 register_parisc_driver(&sba_driver);
2033}

/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc device.
 *
 * Returns the appropriate IOMMU data for the given pci controller:
 * the "struct ioc" whose pdir the controller's devices will use.
 */
2043void * sba_get_iommu(struct parisc_device *pci_hba)
2044{
2045 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2046 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2047 char t = sba_dev->id.hw_type;
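	/* Each IOC drives ROPES_PER_IOC ropes; the bits above the rope number select the IOC. */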
2048 int iocnum = (pci_hba->hw_path >> 3);
2049
2050 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
2051
2052 return &(sba->ioc[iocnum]);
2053}

/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc device.
 * @r: resource to fill in.
 *
 * Scan the SBA's four DIRECTED range register sets and report the first
 * one routed to this LBA's rope.
 */
2064void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2065{
2066 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2067 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2068 char t = sba_dev->id.hw_type;
2069 int i;
2070 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2071
2072 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2073
2074 r->start = r->end = 0;
2075
2076
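	/* Scan the four DIRECTED LMMIO register sets for one routed to this rope. */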
2077 for (i=0; i<4; i++) {
2078 int base, size;
2079 void __iomem *reg = sba->sba_hpa + i*0x18;
2080
2081 base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
2082 if ((base & 1) == 0)
2083 continue;
2084
2085 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);
2086
2087 if ((size & (ROPES_PER_IOC-1)) != rope)
2088 continue;
2089
2090 r->start = (base & ~1UL) | PCI_F_EXTEND;
2091 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
2092 r->end = r->start + size;
2093 r->flags = IORESOURCE_MEM;
2094 }
2095}

/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc device.
 * @r: resource to fill in.
 *
 * The distributed LMMIO range is split evenly among the ropes below this
 * SBA; report the slice belonging to this LBA's rope.
 */
2107void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
2108{
2109 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2110 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2111 char t = sba_dev->id.hw_type;
2112 int base, size;
2113 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2114
2115 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2116
2117 r->start = r->end = 0;
2118
2119 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
2120 if ((base & 1) == 0) {
2121 BUG();
2122 return;
2123 }
2124
2125 r->start = (base & ~1UL) | PCI_F_EXTEND;
2126
2127 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
2128 r->start += rope * (size + 1);
2129 r->end = r->start + size;
2130 r->flags = IORESOURCE_MEM;
2131}
2132