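/*
** System Bus Adapter (SBA) I/O MMU manager for HP PA-RISC platforms
** (Astro, Ike, REO/REOG, and Pluto/McKinley bus adapters).
**
** This module manages the IOC (I/O Controller) found on these chips:
** it maintains the I/O page directory (pdir) that translates 32-bit
** PCI DMA addresses to physical memory, and implements the
** dma_map_ops (sba_ops) used by PCI drivers on these machines.
*/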
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/hardware.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>
#include <asm/runway.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "SBA"

/*
** Debug flags: all undefined for the fast path.
** ASSERT_PDIR_SANITY enables consistency checks between the resource
** bitmap and the I/O pdir on every map/unmap.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

/* set if the I/O pdir needs FDC/SYNC (i.e. is not cache coherent) */
static unsigned long ioc_needs_fdc = 0;

/* global count of IOCs in the system - used to size the IOVA space */
static unsigned int global_ioc_cnt = 0;

/* set when the Piranha CPU bug workaround sacrifices 128kB of pdir */
static unsigned long piranha_bad_128k = 0;

#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif


/*
** Register read/write wrappers. SBA registers are 64 bits wide; on
** 32-bit kernels only 32-bit accesses are used.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif

#ifdef DEBUG_SBA_INIT

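/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the distributed and direct LMMIO range registers of an
 * Astro/Ike/Pluto I/O bus adapter.
 */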
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

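/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU pdir.
 */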
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif


#ifdef ASSERT_PDIR_SANITY

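/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU pdir entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print onto the console.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU pdir in human readable form.
 */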
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from the lowest pde in the same word */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr);
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}

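/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print onto the console.
 *
 * Verify the resource map and pdir state is consistent
 */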
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;		/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* the "valid" bit is the top bit of the pdir
			** entry's byte 7; line it up with the top bit
			** of rval so the two can be compared.
			*/
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/* res_map and pdir disagree: dump the
				** offending entry and fail.
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}

	return 0;
}

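/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */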
static void
sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
			nents,
			(unsigned long) sg_dma_address(startsg),
			sg_dma_len(startsg),
			sg_virt(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */


/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}

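/**
 * sba_search_bitmap - find free space in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device to query the bitmap for
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Returns the pide (pdir index) or ~0UL if nothing was found.
 */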
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for (; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource map on a "naturally aligned"
		** boundary for the allocation size: searching for
		** log2(size) bits at a time keeps the bitmap from
		** fragmenting.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while (res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;	/* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}

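/**
 * sba_alloc_range - find free bits and mark them in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device for segment-boundary constraints
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those
 * bits in the resource bit map.
 */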
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		/* search wrapped: retry once before giving up */
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if (0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}

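/**
 * sba_free_range - unmark bits in IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to remove from bit map
 *
 * clear bits in the ioc's resource map
 */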
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* shift the mask down to the bit position within the word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}


/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

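/**
 * sba_io_pdir_entry - fill in one IO pdir entry
 * @pdir_ptr: pointer to IO pdir entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * Load the I/O pdir entry pointed to by pdir_ptr with the physical
 * page number of @vba, the coherence index obtained with the LCI
 * instruction, and the "valid" bit. If the IOC is not cache coherent,
 * the entry is flushed with FDC so the IOMMU sees the update.
 */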
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa;			/* physical address */
	register unsigned ci;	/* coherent index */

	pa = lpa(vba);
	pa &= IOVP_MASK;

	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> PAGE_SHIFT) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	** If the PDC_MODEL capabilities has the Non-coherent IO-PDIR bit
	** set, we have to flush and sync every time the IO-PDIR is
	** changed.
	*/
	asm_io_fdc(pdir_ptr);
}

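/**
 * sba_mark_invalid - invalidate one or more IO pdir entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
 * Mark the IO pdir entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry via the PCOM (Purge Command) register.
 * PCOM purges a power-of-2-sized, size-aligned range; the low bits
 * of the address written to it encode the size of the purge.
 */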
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert the first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			asm_io_fdc(pdir_ptr);
			if (ioc_needs_fdc) {
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	asm_io_fdc(pdir_ptr);

	WRITE_REG(SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}

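/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask: number of address bits this PCI device can handle
 *
 * Returns 1 if @mask covers the IOVA space this IOMMU can map for
 * the device, 0 otherwise.
 */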
static int sba_dma_supported(struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* Drivers are told to try a 64-bit mask first and fall back to
	** 32-bit if that fails. We only accept 32-bit masks here since
	** we can never allow IOMMU bypass.
	*/
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	/*
	** check if mask is >= than the current max IO Virt Address
	** The max IO Virt address will *always* < 30 bits.
	*/
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE))));
}

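/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr: driver buffer to map.
 * @size: number of bytes to map in driver buffer.
 * @direction: R/W or both.
 *
 * Allocates a pide range, fills in the corresponding pdir entries,
 * and returns the bus address the device should use.
 */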
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc, "Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}


static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction,
	     unsigned long attrs)
{
	return sba_map_single(dev, page_address(page) + offset, size,
			      direction);
}

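/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova: IOVA of driver buffer previously mapped.
 * @size: number of bytes mapped in driver buffer.
 * @direction: R/W or both.
 * @attrs: DMA attributes
 */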
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
	       enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}
	offset = iova & ~IOVP_MASK;
	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use a IO Pdir entry reduces the number
	** of MMIO reads needed to flush writes to the PCOM register.
	*/
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	asm_io_sync();

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);
}

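/**
 * sba_alloc - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @dma_handle: IOVA of new buffer.
 * @gfp: allocation flags
 * @attrs: DMA attributes
 */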
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
		       gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}

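/**
 * sba_free - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @vaddr: virtual address of the buffer to free.
 * @dma_handle: IO virtual address of "consistent" buffer.
 * @attrs: DMA attributes
 */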
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
	 dma_addr_t dma_handle, unsigned long attrs)
{
	sba_unmap_page(hwdev, dma_handle, size, 0, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif

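/**
 * sba_map_sg - coalesce and map scatter/gather lists
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: R/W or both.
 * @attrs: DMA attributes
 *
 * Two passes over the SG list: the first coalesces adjacent chunks
 * and allocates pdir space; the second fills in the pdir entries
 * for each coalesced chunk.
 */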
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First pass: coalesce the chunks and allocate I/O pdir space.
	** Access to the virtual address is what forces the two pass
	** algorithm: the pdir entries can only be written once the
	** coalesced ranges are known.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Second pass: program the I/O pdir.
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc, "Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}

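/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @direction: R/W or both.
 * @attrs: DMA attributes
 */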
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sg_virt(sglist), sglist->length);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
				direction, 0);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since unmap_sg calls unmap_page */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static const struct dma_map_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc =		sba_alloc,
	.free =			sba_free,
	.map_page =		sba_map_page,
	.unmap_page =		sba_unmap_page,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
};
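
/*
** Usage sketch: drivers never call the sba_* routines directly; the
** generic DMA API dispatches through hppa_dma_ops, which
** sba_driver_callback() points at sba_ops. A hypothetical PCI driver
** would simply do:
**
**	void *buf = kmalloc(8192, GFP_KERNEL);
**	dma_addr_t bus = dma_map_single(&pdev->dev, buf, 8192,
**					DMA_TO_DEVICE);
**	... start device DMA ...		(ends up in sba_map_page)
**	dma_unmap_single(&pdev->dev, bus, 8192, DMA_TO_DEVICE);
**						(ends up in sba_unmap_page)
*/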

/**************************************************************
*
*   Initialization and claim
*
***************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


/**************************************************
*   Initialization of the hardware
**************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL	/* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL	/* bit 17,18 on */
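/*
** sba_alloc_pdir - allocate and align the I/O page directory
**
** The allocation avoids the physical address window matched by
** PIRANHA_ADDR_MASK/PIRANHA_ADDR_VAL, which buggy Piranha CPU
** revisions (cpuid type 0x13, version <= 0x202, without capability
** bit 0x08) apparently cannot tolerate. For pdirs larger than 1MB,
** 128kB of the pdir may be sacrificed instead; piranha_bad_128k
** records that so the resource map never hands it out.
*/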
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base) {
		panic("%s() could not allocate I/O Page Table\n",
			__func__);
	}

	/* If this is not a Piranha, or is a fixed revision,
	** we aren't exposed to the HW bug.
	*/
	if (((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L))
		return (void *) pdir_base;

	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k boundary */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order += 1;
				pdir_size <<= 1;
			}
		}
	} else {
		/*
		** 1MB or 2MB pdir: needs to be aligned on an
		** "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1);

		/* release original block we don't need */
		free_pages(pdir_base, pdir_order);

		/* release the "front" 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB pdir.
			** Flag tells sba_common_init() to mark the bad
			** 128k as used and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;	/* move up a 2MB boundary */
			free_pages(new_pdir, 20-12);

			/* "bring it back" to a 128kB boundary */
			free_pages(new_pdir - 128*1024, 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}

struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	/* lba_set_iregs() is in drivers/parisc/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;

	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}

/* setup_ibase_imask - program the IBASE/IMASK registers of all LBAs
** hanging off this IOC.
*/
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc		= ioc,
		.ioc_num	= ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}

#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif

static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** On Pluto, firmware has already programmed the IOVA window
	** base and size in the IBASE and IMASK registers; read them
	** back instead of choosing our own.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__func__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IC and memory */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O Pdir page size to system page size */
	switch (PAGE_SHIFT) {
	case 12: tcnfg = 0; break;	/*  4K */
	case 13: tcnfg = 1; break;	/*  8K */
	case 14: tcnfg = 2; break;	/* 16K */
	case 16: tcnfg = 3; break;	/* 64K */
	default:
		panic(__FILE__ "Unsupported system page size %d",
			1 << PAGE_SHIFT);
		break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** (bit zero of IBASE is the enable bit).
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/* Purge any possible entries from the I/O TLB */
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT
	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA. We can't know ahead of time whether GART
	** support will actually be used, so key on any AGP
	** (Quicksilver) device in the system and leave a cookie in
	** the pdir for the GART code to handshake on.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif
}

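/*
** sba_ioc_init - initialize an Astro/Ike IOC
**
** Unlike Pluto, firmware does not hand us an IOVA window: size the
** IOVA space from the amount of physical memory (divided across the
** IOCs), clamp it between 1MB and 1GB, and program IBASE/IMASK
** ourselves.
*/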
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Determine IOVA Space size from memory size.
	**
	** While we have 32-bits of "IOVA" space, the top two bits
	** are reserved for DMA hints - ergo only 30 bits max.
	*/
	iova_space_size = (u32) (totalram_pages()/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
			__func__,
			ioc->ioc_hpa,
			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IC and memory */
	iova_space_mask = 0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__func__, ioc->ibase, ioc->imask);

	/*
	** Tell each LBA hanging off this IOC which ibase/imask
	** it should accept DMA from.
	*/
	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O pdir page size to system page size */
	switch (PAGE_SHIFT) {
	case 12: tcnfg = 0; break;	/*  4K */
	case 13: tcnfg = 1; break;	/*  8K */
	case 14: tcnfg = 2; break;	/* 16K */
	case 16: tcnfg = 3; break;	/* 64K */
	default:
		panic(__FILE__ "Unsupported system page size %d",
			1 << PAGE_SHIFT);
		break;
	}

	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);

	/* Purge any possible entries from the I/O TLB */
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __func__);
}


/**************************************************************
*
*   SBA initialization code (HW and SW)
*
**************************************************************/

static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
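
/*
** sba_hw_init - hardware-level initialization of an SBA
**
** Resets I/O devices on firmware paths that need it, programs the
** IOC control register, maps the per-chip IOC register sets and
** reserved MMIO ranges, enables hard-fail on rope errors, and then
** initializes each IOC's IOMMU state.
*/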
static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/* Reset I/O devices if a console keyboard is present. */
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}

#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
	PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	/* Reset the bus when not booted from a disk-class device. */
	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
		pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* Ike/REO: two IOCs per SBA */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

	}

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {

			/* Pluto only: clear the rope's AO config bit
			** (presumably "allow relaxed ordering").
			*/
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Make sure the box hard-fails on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
				i,
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL 0x%Lx  FLUSH_CTRL 0x%Lx\n",
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

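/*
** sba_common_init - software-level initialization common to all SBAs
**
** Links the device onto sba_list, allocates and initializes each
** IOC's resource bitmap (one bit per pdir entry), blocks out the
** 128kB hole when the Piranha workaround is in effect, and records
** whether the I/O pdir needs FDC/SYNC flushes.
*/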
static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for (i = 0; i < sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					  void (*)(pte_t * , unsigned long),
					  unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size / sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;	/* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__);
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - skip first bytes of res_map */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408k to +1536k is un-usable. */
			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the resource map busy */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities has the Non-coherent IO-PDIR bit
	 * set, we have to flush and sync every time the IO-PDIR is changed.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i;

	seq_printf(m, "%s rev %d.%d\n",
		   sba_dev->name,
		   (sba_dev->hw_rev & 0x7) + 1,
		   (sba_dev->hw_rev & 0x18) >> 3);
	seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		   (int)((ioc->res_size << 3) * sizeof(u64)),
		   total_pages);

	seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		   ioc->res_size, ioc->res_size << 3);	/* 8 bits per byte */

	seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i = 0; i < 4; i++)
		seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
			   i,
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
		   total_pages - ioc->used_pages, ioc->used_pages,
		   (int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		   min, avg, max);

	seq_printf(m, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		   ioc->msingle_calls, ioc->msingle_pages,
		   (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	seq_printf(m, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		   min, max, (int)((max * 1000)/min));

	seq_printf(m, "pci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
		   ioc->msg_calls, ioc->msg_pages,
		   (int)((ioc->msg_pages * 1000)/ioc->msg_calls));

	seq_printf(m, "pci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
		   ioc->usg_calls, ioc->usg_pages,
		   (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */

	seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
		     ioc->res_size, false);
	seq_putc(m, '\n');

	return 0;
}
#endif /* CONFIG_PROC_FS */

static const struct parisc_device_id sba_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

static int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver __refdata = {
	.name =		MODULE_NAME,
	.id_table =	sba_tbl,
	.probe =	sba_driver_callback,
};

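/*
** Probe callback: identify the chip (Astro/Ike/Pluto/REO), allocate
** and initialize its sba_device state, install sba_ops as the global
** DMA ops, and register the /proc entries.
*/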
static int __init sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *root;
#endif

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[]="Astro ?.?";

		/* Astro is broken...Read HW Rev First */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[]="Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		/* Astro and Pluto have one IOC per SBA */
		if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%llx\n",
		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for (i = 0; i < MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		root = proc_runway_root;
		break;
	}

	proc_create_single("sba_iommu", 0, root, sba_proc_info);
	proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
#endif
	return 0;
}

/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}

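/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc device.
 *
 * This function searches through the registered IOMMU's and returns
 * the appropriate IOMMU data for the given parisc PCI controller.
 */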
void * sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* IOC # (8 ropes per IOC) */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}

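/**
 * sba_directed_lmmio - return the directed LMMIO range routed to a rope
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope and fill in @r accordingly
 * (r->start stays 0 if none is found).
 */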
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. */
	for (i = 0; i < 4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down a different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}

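/**
 * sba_distributed_lmmio - return a rope's slice of the distributed range
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, return the portion of the
 * distributed LMMIO range that is routed down its rope.
 */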
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* distributed range not enabled */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}