/*
** System Bus Adapter (SBA) I/O MMU manager
**
** Initializes and drives the IOC (I/O Controller) found on Astro, Ike,
** REO and Pluto based PA-RISC machines: it owns the IO pdir that maps
** 32-bit PCI bus addresses onto physical memory and provides the DMA
** mapping operations used by the PCI subsystem.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/hardware.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>
#include <asm/runway.h>
#include <asm/page.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "SBA"

/*
** Compile-time debug switches.  They all default to off; enabling any
** of them makes the driver extremely chatty.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

/* Set if the IOC needs FDC/SYNC after each IO pdir update */
static unsigned long ioc_needs_fdc = 0;

/* Number of IOCs in the whole system (used to size the IOV space) */
static unsigned int global_ioc_cnt = 0;

/* Set when the Piranha 128k pdir workaround is in effect */
static unsigned long piranha_bad_128k = 0;

#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

/*
** SBA/IOC register access wrappers.  On 64-bit kernels the registers
** are accessed with 64-bit loads/stores, otherwise 32-bit accesses
** are used.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_ranges - debugging only - print the LMMIO ranges
 * @hpa: base address of the SBA
 *
 * Print the distributed and direct LMMIO range registers.
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IO MMU operating parameters
 * @hpa: base address of the IOC
 *
 * Print the IOC base, mask, TLB config and pdir base registers.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif

#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IO PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
197static void
198sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
199{
200
201 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
202 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
203 uint rcnt;
204
205 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
206 msg,
207 rptr, pide & (BITS_PER_LONG - 1), *rptr);
208
209 rcnt = 0;
210 while (rcnt < BITS_PER_LONG) {
211 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
212 (rcnt == (pide & (BITS_PER_LONG - 1)))
213 ? " -->" : " ",
214 rcnt, ptr, *ptr );
215 rcnt++;
216 ptr++;
217 }
218 printk(KERN_DEBUG "%s", msg);
219}

/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify that the resource map and the pdir agree: every bit set in the
 * resource map must correspond to a pdir entry with its valid bit set,
 * and vice versa.  Returns 1 (after dumping the offending entry) on a
 * mismatch, 0 otherwise.
 */
229static int
230sba_check_pdir(struct ioc *ioc, char *msg)
231{
232 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
233 u32 *rptr = (u32 *) ioc->res_map;
234 u64 *pptr = ioc->pdir_base;
235 uint pide = 0;
236
237 while (rptr < rptr_end) {
238 u32 rval = *rptr;
239 int rcnt = 32;
240
241 while (rcnt) {
242
243 u32 pde = ((u32) (((char *)pptr)[7])) << 24;
244 if ((rval ^ pde) & 0x80000000)
245 {
246
247
248
249
250 sba_dump_pdir_entry(ioc, msg, pide);
251 return(1);
252 }
253 rcnt--;
254 rval <<= 1;
255 pptr++;
256 pide++;
257 }
258 rptr++;
259 }
260
261 return 0;
262}

/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * Print the mapped DMA address/length and the CPU virtual
 * address/length of each scatterlist entry.
 */
273static void
274sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
275{
276 while (nents-- > 0) {
277 printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
278 nents,
279 (unsigned long) sg_dma_address(startsg),
280 sg_dma_len(startsg),
281 sg_virt(startsg), startsg->length);
282 startsg++;
283 }
284}
285
286#endif

/************************************************************
*
*  SBA resource (IOVA) allocation and free functions
*
***********************************************************/

#define PAGES_PER_RANGE 1

/* Convert between IOVP (IO virtual page) and IOVA (IO virtual address) */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set/clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors - saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)

/* The resource map tracks one bit per IO pdir entry (IOVP_SIZE page) */
#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

319static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
320 unsigned int bitshiftcnt)
321{
322 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
323 + bitshiftcnt;
324}

/**
 * sba_search_bitmap - find free space in the IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is being made for (for its segment boundary)
 * @bits_wanted: number of entries we need.
 *
 * Find @bits_wanted consecutive free bits in the resource bitmap while
 * honouring the device's DMA segment boundary.  Returns the pdir index
 * of the first free bit, or ~0UL if nothing was found, and updates the
 * circular search hint.
 */
335static SBA_INLINE unsigned long
336sba_search_bitmap(struct ioc *ioc, struct device *dev,
337 unsigned long bits_wanted)
338{
339 unsigned long *res_ptr = ioc->res_hint;
340 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
341 unsigned long pide = ~0UL, tpide;
342 unsigned long boundary_size;
343 unsigned long shift;
344 int ret;
345
346 boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);
347
348#if defined(ZX1_SUPPORT)
349 BUG_ON(ioc->ibase & ~IOVP_MASK);
350 shift = ioc->ibase >> IOVP_SHIFT;
351#else
352 shift = 0;
353#endif
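
	/*
	** Two search strategies: requests larger than half a word of the
	** resource map are satisfied a whole word at a time; smaller
	** requests slide a size-aligned bit mask through each word.
	** Both honour the device's DMA segment boundary.
	*/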
354
355 if (bits_wanted > (BITS_PER_LONG/2)) {
356
357 for(; res_ptr < res_end; ++res_ptr) {
358 tpide = ptr_to_pide(ioc, res_ptr, 0);
359 ret = iommu_is_span_boundary(tpide, bits_wanted,
360 shift,
361 boundary_size);
362 if ((*res_ptr == 0) && !ret) {
363 *res_ptr = RESMAP_MASK(bits_wanted);
364 pide = tpide;
365 break;
366 }
367 }
368
369 res_ptr++;
370 ioc->res_bitshift = 0;
371 } else {

		/*
		** Search the resource map with a sliding mask, keeping each
		** candidate range naturally aligned on its own size so an
		** allocation never straddles a word of the map.
		*/
378 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
379 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
380 unsigned long mask;
381
382 if (bitshiftcnt >= BITS_PER_LONG) {
383 bitshiftcnt = 0;
384 res_ptr++;
385 }
386 mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
387
388 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
389 while(res_ptr < res_end)
390 {
391 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
392 WARN_ON(mask == 0);
393 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
394 ret = iommu_is_span_boundary(tpide, bits_wanted,
395 shift,
396 boundary_size);
397 if ((((*res_ptr) & mask) == 0) && !ret) {
398 *res_ptr |= mask;
399 pide = tpide;
400 break;
401 }
402 mask >>= o;
403 bitshiftcnt += o;
404 if (mask == 0) {
405 mask = RESMAP_MASK(bits_wanted);
406 bitshiftcnt=0;
407 res_ptr++;
408 }
409 }
410
411 ioc->res_bitshift = bitshiftcnt + bits_wanted;
412 }
413
414
415 if (res_end <= res_ptr) {
416 ioc->res_hint = (unsigned long *) ioc->res_map;
417 ioc->res_bitshift = 0;
418 } else {
419 ioc->res_hint = res_ptr;
420 }
421 return (pide);
422}

/**
 * sba_alloc_range - find free bits and mark them in the resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device the mapping is being made for
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and mark them in the
 * resource bitmap.  Panics if the IO pdir is out of mapping resources.
 */
433static int
434sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
435{
436 unsigned int pages_needed = size >> IOVP_SHIFT;
437#ifdef SBA_COLLECT_STATS
438 unsigned long cr_start = mfctl(16);
439#endif
440 unsigned long pide;
441
442 pide = sba_search_bitmap(ioc, dev, pages_needed);
443 if (pide >= (ioc->res_size << 3)) {
444 pide = sba_search_bitmap(ioc, dev, pages_needed);
445 if (pide >= (ioc->res_size << 3))
446 panic("%s: I/O MMU @ %p is out of mapping resources\n",
447 __FILE__, ioc->ioc_hpa);
448 }
449
450#ifdef ASSERT_PDIR_SANITY
451
452 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
453 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
454 }
455#endif
456
457 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
458 __func__, size, pages_needed, pide,
459 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
460 ioc->res_bitshift );
461
462#ifdef SBA_COLLECT_STATS
463 {
464 unsigned long cr_end = mfctl(16);
465 unsigned long tmp = cr_end - cr_start;
466
467 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
468 }
469 ioc->avg_search[ioc->avg_idx++] = cr_start;
470 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
471
472 ioc->used_pages += pages_needed;
473#endif
474
475 return (pide);
476}

/**
 * sba_free_range - unmark bits in the IO pdir resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * Clear the corresponding bits in the ioc's resource map.
 */
487static SBA_INLINE void
488sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
489{
490 unsigned long iovp = SBA_IOVP(ioc, iova);
491 unsigned int pide = PDIR_INDEX(iovp);
492 unsigned int ridx = pide >> 3;
493 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
494
495 int bits_not_wanted = size >> IOVP_SHIFT;
496
497
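	/* rebuild the same aligned mask that sba_search_bitmap() marked */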
498 unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
499
500 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
501 __func__, (uint) iova, size,
502 bits_not_wanted, m, pide, res_ptr, *res_ptr);
503
504#ifdef SBA_COLLECT_STATS
505 ioc->used_pages -= bits_not_wanted;
506#endif
507
508 *res_ptr &= ~m;
509}

/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to the IO PDIR entry
 * @sid: process Space ID - currently only KERNEL_SPACE is supported
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * Load the IO PDIR entry pointed to by @pdir_ptr with:
 *   o the physical page number of @vba (from the LPA instruction),
 *   o the coherence index returned by the LCI instruction in the
 *     low byte, and
 *   o the "valid" bit,
 * and store it little-endian, which is how the IOC reads it.
 */
565static void SBA_INLINE
566sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
567 unsigned long hint)
568{
569 u64 pa;
570 register unsigned ci;
571
572 pa = lpa(vba);
573 pa &= IOVP_MASK;
574
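	/*
	** "lci" returns the coherence index of the CPU virtual address;
	** it is stored in the pdir entry so the IOC can keep DMA
	** coherent with the CPU caches.
	*/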
575 asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
576 pa |= (ci >> PAGE_SHIFT) & 0xff;
577
578 pa |= SBA_PDIR_VALID_BIT;
579 *pdir_ptr = cpu_to_le64(pa);

	/*
	** Flush the just-written entry out of the CPU data cache (FDC)
	** so an IOC whose pdir fetches are not cache coherent never
	** reads a stale copy.
	*/
586 asm_io_fdc(pdir_ptr);
587}

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Clear the "valid" bit of every IO pdir entry covering the range and
 * tell the IO TLB to purge the matching entries via the PCOM register.
 */
606static SBA_INLINE void
607sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
608{
609 u32 iovp = (u32) SBA_IOVP(ioc,iova);
610 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
611
612#ifdef ASSERT_PDIR_SANITY
	/* Assert the first pdir entry is marked valid.
	**
	** Even though this is a big-endian machine, the entries in the
	** IO pdir are stored little-endian, hence the check of byte 7
	** rather than byte 0.
	*/
619 if (0x80 != (((u8 *) pdir_ptr)[7])) {
620 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
621 }
622#endif
623
624 if (byte_cnt > IOVP_SIZE)
625 {
626#if 0
627 unsigned long entries_per_cacheline = ioc_needs_fdc ?
628 L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
629 - (unsigned long) pdir_ptr;
630 : 262144;
631#endif
632
633
634 iovp |= get_order(byte_cnt) + PAGE_SHIFT;
635
636 do {
637
638 ((u8 *) pdir_ptr)[7] = 0;
639 asm_io_fdc(pdir_ptr);
640 if (ioc_needs_fdc) {
641#if 0
642 entries_per_cacheline = L1_CACHE_SHIFT - 3;
643#endif
644 }
645 pdir_ptr++;
646 byte_cnt -= IOVP_SIZE;
647 } while (byte_cnt > IOVP_SIZE);
648 } else
649 iovp |= IOVP_SHIFT;

	/*
	** Clear the "valid" bit of the last (or only) entry, flush it
	** out to memory, and purge the matching IO TLB entries.  The low
	** bits of the address written to PCOM encode log2 of the range
	** being purged.
	*/
658 ((u8 *) pdir_ptr)[7] = 0;
659 asm_io_fdc(pdir_ptr);
660
661 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
662}

/**
 * sba_dma_supported - PCI driver support entry point
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * Return 1 if the mask covers every IO virtual address this IOC can
 * hand out, 0 otherwise.
 */
671static int sba_dma_supported( struct device *dev, u64 mask)
672{
673 struct ioc *ioc;
674
675 if (dev == NULL) {
676 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
677 BUG();
678 return(0);
679 }
680
681 ioc = GET_IOC(dev);
682 if (!ioc)
683 return 0;

	/*
	** Check whether the mask is >= the highest IO virtual address
	** this IOC will ever hand out (ibase + IOV space size).
	*/
689 return((int)(mask >= (ioc->ibase - 1 +
690 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
691}

/**
 * sba_map_single - map one buffer and return an IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * Allocate IOVA space for the buffer, fill in the IO pdir entries and
 * return the bus address to hand to the device.
 */
703static dma_addr_t
704sba_map_single(struct device *dev, void *addr, size_t size,
705 enum dma_data_direction direction)
706{
707 struct ioc *ioc;
708 unsigned long flags;
709 dma_addr_t iovp;
710 dma_addr_t offset;
711 u64 *pdir_start;
712 int pide;
713
714 ioc = GET_IOC(dev);
715 if (!ioc)
716 return DMA_MAPPING_ERROR;
717
718
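	/* save the page offset and round size up to whole IO pages */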
719 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
720
721
722 size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
723
724 spin_lock_irqsave(&ioc->res_lock, flags);
725#ifdef ASSERT_PDIR_SANITY
726 sba_check_pdir(ioc,"Check before sba_map_single()");
727#endif
728
729#ifdef SBA_COLLECT_STATS
730 ioc->msingle_calls++;
731 ioc->msingle_pages += size >> IOVP_SHIFT;
732#endif
733 pide = sba_alloc_range(ioc, dev, size);
734 iovp = (dma_addr_t) pide << IOVP_SHIFT;
735
736 DBG_RUN("%s() 0x%p -> 0x%lx\n",
737 __func__, addr, (long) iovp | offset);
738
739 pdir_start = &(ioc->pdir_base[pide]);
740
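	/* fill in one IO pdir entry per IO page covered by the buffer */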
741 while (size > 0) {
742 sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
743
744 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
745 pdir_start,
746 (u8) (((u8 *) pdir_start)[7]),
747 (u8) (((u8 *) pdir_start)[6]),
748 (u8) (((u8 *) pdir_start)[5]),
749 (u8) (((u8 *) pdir_start)[4]),
750 (u8) (((u8 *) pdir_start)[3]),
751 (u8) (((u8 *) pdir_start)[2]),
752 (u8) (((u8 *) pdir_start)[1]),
753 (u8) (((u8 *) pdir_start)[0])
754 );
755
756 addr += IOVP_SIZE;
757 size -= IOVP_SIZE;
758 pdir_start++;
759 }
760
761
762 asm_io_sync();
763
764#ifdef ASSERT_PDIR_SANITY
765 sba_check_pdir(ioc,"Check after sba_map_single()");
766#endif
767 spin_unlock_irqrestore(&ioc->res_lock, flags);
768
769
770 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
771}
772
773
774static dma_addr_t
775sba_map_page(struct device *dev, struct page *page, unsigned long offset,
776 size_t size, enum dma_data_direction direction,
777 unsigned long attrs)
778{
779 return sba_map_single(dev, page_address(page) + offset, size,
780 direction);
781}

/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 * @attrs: DMA attributes
 *
 * Invalidate the IO pdir entries, purge the IO TLB and return the IOVA
 * range to the resource bitmap (possibly batched).
 */
793static void
794sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
795 enum dma_data_direction direction, unsigned long attrs)
796{
797 struct ioc *ioc;
798#if DELAYED_RESOURCE_CNT > 0
799 struct sba_dma_pair *d;
800#endif
801 unsigned long flags;
802 dma_addr_t offset;
803
804 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
805
806 ioc = GET_IOC(dev);
807 if (!ioc) {
808 WARN_ON(!ioc);
809 return;
810 }
811 offset = iova & ~IOVP_MASK;
812 iova ^= offset;
813 size += offset;
814 size = ALIGN(size, IOVP_SIZE);
815
816 spin_lock_irqsave(&ioc->res_lock, flags);
817
818#ifdef SBA_COLLECT_STATS
819 ioc->usingle_calls++;
820 ioc->usingle_pages += size >> IOVP_SHIFT;
821#endif
822
823 sba_mark_invalid(ioc, iova, size);
824
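	/*
	** Either batch up freed ranges so one IO TLB purge covers
	** DELAYED_RESOURCE_CNT unmaps, or free and purge immediately.
	*/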
825#if DELAYED_RESOURCE_CNT > 0
826
827
828
829 d = &(ioc->saved[ioc->saved_cnt]);
830 d->iova = iova;
831 d->size = size;
832 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
833 int cnt = ioc->saved_cnt;
834 while (cnt--) {
835 sba_free_range(ioc, d->iova, d->size);
836 d--;
837 }
838 ioc->saved_cnt = 0;
839
840 READ_REG(ioc->ioc_hpa+IOC_PCOM);
841 }
842#else
843 sba_free_range(ioc, iova, size);
844
845
846 asm_io_sync();
847
848 READ_REG(ioc->ioc_hpa+IOC_PCOM);
849#endif
850
851 spin_unlock_irqrestore(&ioc->res_lock, flags);
861}

/**
 * sba_alloc - allocate/map shared memory for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 * @gfp: allocation flags
 * @attrs: DMA attributes
 *
 * Allocate zeroed pages and map them for DMA in one go.
 */
872static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
873 gfp_t gfp, unsigned long attrs)
874{
875 void *ret;
876
877 if (!hwdev) {
878
879 *dma_handle = 0;
880 return NULL;
881 }
882
883 ret = (void *) __get_free_pages(gfp, get_order(size));
884
885 if (ret) {
886 memset(ret, 0, size);
887 *dma_handle = sba_map_single(hwdev, ret, size, 0);
888 }
889
890 return ret;
891}

/**
 * sba_free - free/unmap shared memory previously obtained from sba_alloc()
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  CPU virtual address of the buffer.
 * @dma_handle:  IO virtual address of the buffer.
 * @attrs: DMA attributes
 *
 * Unmap the buffer and release its pages.
 */
903static void
904sba_free(struct device *hwdev, size_t size, void *vaddr,
905 dma_addr_t dma_handle, unsigned long attrs)
906{
907 sba_unmap_page(hwdev, dma_handle, size, 0, 0);
908 free_pages((unsigned long) vaddr, get_order(size));
909}

/*
** Since 0 is a valid pdir_base index value, we can't use that to tell
** whether a scatterlist entry already carries a valid pdir index.
** PIDE_FLAG marks entries that do.
*/
917#define PIDE_FLAG 0x80000000UL
918
919#ifdef SBA_COLLECT_STATS
920#define IOMMU_MAP_STATS
921#endif
922#include "iommu-helpers.h"
923
924#ifdef DEBUG_LARGE_SG_ENTRIES
925int dump_run_sg = 0;
926#endif

/**
 * sba_map_sg - map a Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: DMA attributes
 *
 * Coalesce the scatterlist into as few DMA streams as possible,
 * allocate IOVA space for each stream and fill in the IO pdir entries.
 * Returns the number of DMA streams used.
 */
938static int
939sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
940 enum dma_data_direction direction, unsigned long attrs)
941{
942 struct ioc *ioc;
943 int coalesced, filled = 0;
944 unsigned long flags;
945
946 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
947
948 ioc = GET_IOC(dev);
949 if (!ioc)
950 return -EINVAL;
951
952
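	/* fast path: a single-entry list is just a map_single() call */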
953 if (nents == 1) {
954 sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
955 sglist->length, direction);
956 sg_dma_len(sglist) = sglist->length;
957 return 1;
958 }
959
960 spin_lock_irqsave(&ioc->res_lock, flags);
961
962#ifdef ASSERT_PDIR_SANITY
963 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
964 {
965 sba_dump_sg(ioc, sglist, nents);
966 panic("Check before sba_map_sg()");
967 }
968#endif
969
970#ifdef SBA_COLLECT_STATS
971 ioc->msg_calls++;
972#endif

	/*
	** First pass: coalesce adjacent chunks and allocate IO pdir
	** space for each resulting DMA stream.
	*/
982 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Second pass: fill in the IO pdir entries for every stream,
	** using each chunk's CPU virtual address so the coherence
	** index is correct.
	*/
992 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
993
994
995 asm_io_sync();
996
997#ifdef ASSERT_PDIR_SANITY
998 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
999 {
1000 sba_dump_sg(ioc, sglist, nents);
1001 panic("Check after sba_map_sg()\n");
1002 }
1003#endif
1004
1005 spin_unlock_irqrestore(&ioc->res_lock, flags);
1006
1007 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1008
1009 return filled;
1010}

/**
 * sba_unmap_sg - unmap a Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: DMA attributes
 *
 * Walk the list and unmap each DMA stream that sba_map_sg() set up.
 */
1022static void
1023sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
1024 enum dma_data_direction direction, unsigned long attrs)
1025{
1026 struct ioc *ioc;
1027#ifdef ASSERT_PDIR_SANITY
1028 unsigned long flags;
1029#endif
1030
1031 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1032 __func__, nents, sg_virt(sglist), sglist->length);
1033
1034 ioc = GET_IOC(dev);
1035 if (!ioc) {
1036 WARN_ON(!ioc);
1037 return;
1038 }
1039
1040#ifdef SBA_COLLECT_STATS
1041 ioc->usg_calls++;
1042#endif
1043
1044#ifdef ASSERT_PDIR_SANITY
1045 spin_lock_irqsave(&ioc->res_lock, flags);
1046 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1047 spin_unlock_irqrestore(&ioc->res_lock, flags);
1048#endif
1049
1050 while (sg_dma_len(sglist) && nents--) {
1051
1052 sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
1053 direction, 0);
1054#ifdef SBA_COLLECT_STATS
1055 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1056 ioc->usingle_calls--;
1057#endif
1058 ++sglist;
1059 }
1060
1061 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1062
1063#ifdef ASSERT_PDIR_SANITY
1064 spin_lock_irqsave(&ioc->res_lock, flags);
1065 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1066 spin_unlock_irqrestore(&ioc->res_lock, flags);
1067#endif
1068
1069}
1070
1071static const struct dma_map_ops sba_ops = {
1072 .dma_supported = sba_dma_supported,
1073 .alloc = sba_alloc,
1074 .free = sba_free,
1075 .map_page = sba_map_page,
1076 .unmap_page = sba_unmap_page,
1077 .map_sg = sba_map_sg,
1078 .unmap_sg = sba_unmap_sg,
1079 .get_sgtable = dma_common_get_sgtable,
1080 .alloc_pages = dma_common_alloc_pages,
1081 .free_pages = dma_common_free_pages,
1082};

/**
 * sba_get_pat_resources - set up IO resources from PAT PDC
 * @sba_dev: the SBA device instance
 *
 * Placeholder: on PAT platforms the ranges could be read from firmware,
 * but this is not implemented yet (see the #if 0 body below).
 */
1094static void
1095sba_get_pat_resources(struct sba_device *sba_dev)
1096{
1097#if 0
1098
1099
1100
1101
1102
1103
1104PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
1105 FIXME : ???
1106PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
1107 Tells where the dvi bits are located in the address.
1108PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1109 FIXME : ???
1110#endif
1111}

/*
** sba_alloc_pdir - allocate the IO pdir, working around a bug in some
** PA8700 (PCX-W2) steppings that cannot tolerate a pdir overlapping the
** physical address window selected by PIRANHA_ADDR_MASK/PIRANHA_ADDR_VAL.
*/
1119#define PIRANHA_ADDR_MASK 0x00160000UL
1120#define PIRANHA_ADDR_VAL 0x00060000UL
1121static void *
1122sba_alloc_pdir(unsigned int pdir_size)
1123{
1124 unsigned long pdir_base;
1125 unsigned long pdir_order = get_order(pdir_size);
1126
1127 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
1128 if (NULL == (void *) pdir_base) {
1129 panic("%s() could not allocate I/O Page Table\n",
1130 __func__);
1131 }
1132

	/*
	** Unaffected CPUs (newer revision, or with a cache-coherent IO
	** pdir) can use the pdir exactly as allocated.
	*/
1139 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
1140 || (boot_cpu_data.pdc.versions > 0x202)
1141 || (boot_cpu_data.pdc.capabilities & 0x08L) )
1142 return (void *) pdir_base;
1143
	/*
	** Affected CPU: make sure the pdir does not end inside the bad
	** window.  Small pdirs are re-allocated as a naturally aligned
	** 512KB block and the unused tail is returned; larger pdirs are
	** over-allocated, the base is moved up by 1MB, and the surplus
	** pages are given back (including, for pdirs above 1MB, the
	** 128KB that overlaps the bad window, tracked by
	** piranha_bad_128k).
	*/
1162 if (pdir_order <= (19-12)) {
1163 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
1164
1165 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
1166
1167 free_pages(pdir_base, pdir_order);
1168
1169 pdir_base = new_pdir;
1170
1171
1172 while (pdir_order < (19-12)) {
1173 new_pdir += pdir_size;
1174 free_pages(new_pdir, pdir_order);
1175 pdir_order +=1;
1176 pdir_size <<=1;
1177 }
1178 }
1179 } else {
1180
1181
1182
1183
1184 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1);
1185
1186
1187 free_pages( pdir_base, pdir_order);
1188
1189
1190 free_pages(new_pdir, 20-12);
1191
1192 pdir_base = new_pdir + 1024*1024;
1193
1194 if (pdir_order > (20-12)) {
1195
1196
1197
1198
1199
1200
1201 piranha_bad_128k = 1;
1202
1203 new_pdir += 3*1024*1024;
1204
1205 free_pages(new_pdir, 20-12);
1206
1207
1208 free_pages(new_pdir - 128*1024 , 17-12);
1209
1210 pdir_size -= 128*1024;
1211 }
1212 }
1213
1214 memset((void *) pdir_base, 0, pdir_size);
1215 return (void *) pdir_base;
1216}
1217
1218struct ibase_data_struct {
1219 struct ioc *ioc;
1220 int ioc_num;
1221};
1222
1223static int setup_ibase_imask_callback(struct device *dev, void *data)
1224{
1225
1226 extern void lba_set_iregs(struct parisc_device *, u32, u32);
1227 struct parisc_device *lba = to_parisc_device(dev);
1228 struct ibase_data_struct *ibd = data;
1229 int rope_num = (lba->hpa.start >> 13) & 0xf;
1230 if (rope_num >> 3 == ibd->ioc_num)
1231 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1232 return 0;
1233}

/* program each LBA hanging off this SBA with the IOC's IBASE and IMASK */
1236static void
1237setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1238{
1239 struct ibase_data_struct ibase_data = {
1240 .ioc = ioc,
1241 .ioc_num = ioc_num,
1242 };
1243
1244 device_for_each_child(&sba->dev, &ibase_data,
1245 setup_ibase_imask_callback);
1246}
1247
1248#ifdef SBA_AGP_SUPPORT
1249static int
1250sba_ioc_find_quicksilver(struct device *dev, void *data)
1251{
1252 int *agp_found = data;
1253 struct parisc_device *lba = to_parisc_device(dev);
1254
1255 if (IS_QUICKSILVER(lba))
1256 *agp_found = 1;
1257 return 0;
1258}
1259#endif
1260
1261static void
1262sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1263{
1264 u32 iova_space_mask;
1265 u32 iova_space_size;
1266 int iov_order, tcnfg;
1267#ifdef SBA_AGP_SUPPORT
1268 int agp_found = 0;
1269#endif
1270
1271
1272
1273
1274
1275 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
1276 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1277
1278 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1279 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
1280 iova_space_size /= 2;
1281 }
1282
1283
1284
1285
1286
1287 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
1288 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1289
1290 DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
1291 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1292 iov_order + PAGE_SHIFT);
1293
1294 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1295 get_order(ioc->pdir_size));
1296 if (!ioc->pdir_base)
1297 panic("Couldn't allocate I/O Page Table\n");
1298
1299 memset(ioc->pdir_base, 0, ioc->pdir_size);
1300
1301 DBG_INIT("%s() pdir %p size %x\n",
1302 __func__, ioc->pdir_base, ioc->pdir_size);
1303
1304#ifdef SBA_HINT_SUPPORT
1305 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1306 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1307
1308 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1309 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1310#endif
1311
1312 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1313 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1314
1315
1316 iova_space_mask = 0xffffffff;
1317 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1318 ioc->imask = iova_space_mask;
1319#ifdef ZX1_SUPPORT
1320 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1321#endif
1322 sba_dump_tlb(ioc->ioc_hpa);
1323
1324 setup_ibase_imask(sba, ioc, ioc_num);
1325
1326 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1327
1328#ifdef CONFIG_64BIT
	/*
	** Setting the upper 32 bits of imask makes checking for
	** bypass (direct-mapped) addresses cheaper later on.
	*/
1333 ioc->imask |= 0xFFFFFFFF00000000UL;
1334#endif
1335
1336
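	/* tell the IOC which page size the IO pdir entries describe */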
1337 switch (PAGE_SHIFT) {
1338 case 12: tcnfg = 0; break;
1339 case 13: tcnfg = 1; break;
1340 case 14: tcnfg = 2; break;
1341 case 16: tcnfg = 3; break;
1342 default:
1343 panic(__FILE__ "Unsupported system page size %d",
1344 1 << PAGE_SHIFT);
1345 break;
1346 }
1347 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1348
1349
1350
1351
1352
1353 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1354
1355
1356
1357
1358
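	/*
	** Purge the entire IO TLB: the low bits written to PCOM encode
	** log2 of the address range being purged.
	*/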
1359 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1360
1361#ifdef SBA_AGP_SUPPORT
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371 device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);
1372
1373 if (agp_found && sba_reserve_agpgart) {
1374 printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
1375 __func__, (iova_space_size/2) >> 20);
1376 ioc->pdir_size /= 2;
1377 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1378 }
1379#endif
1380}
1381
1382static void
1383sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1384{
1385 u32 iova_space_size, iova_space_mask;
1386 unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Size the IO virtual address space from installed memory: give
	** each IOC enough IOV space to map its share of physical memory
	** (total RAM divided by the number of IOCs), clamped to the
	** 1MB..1GB range and rounded up to a power of two below.
	*/
1402 iova_space_size = (u32) (totalram_pages()/global_ioc_cnt);
1403
1404
1405 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1406 iova_space_size = 1 << (20 - PAGE_SHIFT);
1407 }
1408 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1409 iova_space_size = 1 << (30 - PAGE_SHIFT);
1410 }
1411
1412
1413
1414
1415
1416
1417 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1418
1419
1420 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1421
1422 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1423
1424 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1425 __func__,
1426 ioc->ioc_hpa,
1427 (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
1428 iova_space_size>>20,
1429 iov_order + PAGE_SHIFT);
1430
1431 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1432
1433 DBG_INIT("%s() pdir %p size %x\n",
1434 __func__, ioc->pdir_base, pdir_size);
1435
1436#ifdef SBA_HINT_SUPPORT
1437
1438 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1439 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1440
1441 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1442 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1443#endif
1444
1445 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1446
1447
1448 iova_space_mask = 0xffffffff;
1449 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1450
1451
1452
1453
1454
1455 ioc->ibase = 0;
1456 ioc->imask = iova_space_mask;
1457#ifdef ZX1_SUPPORT
1458 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1459#endif
1460
1461 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1462 __func__, ioc->ibase, ioc->imask);
1463
1464
1465
1466
1467
1468
1469
1470 setup_ibase_imask(sba, ioc, ioc_num);
1471
1472
1473
1474
1475 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1476 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1477
1478
1479 switch (PAGE_SHIFT) {
1480 case 12: tcnfg = 0; break;
1481 case 13: tcnfg = 1; break;
1482 case 14: tcnfg = 2; break;
1483 case 16: tcnfg = 3; break;
1484 default:
1485 panic(__FILE__ "Unsupported system page size %d",
1486 1 << PAGE_SHIFT);
1487 break;
1488 }
1489
1490 WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
1491
1492
1493
1494
1495
1496 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1497
1498 ioc->ibase = 0;
1499
1500 DBG_INIT("%s() DONE\n", __func__);
1501}

/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**************************************************************************/

1516static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
1517{
1518 return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
1519}
1520
1521static void sba_hw_init(struct sba_device *sba_dev)
1522{
1523 int i;
1524 int num_ioc;
1525 u64 ioc_ctl;
1526
1527 if (!is_pdc_pat()) {
		/*
		** Non-PAT firmware: if a keyboard console is present the
		** firmware may still own IO devices, so ask PDC to reset
		** them before we reprogram the IO MMU underneath them.
		*/
1545 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
1546 pdc_io_reset_devices();
1547 }
1548
1549 }
1550
1551
1552#if 0
1553printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1554 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565 if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
1566 && (PAGE0->mem_boot.cl_class != CL_SEQU)) {
1567 pdc_io_reset();
1568 }
1569#endif
1570
1571 if (!IS_PLUTO(sba_dev->dev)) {
1572 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1573 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1574 __func__, sba_dev->sba_hpa, ioc_ctl);
1575 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
1576 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
1577
1578
1579
1580 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
1581
1582#ifdef DEBUG_SBA_INIT
1583 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
1584 DBG_INIT(" 0x%Lx\n", ioc_ctl);
1585#endif
1586 }
1587
1588 if (IS_ASTRO(sba_dev->dev)) {
1589 int err;
1590 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1591 num_ioc = 1;
1592
1593 sba_dev->chip_resv.name = "Astro Intr Ack";
1594 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
1595 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
1596 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1597 BUG_ON(err < 0);
1598
1599 } else if (IS_PLUTO(sba_dev->dev)) {
1600 int err;
1601
1602 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1603 num_ioc = 1;
1604
1605 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
1606 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
1607 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
1608 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1609 WARN_ON(err < 0);
1610
1611 sba_dev->iommu_resv.name = "IOVA Space";
1612 sba_dev->iommu_resv.start = 0x40000000UL;
1613 sba_dev->iommu_resv.end = 0x50000000UL - 1;
1614 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1615 WARN_ON(err < 0);
1616 } else {
1617
1618 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1619 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1620 num_ioc = 2;
1621
1622
1623 }
1624
1625
1626 sba_dev->num_ioc = num_ioc;
1627 for (i = 0; i < num_ioc; i++) {
1628 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
1629 unsigned int j;
1630
1631 for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {
1632
1633
1634
1635
1636
1637
1638
1639 if (IS_PLUTO(sba_dev->dev)) {
1640 void __iomem *rope_cfg;
1641 unsigned long cfg_val;
1642
1643 rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
1644 cfg_val = READ_REG(rope_cfg);
1645 cfg_val &= ~IOC_ROPE_AO;
1646 WRITE_REG(cfg_val, rope_cfg);
1647 }
1648
1649
1650
1651
1652 WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
1653 }
1654
1655
1656 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1657
1658 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1659 i,
1660 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1661 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1662 );
1663 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
1664 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1665 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1666 );
1667
1668 if (IS_PLUTO(sba_dev->dev)) {
1669 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1670 } else {
1671 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1672 }
1673 }
1674}
1675
1676static void
1677sba_common_init(struct sba_device *sba_dev)
1678{
1679 int i;
1680
1681
1682
1683
1684 sba_dev->next = sba_list;
1685 sba_list = sba_dev;
1686
1687 for(i=0; i< sba_dev->num_ioc; i++) {
1688 int res_size;
1689#ifdef DEBUG_DMB_TRAP
1690 extern void iterate_pages(unsigned long , unsigned long ,
1691 void (*)(pte_t * , unsigned long),
1692 unsigned long );
1693 void set_data_memory_break(pte_t * , unsigned long);
1694#endif
1695
1696 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64);
1697
1698
1699 if (piranha_bad_128k) {
1700 res_size -= (128*1024)/sizeof(u64);
1701 }
1702
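		/* one resource map bit per pdir entry, so bytes = entries / 8 */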
1703 res_size >>= 3;
1704 DBG_INIT("%s() res_size 0x%x\n",
1705 __func__, res_size);
1706
1707 sba_dev->ioc[i].res_size = res_size;
1708 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1709
1710#ifdef DEBUG_DMB_TRAP
1711 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1712 set_data_memory_break, 0);
1713#endif
1714
1715 if (NULL == sba_dev->ioc[i].res_map)
1716 {
1717 panic("%s:%s() could not allocate resource map\n",
1718 __FILE__, __func__ );
1719 }
1720
1721 memset(sba_dev->ioc[i].res_map, 0, res_size);
1722
1723 sba_dev->ioc[i].res_hint = (unsigned long *)
1724 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1725
1726#ifdef ASSERT_PDIR_SANITY
1727
1728 sba_dev->ioc[i].res_map[0] = 0x80;
1729 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1730#endif
1731
1732
1733 if (piranha_bad_128k) {
1734
1735
1736 int idx_start = (1408*1024/sizeof(u64)) >> 3;
1737 int idx_end = (1536*1024/sizeof(u64)) >> 3;
1738 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1739 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1740
1741
1742 while (p_start < p_end)
1743 *p_start++ = -1;
1744
1745 }
1746
1747#ifdef DEBUG_DMB_TRAP
1748 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1749 set_data_memory_break, 0);
1750 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1751 set_data_memory_break, 0);
1752#endif
1753
1754 DBG_INIT("%s() %d res_map %x %p\n",
1755 __func__, i, res_size, sba_dev->ioc[i].res_map);
1756 }
1757
1758 spin_lock_init(&sba_dev->sba_lock);
1759 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
1760
1761#ifdef DEBUG_SBA_INIT
1762
1763
1764
1765
1766
1767 if (ioc_needs_fdc) {
1768 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
1769 } else {
1770 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
1771 }
1772#endif
1773}
1774
1775#ifdef CONFIG_PROC_FS
1776static int sba_proc_info(struct seq_file *m, void *p)
1777{
1778 struct sba_device *sba_dev = sba_list;
1779 struct ioc *ioc = &sba_dev->ioc[0];
1780 int total_pages = (int) (ioc->res_size << 3);
1781#ifdef SBA_COLLECT_STATS
1782 unsigned long avg = 0, min, max;
1783#endif
1784 int i;
1785
1786 seq_printf(m, "%s rev %d.%d\n",
1787 sba_dev->name,
1788 (sba_dev->hw_rev & 0x7) + 1,
1789 (sba_dev->hw_rev & 0x18) >> 3);
1790 seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1791 (int)((ioc->res_size << 3) * sizeof(u64)),
1792 total_pages);
1793
1794 seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1795 ioc->res_size, ioc->res_size << 3);
1796
1797 seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
1798 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
1799 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
1800 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));
1801
1802 for (i=0; i<4; i++)
1803 seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
1804 i,
1805 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
1806 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
1807 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));
1808
1809#ifdef SBA_COLLECT_STATS
1810 seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1811 total_pages - ioc->used_pages, ioc->used_pages,
1812 (int)(ioc->used_pages * 100 / total_pages));
1813
1814 min = max = ioc->avg_search[0];
1815 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1816 avg += ioc->avg_search[i];
1817 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1818 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1819 }
1820 avg /= SBA_SEARCH_SAMPLE;
1821 seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1822 min, avg, max);
1823
1824 seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1825 ioc->msingle_calls, ioc->msingle_pages,
1826 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1827
1828
1829 min = ioc->usingle_calls;
1830 max = ioc->usingle_pages - ioc->usg_pages;
1831 seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1832 min, max, (int)((max * 1000)/min));
1833
1834 seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1835 ioc->msg_calls, ioc->msg_pages,
1836 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1837
1838 seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1839 ioc->usg_calls, ioc->usg_pages,
1840 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1841#endif
1842
1843 return 0;
1844}
1845
1846static int
1847sba_proc_bitmap_info(struct seq_file *m, void *p)
1848{
1849 struct sba_device *sba_dev = sba_list;
1850 struct ioc *ioc = &sba_dev->ioc[0];
1851
1852 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
1853 ioc->res_size, false);
1854 seq_putc(m, '\n');
1855
1856 return 0;
1857}
1858#endif
1859
1860static const struct parisc_device_id sba_tbl[] __initconst = {
1861 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
1862 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
1863 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
1864 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
1865 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
1866 { 0, }
1867};
1868
1869static int sba_driver_callback(struct parisc_device *);
1870
1871static struct parisc_driver sba_driver __refdata = {
1872 .name = MODULE_NAME,
1873 .id_table = sba_tbl,
1874 .probe = sba_driver_callback,
1875};

/**
 * sba_driver_callback - probe entry point for an SBA/IOC
 * @dev: the parisc_device found during the bus walk
 *
 * Identify the chip (Astro, Ike, REO or Pluto), allocate and initialize
 * the sba_device, install the DMA ops and create the /proc entries.
 */
1882static int __init sba_driver_callback(struct parisc_device *dev)
1883{
1884 struct sba_device *sba_dev;
1885 u32 func_class;
1886 int i;
1887 char *version;
1888 void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
1889#ifdef CONFIG_PROC_FS
1890 struct proc_dir_entry *root;
1891#endif
1892
1893 sba_dump_ranges(sba_addr);
1894
1895
1896 func_class = READ_REG(sba_addr + SBA_FCLASS);
1897
1898 if (IS_ASTRO(dev)) {
1899 unsigned long fclass;
1900 static char astro_rev[]="Astro ?.?";
1901
1902
1903 fclass = READ_REG(sba_addr);
1904
1905 astro_rev[6] = '1' + (char) (fclass & 0x7);
1906 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
1907 version = astro_rev;
1908
1909 } else if (IS_IKE(dev)) {
1910 static char ike_rev[] = "Ike rev ?";
1911 ike_rev[8] = '0' + (char) (func_class & 0xff);
1912 version = ike_rev;
1913 } else if (IS_PLUTO(dev)) {
1914 static char pluto_rev[]="Pluto ?.?";
1915 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
1916 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
1917 version = pluto_rev;
1918 } else {
1919 static char reo_rev[] = "REO rev ?";
1920 reo_rev[8] = '0' + (char) (func_class & 0xff);
1921 version = reo_rev;
1922 }
1923
1924 if (!global_ioc_cnt) {
1925 global_ioc_cnt = count_parisc_driver(&sba_driver);
		/*
		** Ike and REO have two IOCs per SBA; Astro and Pluto
		** have only one.
		*/
1928 if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
1929 global_ioc_cnt *= 2;
1930 }
1931
1932 printk(KERN_INFO "%s found %s at 0x%llx\n",
1933 MODULE_NAME, version, (unsigned long long)dev->hpa.start);
1934
1935 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
1936 if (!sba_dev) {
1937 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
1938 return -ENOMEM;
1939 }
1940
1941 parisc_set_drvdata(dev, sba_dev);
1942
1943 for(i=0; i<MAX_IOC; i++)
1944 spin_lock_init(&(sba_dev->ioc[i].res_lock));
1945
1946 sba_dev->dev = dev;
1947 sba_dev->hw_rev = func_class;
1948 sba_dev->name = dev->name;
1949 sba_dev->sba_hpa = sba_addr;
1950
1951 sba_get_pat_resources(sba_dev);
1952 sba_hw_init(sba_dev);
1953 sba_common_init(sba_dev);
1954
1955 hppa_dma_ops = &sba_ops;
1956
1957#ifdef CONFIG_PROC_FS
1958 switch (dev->id.hversion) {
1959 case PLUTO_MCKINLEY_PORT:
1960 root = proc_mckinley_root;
1961 break;
1962 case ASTRO_RUNWAY_PORT:
1963 case IKE_MERCED_PORT:
1964 default:
1965 root = proc_runway_root;
1966 break;
1967 }
1968
1969 proc_create_single("sba_iommu", 0, root, sba_proc_info);
1970 proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
1971#endif
1972 return 0;
1973}

/*
** One-time initialization: register the SBA driver with the parisc bus
** so sba_driver_callback() gets called for every SBA/IOC found.
*/
1980void __init sba_init(void)
1981{
1982 register_parisc_driver(&sba_driver);
1983}

/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc device.
 *
 * Returns the appropriate IOMMU (ioc) for the given PCI controller,
 * based on its hw_path, by looking at the SBA the controller hangs off.
 */
1993void * sba_get_iommu(struct parisc_device *pci_hba)
1994{
1995 struct parisc_device *sba_dev = parisc_parent(pci_hba);
1996 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
1997 char t = sba_dev->id.hw_type;
1998 int iocnum = (pci_hba->hw_path >> 3);
1999
2000 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
2001
2002 return &(sba->ioc[iocnum]);
2003}

/**
 * sba_directed_lmmio - return the directed LMMIO range routed to a rope
 * @pci_hba: The parisc device.
 * @r: resource to fill in (left empty if no range is routed to this rope)
 *
 * Walk the four DIRECT LMMIO range registers and report the one whose
 * ROUTE register points at this controller's rope.
 */
2014void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2015{
2016 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2017 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2018 char t = sba_dev->id.hw_type;
2019 int i;
2020 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2021
2022 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2023
2024 r->start = r->end = 0;
2025
2026
2027 for (i=0; i<4; i++) {
2028 int base, size;
2029 void __iomem *reg = sba->sba_hpa + i*0x18;
2030
2031 base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
2032 if ((base & 1) == 0)
2033 continue;
2034
2035 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);
2036
2037 if ((size & (ROPES_PER_IOC-1)) != rope)
2038 continue;
2039
2040 r->start = (base & ~1UL) | PCI_F_EXTEND;
2041 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
2042 r->end = r->start + size;
2043 r->flags = IORESOURCE_MEM;
2044 }
2045}

/**
 * sba_distributed_lmmio - return this rope's slice of the distributed range
 * @pci_hba: The parisc device.
 * @r: resource to fill in
 *
 * The distributed LMMIO range is divided evenly between ROPES_PER_IOC
 * ropes; compute and return the slice belonging to this rope.
 */
2057void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
2058{
2059 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2060 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2061 char t = sba_dev->id.hw_type;
2062 int base, size;
2063 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2064
2065 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2066
2067 r->start = r->end = 0;
2068
2069 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
2070 if ((base & 1) == 0) {
2071 BUG();
2072 return;
2073 }
2074
2075 r->start = (base & ~1UL) | PCI_F_EXTEND;
2076
2077 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
2078 r->start += rope * (size + 1);
2079 r->end = r->start + size;
2080 r->flags = IORESOURCE_MEM;
2081}