/*
 * IOMMU implementation for Cell Broadband Processor Architecture.
 *
 * Sets up the Cell IOC (I/O controller) translation hardware: a segment
 * table (stab) pointing at per-segment page tables (ptab) of I/O PTEs,
 * plus the DMA API plumbing that selects between a dynamic mapped window
 * and an optional fixed (linear) mapping for 64-bit capable devices.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

/*
 * Define CELL_IOMMU_REAL_UNMAP to actually unmap pages on free, instead
 * of leaving them mapped to a dummy scratch page (see tce_free_cell()).
 */
#define CELL_IOMMU_REAL_UNMAP

/*
 * Define CELL_IOMMU_STRICT_PROTECTION to enforce read vs. write
 * protection in the I/O PTEs, based on the DMA direction.
 */
#define CELL_IOMMU_STRICT_PROTECTION

#define NR_IOMMUS			2

/* IOC memory-mapped registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x2000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul

/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */
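
/*
 * A segment table entry, as built in cell_iommu_alloc_ptab() below, packs
 * the physical address of that segment's page table into IOSTE_PT_Base_RPN,
 * the number of 4K pages of PTEs minus one into IOSTE_NPPT (bits 5-11,
 * hence the "<< 5" there), and the I/O page size into IOSTE_PS.
 */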

/* IOMMU sizing */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))

/* The high bit needs to be set on every DMA address for the spider bridge */
#define SPIDER_DMA_OFFSET	0x80000000ul

struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
	int nid;			/* NUMA node the IOC lives on */
	char name[NAMESIZE];
	void __iomem *xlate_regs;	/* translation register block */
	void __iomem *cmd_regs;		/* command register block */
	unsigned long *stab;		/* segment table */
	unsigned long *ptab;		/* page table for the dynamic window */
	void *pad_page;			/* scratch page for "freed" PTEs */
	struct list_head windows;
};

/*
 * Static array of iommus, one per node on which we have an IOC.  Each
 * contains a list of DMA windows; on bus setup we look up (or create) a
 * matching window, and on device setup we assign its iommu_table pointer.
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;

static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	u64 __iomem *reg;
	u64 val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = ((n << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}

static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* Build the base PTE: protection, coherence, strong ordering and
	 * the device's I/O identifier.
	 */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to setup the
	 * protection bits. "prot" is made of three 4-bit fields, one per
	 * supported direction value (BIDIRECTIONAL, TO_DEVICE, FROM_DEVICE).
	 * It is shifted left so the field matching the requested direction
	 * lands on the PP_W/PP_R bits, and all other bits are masked out.
	 */
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) &
		 (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
		CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#else
	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
	if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
		base_pte &= ~CBE_IOPTE_SO_RW;

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
	return 0;
}
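
/*
 * Note: tce_build_cell()/tce_free_cell() are not called directly; they are
 * installed as ppc_md.tce_build/ppc_md.tce_free in cell_iommu_init() below,
 * and are reached through the generic powerpc iommu_map_page()/iommu_map_sg()
 * helpers.
 */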

static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry
	 */
	pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		__pa(window->iommu->pad_page) |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}

static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat, spf;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	spf = stat & IOC_IO_ExcpStat_SPF_Mask;

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR " page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}
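
/*
 * These exceptions fire when a device DMAs to an address the IOC cannot
 * translate: SPF=S indicates a segment fault (no valid segment table entry),
 * SPF=P a page fault (no valid I/O PTE), per the IOC_IO_ExcpStat_SPF_*
 * encodings above.
 */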

static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;
		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %s\n",
			       np->full_name);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}

static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
		unsigned long dbase, unsigned long dsize,
		unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	unsigned long segments, stab_size;

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

	pr_debug("%s: iommu[%d]: segments: %lu\n",
		 __func__, iommu->nid, segments);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
}
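
/*
 * Worked example (hypothetical numbers): with a 2GB dynamic window at 0 and
 * no fixed window, segments = 0x80000000 >> 28 = 8, so the segment table is
 * just 8 * 8 = 64 bytes, rounded up to one page by the allocator.
 */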

static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size, unsigned long gap_base,
		unsigned long gap_size, unsigned long page_shift)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size,
		      n_pte_pages, start_seg, *ptab;

	start_seg = base >> IO_SEGMENT_SHIFT;
	segments = size >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
	/* PTEs for each segment must start on a 4K boundary */
	pages_per_segment = max(pages_per_segment,
				(1 << 12) / sizeof(unsigned long));

	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
		 iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	ptab = page_address(page);
	memset(ptab, 0, ptab_size);

	/* number of 4K pages needed for a page table */
	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
		 __func__, iommu->nid, iommu->stab, ptab,
		 n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	switch (page_shift) {
	case 12: reg |= IOSTE_PS_4K;  break;
	case 16: reg |= IOSTE_PS_64K; break;
	case 20: reg |= IOSTE_PS_1M;  break;
	case 24: reg |= IOSTE_PS_16M; break;
	default: BUG();
	}

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	pr_debug("Setting up IOMMU stab:\n");
	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}
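
/*
 * Sizing example (hypothetical numbers): a 2GB window with 4K I/O pages
 * gives segments = 8 and pages_per_segment = 1 << (28 - 12) = 65536, so
 * ptab_size = 8 * 65536 * 8 bytes = 4MB, and each segment needs
 * n_pte_pages = 128 4K pages of PTEs.
 */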

static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __func__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
		 reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
		 IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT);
	cell_iommu_enable_hardware(iommu);
}

#if 0 /* unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows? */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif

static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s, using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}

static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase 0x%016lx\n", window->table.it_base);
	pr_debug("\toffset 0x%lx\n", window->table.it_offset);
	pr_debug("\tsize %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;

/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
static int iommu_fixed_is_weak;

static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary.
	 */
	iommu = cell_iommu_for_node(dev_to_node(dev));
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       dev->of_node ? dev->of_node->full_name : "?",
		       dev_to_node(dev));
		return NULL;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	return &window->table;
}

/* A coherent allocation implies strong ordering */

static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	if (iommu_fixed_is_weak)
		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
					    size, dma_handle,
					    device_to_mask(dev), flag,
					    dev_to_node(dev));
	else
		return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
						     flag);
}

static void dma_fixed_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	if (iommu_fixed_is_weak)
		iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
				    dma_handle);
	else
		dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
}

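/*
 * Dispatch rule for the fixed mapping: the fixed window is programmed with
 * one ordering (weak iff iommu_fixed_is_weak), so a mapping request whose
 * DMA_ATTR_WEAK_ORDERING attribute matches that ordering can go through the
 * direct ops via the fixed window, while a request for the other ordering
 * falls back to a dynamic IOMMU mapping, whose PTEs carry per-mapping
 * ordering bits (see tce_build_cell()).
 */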

static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_page(dev, page, offset, size,
					       direction, attrs);
	else
		return iommu_map_page(dev, cell_get_iommu_table(dev), page,
				      offset, size, device_to_mask(dev),
				      direction, attrs);
}

static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
					  attrs);
	else
		iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
				 direction, attrs);
}

static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction direction,
			    struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
	else
		return iommu_map_sg(dev, cell_get_iommu_table(dev), sg, nents,
				    device_to_mask(dev), direction, attrs);
}

static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction direction,
			       struct dma_attrs *attrs)
{
	if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
		dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
	else
		iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents, direction,
			       attrs);
}

static int dma_fixed_dma_supported(struct device *dev, u64 mask)
{
	return mask == DMA_BIT_MASK(64);
}

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);

struct dma_map_ops dma_iommu_fixed_ops = {
	.alloc_coherent = dma_fixed_alloc_coherent,
	.free_coherent  = dma_fixed_free_coherent,
	.map_sg         = dma_fixed_map_sg,
	.unmap_sg       = dma_fixed_unmap_sg,
	.dma_supported  = dma_fixed_dma_supported,
	.set_dma_mask   = dma_set_mask_and_switch,
	.map_page       = dma_fixed_map_page,
	.unmap_page     = dma_fixed_unmap_page,
};

static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	/* Order is important here, these are not mutually exclusive */
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_fixed(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		set_iommu_table_base(dev, cell_get_iommu_table(dev));
	else if (get_pci_dma_ops() == &dma_direct_ops)
		set_dma_offset(dev, cell_dma_direct_offset);
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else hard code the window */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}
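
/*
 * The "ibm,dma-window" property is an (index, base address, size) tuple;
 * of_parse_dma_window() decodes it using the node's address/size cell
 * counts. Only the base and size of the dynamic DMA window are used here.
 */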

static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 *
	 * However, there might be issues with getting the size right so let's
	 * ignore that for now. We might want to completely get rid of the
	 * multiple window support since the cell iommu supports per-page
	 * ioids.
	 */
	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}

static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}

static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}

static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first axon or pci-internal node we can find and
	 * check the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}
	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < memblock_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%lldMB)\n",
		       size >> 20, memblock_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}

/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime; 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot and maps all of physical memory
 *  1:1 into device space at some offset. Where the dynamic window plus all
 *  of RAM fits below 32GB, the fixed mapping is placed immediately above
 *  the dynamic window; e.g. with 4GB of RAM and a 0-2GB dynamic window,
 *  physical address 1GB is reached through bus address 3GB.
 *
 *  If that does not fit below 32GB, cell_iommu_fixed_mapping_init() instead
 *  points the dynamic window at the hash page table (which no device should
 *  ever DMA to) and starts the fixed mapping at bus address 0, covering all
 *  of memory except that carved-out region.
 */

static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best, naddr, nsize, pna, range_size;

	/* Walk up the device tree looking for a dma-ranges property */
	np = of_node_get(dev->of_node);
	while (1) {
		naddr = of_n_addr_cells(np);
		nsize = of_n_size_cells(np);
		np = of_get_next_parent(np);
		if (!np)
			break;

		ranges = of_get_property(np, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(np);
	range_size = naddr + nsize + pna;

	/* dma-ranges format:
	 * child addr	: naddr cells
	 * parent addr	: pna cells
	 * size		: nsize cells
	 *
	 * We want the range that maps CPU address 0 with the largest size;
	 * its child (device) address is the fixed DMA offset.
	 */
	for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
		cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
		size = of_read_number(ranges + i + naddr + pna, nsize);

		if (cpu_addr == 0 && size > best_size) {
			best = i;
			best_size = size;
		}
	}

	if (best >= 0)
		dev_addr = of_read_number(ranges + best, naddr);
	else
		dev_dbg(dev, "iommu: no suitable range found!\n");

out:
	of_node_put(np);

	return dev_addr;
}
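
/*
 * Hypothetical example: with naddr = 2, pna = 2 and nsize = 2, a dma-ranges
 * entry of <0x8 0x0  0x0 0x0  0x1 0x0> describes a 4GB range mapping device
 * address 0x800000000 to CPU address 0; cell_iommu_get_fixed_address()
 * would return 0x800000000 for a device below that bridge.
 */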

static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
}

static void cell_dma_dev_setup_fixed(struct device *dev)
{
	u64 addr;

	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
	set_dma_offset(dev, addr);

	dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
}

static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		 addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}
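
/*
 * insert_16M_pte() arithmetic: segments are 256MB (IO_SEGMENT_SHIFT = 28)
 * and pages 16MB, so each segment holds 1 << IO_PAGENO_BITS(24) = 16 PTEs,
 * but every segment's PTEs still occupy a full 4K page (512 slots) because
 * of the alignment rule in cell_iommu_alloc_ptab(); hence the stride of
 * (1 << 12) / sizeof(unsigned long) entries per segment.
 */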

static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		   (cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);

	if (iommu_fixed_is_weak)
		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
	else {
		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
		base_pte |= CBE_IOPTE_SO_RW;
	}

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}

static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	np = of_find_node_with_property(NULL, "dma-ranges");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = memblock_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000ul)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. Instead we map the dynamic region over the hash
		 * page table, effectively removing it from the device's
		 * DMA-able address space.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend  = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in"
					 " real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
		       "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
		       dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}
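
/*
 * Layout example (hypothetical numbers): with 4GB of RAM and a 0-2GB
 * dynamic window on each axon, fbase = 2GB and fsize = 4GB, so the fixed
 * window spans bus addresses 2GB-6GB; a 64-bit device then reaches
 * physical address 1GB at 3GB, plus whatever fixed device-side offset its
 * dma-ranges adds (see cell_dma_dev_setup_fixed()).
 */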

static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	struct device_node *pciep;

	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	/* If we can find a pcie-endpoint in the device tree assume that
	 * we're on a triblade or a CAB, so by default the fixed mapping
	 * should be set to be weakly ordered; but only if the boot
	 * option WASN'T set for strong ordering
	 */
	pciep = of_find_node_by_type(NULL, "pcie-endpoint");

	if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
		iommu_fixed_is_weak = 1;

	of_node_put(pciep);

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);
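
/*
 * Boot-time usage: "iommu_fixed=off" disables the fixed mapping entirely,
 * "iommu_fixed=weak" forces weak ordering for it, and "iommu_fixed=strong"
 * keeps strong ordering even on machines with a pcie-endpoint node.
 */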

static u64 cell_dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops;

	if (!dev->dma_mask)
		return 0;

	if (!iommu_fixed_disabled &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
		return DMA_BIT_MASK(64);

	dma_ops = get_dma_ops(dev);
	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);

	return DMA_BIT_MASK(64);
}

static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);