/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "cell.h"
#include "interrupt.h"

/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been enabled
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping code
 */
#define CELL_IOMMU_STRICT_PROTECTION

#define NR_IOMMUS			2

/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x2000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul

/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */

/* IOMMU sizing */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul

struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct list_head windows;
};

/* There is one IOC (and thus one struct cbe_iommu) per Cell node; the
 * array is filled in as the device tree is walked at boot.
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;

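/*
 * Invalidate a range of IO PTEs in the IOC's IOPT cache, in chunks of at
 * most 2^11 entries, busy-waiting for each invalidation to complete.
 */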
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	u64 __iomem *reg;
	u64 val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = ((n << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}

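/*
 * Build npages IO PTEs at 'index', mapping 'uaddr' for DMA in 'direction',
 * then flush the affected entries from the IOPT cache.
 */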
static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now.*/
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to setup the
	 * protection bit. "prot" is setup to be 3 fields of 4 bits appended
	 * together for each of the 3 supported direction values. It is then
	 * shifted left so that the fields matching the desired direction
	 * lands on the appropriate bits, and other bits are masked out.
	 */
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) &
		 (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
		CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#else
	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
	if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING))
		base_pte &= ~CBE_IOPTE_SO_RW;

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
	return 0;
}

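/* Tear down a range of IO PTEs previously set up by tce_build_cell() */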
static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		__pa(window->iommu->pad_page) |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}

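/*
 * IOC exception handler: decode and log the DMA fault, then clear the
 * valid bit so further exceptions can be latched.
 */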
static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat, spf;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	spf = stat & IOC_IO_ExcpStat_SPF_Mask;

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR "  page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}

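/*
 * Find the base address of the IOC register block for a node, trying the
 * new-style "ioc" nodes first and falling back to the legacy
 * "ioc-translation" property on the node's cpu node.
 */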
static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;
		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %pOF\n",
			       np);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}

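/*
 * Allocate and zero a segment table sized to cover both the dynamic and
 * fixed windows of this IOMMU.
 */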
static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
				  unsigned long dbase, unsigned long dsize,
				  unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	unsigned long segments, stab_size;

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

	pr_debug("%s: iommu[%d]: segments: %lu\n",
		 __func__, iommu->nid, segments);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
}

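/*
 * Allocate a page table for [base, base + size) and hook it up to the
 * matching segment table entries, skipping segments that fall inside
 * [gap_base, gap_base + gap_size).
 */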
static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size, unsigned long gap_base,
		unsigned long gap_size, unsigned long page_shift)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size,
		      n_pte_pages, start_seg, *ptab;

	start_seg = base >> IO_SEGMENT_SHIFT;
	segments  = size >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
	/* each segment's page table must fill at least one 4K page */
	pages_per_segment = max(pages_per_segment,
				(1 << 12) / sizeof(unsigned long));

	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
		 iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	ptab = page_address(page);
	memset(ptab, 0, ptab_size);

	/* number of 4K pages needed for a page table */
	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
		 __func__, iommu->nid, iommu->stab, ptab,
		 n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	switch (page_shift) {
	case 12: reg |= IOSTE_PS_4K;  break;
	case 16: reg |= IOSTE_PS_64K; break;
	case 20: reg |= IOSTE_PS_1M;  break;
	case 24: reg |= IOSTE_PS_16M; break;
	default: BUG();
	}

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	pr_debug("Setting up IOMMU stab:\n");
	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}

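/*
 * Map the IOC registers, install the exception interrupt handler, load
 * the segment table origin and finally enable IO translation.
 */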
static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __func__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
		 reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
		 IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(!virq);

	ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
	unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT_4K);
	cell_iommu_enable_hardware(iommu);
}

#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows) */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif

static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
		       np);
		return 0;
	}

	return *ioid;
}

static struct iommu_table_ops cell_iommu_ops = {
	.set = tce_build_cell,
	.clear = tce_free_cell
};

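/*
 * Create the iommu_window and iommu_table covering [offset, offset + size)
 * behind this IOMMU, for the ioid found on the given device node.
 */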
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
	window->table.it_offset =
		(offset >> window->table.it_page_shift) + pte_offset;
	window->table.it_size = size >> window->table.it_page_shift;
	window->table.it_ops = &cell_iommu_ops;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase 0x%016lx\n", window->table.it_base);
	pr_debug("\toffset 0x%lx\n", window->table.it_offset);
	pr_debug("\tsize %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);

	return window;
}

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;

/* iommu_fixed_is_weak is only involved with the fixed linear mapping */
static int iommu_fixed_is_weak;

static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary
	 */
	iommu = cell_iommu_for_node(dev_to_node(dev));
	if (iommu == NULL || list_empty(&iommu->windows)) {
		dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
			dev->of_node, dev_to_node(dev));
		return NULL;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	return &window->table;
}

/* A coherent allocation implies strong ordering */
static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	if (iommu_fixed_is_weak)
		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
					    size, dma_handle,
					    device_to_mask(dev), flag,
					    dev_to_node(dev));
	else
		return dma_direct_ops.alloc(dev, size, dma_handle, flag,
					    attrs);
}

static void dma_fixed_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	if (iommu_fixed_is_weak)
		iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
				    dma_handle);
	else
		dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
}

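/*
 * For streaming mappings, use the direct ops when the requested ordering
 * matches the ordering of the fixed mapping, otherwise bounce through the
 * dynamic window.
 */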
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
		return dma_direct_ops.map_page(dev, page, offset, size,
					       direction, attrs);
	else
		return iommu_map_page(dev, cell_get_iommu_table(dev), page,
				      offset, size, device_to_mask(dev),
				      direction, attrs);
}

static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
		dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
					  attrs);
	else
		iommu_unmap_page(cell_get_iommu_table(dev), dma_addr, size,
				 direction, attrs);
}

static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
			    int nents, enum dma_data_direction direction,
			    unsigned long attrs)
{
	if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
		return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
	else
		return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
					nents, device_to_mask(dev),
					direction, attrs);
}

static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
			       int nents, enum dma_data_direction direction,
			       unsigned long attrs)
{
	if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
		dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
	else
		ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
				   direction, attrs);
}

static int dma_supported_and_switch(struct device *dev, u64 dma_mask);

static const struct dma_map_ops dma_iommu_fixed_ops = {
	.alloc = dma_fixed_alloc_coherent,
	.free = dma_fixed_free_coherent,
	.map_sg = dma_fixed_map_sg,
	.unmap_sg = dma_fixed_unmap_sg,
	.dma_supported = dma_supported_and_switch,
	.map_page = dma_fixed_map_page,
	.unmap_page = dma_fixed_unmap_page,
	.mapping_error = dma_iommu_mapping_error,
};

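/* Hook a device up to whichever DMA setup the current PCI DMA ops need */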
static void cell_dma_dev_setup(struct device *dev)
{
	if (get_pci_dma_ops() == &dma_iommu_ops)
		set_iommu_table_base(dev, cell_get_iommu_table(dev));
	else if (get_pci_dma_ops() == &dma_direct_ops)
		set_dma_offset(dev, cell_dma_direct_offset);
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const __be32 *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}

static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %pOF\n",
		       np);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
		 nid, np);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 *
	 * The device-tree layout is somewhat funky in that respect; for now
	 * this code assumes at most one iommu per Cell node, which the
	 * NR_IOMMUS bound below enforces.
	 */
	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
		       np);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}

static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT_4K);
}

static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IO translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}

static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}
	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < memblock_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%lldMB)\n",
		       size >> 20, memblock_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}

/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot, and maps all of physical memory
 *  1:1 into device space at some offset. On machines with < 30 GB of memory
 *  we create the fixed mapping immediately above the normal IOMMU window.
 *
 *  For example a machine with 4GB of memory would end up with the normal
 *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 *  3GB, plus any dma offset.
 *
 *  On machines with 30GB or more of memory, we are unable to place the fixed
 *  mapping above the normal IOMMU window as we would run out of address space.
 *  Instead we move the normal IOMMU window to coincide with the hash page
 *  table, this region does not need to be part of the fixed mapping as no
 *  device should ever be DMA'ing to it. We then setup the fixed mapping
 *  from 0 to 32GB.
 */

static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best, naddr, nsize, pna, range_size;

	np = of_node_get(dev->of_node);
	while (1) {
		naddr = of_n_addr_cells(np);
		nsize = of_n_size_cells(np);
		np = of_get_next_parent(np);
		if (!np)
			break;

		ranges = of_get_property(np, "dma-ranges", &len);

		/* Ignore parents without a dma-ranges property */
		if (ranges && len > 0)
			break;
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(np);
	range_size = naddr + nsize + pna;

	/* dma-ranges format:
	 * child addr	: naddr cells
	 * parent addr	: pna cells
	 * size		: nsize cells
	 */
	for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
		cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
		size = of_read_number(ranges + i + naddr + pna, nsize);

		if (cpu_addr == 0 && size > best_size) {
			best = i;
			best_size = size;
		}
	}

	if (best >= 0)
		dev_addr = of_read_number(ranges + best, naddr);
	else
		dev_dbg(dev, "iommu: no suitable range found!\n");

out:
	of_node_put(np);

	return dev_addr;
}

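/*
 * dma_supported() hook that also switches a 64-bit capable device onto
 * the fixed mapping ops when it can reach the fixed window.
 */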
static int dma_supported_and_switch(struct device *dev, u64 dma_mask)
{
	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
		u64 addr = cell_iommu_get_fixed_address(dev) +
			dma_iommu_fixed_base;
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		dev_dbg(dev, "iommu: fixed addr = %llx\n", addr);
		set_dma_ops(dev, &dma_iommu_fixed_ops);
		set_dma_offset(dev, addr);
		return 1;
	}

	if (dma_iommu_dma_supported(dev, dma_mask)) {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
		cell_dma_dev_setup(dev);
		return 1;
	}

	return 0;
}

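/* Insert a single 16MB IO PTE for 'addr' into the fixed-mapping ptab */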
static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		 addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}

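/*
 * Fill the fixed-mapping page table with 16MB PTEs covering all of RAM,
 * leaving out anything that overlaps the dynamic window.
 */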
static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		(cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);

	if (iommu_fixed_is_weak)
		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
	else {
		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
		base_pte |= CBE_IOPTE_SO_RW;
	}

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}

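/*
 * Set up every axon IOMMU with both a dynamic window and the full-RAM
 * fixed mapping, then make the iommu ops the default for PCI.
 */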
static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	np = of_find_node_with_property(NULL, "dma-ranges");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shennanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = memblock_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000ul)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit in the dynamic
		 * region. So we place the dynamic region on top of the hash
		 * page table.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend  = hbase + htab_size_bytes;

		/* The hash window must be segment aligned */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* The hash window must be contained inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in "
					 "real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
		       "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
		       dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT_4K);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_ops.dma_supported = dma_supported_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}

static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	struct device_node *pciep;

	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	/* If we can find a pcie-endpoint in the device tree assume that
	 * we're on a triblade or a CAB so by default the fixed mapping
	 * should be set to be weakly ordered; but only if the boot
	 * option WASN'T set for strong ordering
	 */
	pciep = of_find_node_by_type(NULL, "pcie-endpoint");

	if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
		iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;

	of_node_put(pciep);

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);

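/*
 * Report the DMA mask a device actually needs; a device that can reach
 * the fixed mapping needs the full 64 bits.
 */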
static u64 cell_dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *dma_ops;

	if (!dev->dma_mask)
		return 0;

	if (!iommu_fixed_disabled &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
		return DMA_BIT_MASK(64);

	dma_ops = get_dma_ops(dev);
	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);

	return DMA_BIT_MASK(64);
}

static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various callbacks */
	cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.dma_get_required_mask = cell_dma_get_required_mask;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node.  */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);