#undef DEBUG

#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/firmware.h>

#include "pasemi.h"

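/*
 * IOBMAP: IOMMU support for the PA Semi I/O bridge (IOB).
 *
 * Sketch of the layout as inferred from the code below: DMA addresses are
 * translated through a two-level table with 4K pages.  64 L1 registers at
 * IOB_XLT_L1_REGBASE each point at a block of 0x2000 32-bit L2 entries,
 * giving a 2MB L2 table that covers a 2GB (0x80000000) DMA window.  Each
 * valid L2 entry holds the real page number of the backing memory.
 */
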
#define IOBMAP_PAGE_SHIFT	12
#define IOBMAP_PAGE_SIZE	(1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK	(IOBMAP_PAGE_SIZE - 1)

/* IOB register block, mapped with ioremap() in iob_init() */
#define IOB_BASE		0xe0000000
#define IOB_SIZE		0x3000

#define IOBCAP_REG		0x40
#define IOBCOM_REG		0x100

/* Address translation enable bit in IOBCOM_REG */
#define IOBCOM_ATEN		0x00000100

/* Register holding the VGA, mapping-select and translation-range fields */
#define IOB_AD_REG		0x14c

#define IOB_AD_VGPRT		0x00000e00
#define IOB_AD_VGAEN		0x00000100

#define IOB_AD_MPSEL_MASK	0x00000030
#define IOB_AD_MPSEL_B38	0x00000000
#define IOB_AD_MPSEL_B40	0x00000010
#define IOB_AD_MPSEL_B42	0x00000020

/* Translation range select (256M/2G/128G) in IOB_AD_REG */
#define IOB_AD_TRNG_MASK	0x00000003
#define IOB_AD_TRNG_256M	0x00000000
#define IOB_AD_TRNG_2G		0x00000001
#define IOB_AD_TRNG_128G	0x00000003

#define IOB_TABLEBASE_REG	0x154

/* Base of the 64 32-bit L1 translation registers */
#define IOB_XLT_L1_REGBASE	0x2b00

/* Translation TLB invalidate register */
#define IOB_AT_INVAL_TLB_REG	0x2d00

/* L1 entry valid bits */
#define IOBMAP_L1E_V		0x40000000
#define IOBMAP_L1E_V_B		0x80000000

/* Flags for "big" L1 entries (unused in this file) */
#define IOBMAP_L1E_BIG_CACHED	0x00000002
#define IOBMAP_L1E_BIG_PRIORITY	0x00000001

/* L2 entry flags: valid, and valid + cached */
#define IOBMAP_L2E_V		0x80000000
#define IOBMAP_L2E_V_CACHED	0xc0000000

static void __iomem *iob;
static u32 iob_l1_emptyval;
static u32 iob_l2_emptyval;
static u32 *iob_l2_base;

static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;

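/*
 * iommu_table_ops .set hook: install translations for @npages 4K pages
 * starting at table slot @index, mapping the kernel virtual range that
 * begins at @uaddr.  Each L2 entry gets the valid bit plus the real page
 * number; the corresponding bus address (shifted right by 14 bits) is then
 * written to the invalidate register so stale cached translations are
 * dropped.
 */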
static int iobmap_build(struct iommu_table *tbl, long index,
			long npages, unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs)
{
	u32 *ip;
	u32 rpn;
	unsigned long bus_addr;

	pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);

	bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

	ip = ((u32 *)tbl->it_base) + index;

	while (npages--) {
		rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;

		*(ip++) = IOBMAP_L2E_V | rpn;

		out_le32(iob + IOB_AT_INVAL_TLB_REG, bus_addr >> 14);

		uaddr += IOBMAP_PAGE_SIZE;
		bus_addr += IOBMAP_PAGE_SIZE;
	}
	return 0;
}

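/*
 * iommu_table_ops .clear hook: tear down @npages entries starting at table
 * slot @index.  Entries are not simply zeroed; they are pointed back at the
 * spare page allocated in iob_init() (iob_l2_emptyval), and the translation
 * TLB is invalidated for each page.
 */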
static void iobmap_free(struct iommu_table *tbl, long index,
			long npages)
{
	u32 *ip;
	unsigned long bus_addr;

	pr_debug("iobmap: free at: %lx, %lx\n", index, npages);

	bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

	ip = ((u32 *)tbl->it_base) + index;

	while (npages--) {
		*(ip++) = iob_l2_emptyval;

		out_le32(iob + IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
		bus_addr += IOBMAP_PAGE_SIZE;
	}
}

static struct iommu_table_ops iommu_table_iobmap_ops = {
	.set = iobmap_build,
	.clear = iobmap_free
};

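/*
 * Set up the single iommu_table shared by all PCI devices: bus offset 0,
 * a 2GB window in 4K pages, backed by the L2 table allocated in iob_init(),
 * then hand it to the common IOMMU code via iommu_init_table().
 */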
static void iommu_table_iobmap_setup(void)
{
	pr_debug(" -> %s\n", __func__);
	iommu_table_iobmap.it_busno = 0;
	iommu_table_iobmap.it_offset = 0;
	iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;

	/* it_size is the number of table entries, not bytes */
	iommu_table_iobmap.it_size =
		0x80000000 >> iommu_table_iobmap.it_page_shift;

	/* Translation table is the L2 table programmed in iob_init() */
	iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
	iommu_table_iobmap.it_index = 0;

	iommu_table_iobmap.it_blocksize = 4;
	iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
	iommu_init_table(&iommu_table_iobmap, 0);
	pr_debug(" <- %s\n", __func__);
}

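/*
 * pci_controller_ops dma_bus_setup hook: the shared table only needs to be
 * built once, the first time any PCI bus is set up.
 */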
static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
	pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);

	if (!iommu_table_iobmap_inited) {
		iommu_table_iobmap_inited = 1;
		iommu_table_iobmap_setup();
	}
}

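/*
 * pci_controller_ops dma_dev_setup hook: point every PCI device at the
 * shared iobmap table, with one exception handled below.
 */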
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
	pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));

#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	/*
	 * For the on-chip DMA engine (PA Semi vendor/device 0x1959/0xa007),
	 * skip translation when not running under an LPAR, unless
	 * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE was selected at build time.
	 */
	if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
	    !firmware_has_feature(FW_FEATURE_LPAR)) {
		dev->dev.dma_ops = NULL;
		/*
		 * Set a wide coherent DMA mask so the iommu isn't used
		 * unnecessarily for coherent allocations.
		 */
		dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
		return;
	}
#endif

	set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}

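/*
 * One-time hardware init: allocate the 2MB L2 table from memory below 2GB,
 * allocate a spare page for "empty" L2 entries to point at, map the IOB
 * registers, program the 64 L1 entries to point into the L2 table, select
 * the 2GB translation range and enable address translation.
 */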
static int __init iob_init(struct device_node *dn)
{
	unsigned long tmp;
	u32 regword;
	int i;

	pr_debug(" -> %s\n", __func__);

	/* For the 2G window, 64 blocks of 8 pages (2MB total) is the full L2 size */
	iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL << 21, 1UL << 21, 0x80000000));

	printk(KERN_INFO "IOBMAP L2 allocated at: %p\n", iob_l2_base);

	/* Allocate a spare page that all invalid/free entries will point at */
	tmp = memblock_phys_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
	if (!tmp)
		panic("IOBMAP: Cannot allocate spare page!");

	iob_l1_emptyval = 0;
	iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);

	iob = ioremap(IOB_BASE, IOB_SIZE);
	if (!iob)
		panic("IOBMAP: Cannot map registers!");

	/* Point each of the 64 L1 registers at its slice of the L2 table */
	for (i = 0; i < 64; i++) {
		regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i * 0x2000) >> 12);
		out_le32(iob + IOB_XLT_L1_REGBASE + i * 4, regword);
	}

	/* Select the 2GB translation range */
	regword = in_le32(iob + IOB_AD_REG);
	regword &= ~IOB_AD_TRNG_MASK;
	regword |= IOB_AD_TRNG_2G;
	out_le32(iob + IOB_AD_REG, regword);

	/* Enable address translation */
	regword = in_le32(iob + IOBCOM_REG);
	regword |= IOBCOM_ATEN;
	out_le32(iob + IOBCOM_REG, regword);

	pr_debug(" <- %s\n", __func__);

	return 0;
}

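/*
 * Early platform init: respect CONFIG_PPC_PASEMI_IOMMU and the
 * "linux,iommu-off" chosen-node property, otherwise bring up the IOB,
 * hook the controller's DMA setup callbacks and switch PCI over to the
 * IOMMU DMA ops.
 */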
void __init iommu_init_early_pasemi(void)
{
	int iommu_off;

#ifndef CONFIG_PPC_PASEMI_IOMMU
	iommu_off = 1;
#else
	iommu_off = of_chosen &&
			of_get_property(of_chosen, "linux,iommu-off", NULL);
#endif
	if (iommu_off)
		return;

	iob_init(NULL);

	pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
	pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
	set_pci_dma_ops(&dma_iommu_ops);
}