/*
 * iSeries hypervisor-managed IOMMU (TCE table) support.
 *
 * NOTE(review): the original file header (copyright/license notice)
 * appears to have been stripped during extraction; restore it from the
 * upstream source tree before redistribution.
 */
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/list.h>
30#include <linux/pci.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33
34#include <asm/iommu.h>
35#include <asm/vio.h>
36#include <asm/tce.h>
37#include <asm/machdep.h>
38#include <asm/abs_addr.h>
39#include <asm/prom.h>
40#include <asm/pci-bridge.h>
41#include <asm/iseries/hv_call_xm.h>
42#include <asm/iseries/hv_call_event.h>
43#include <asm/iseries/iommu.h>
44
45static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
46 unsigned long uaddr, enum dma_data_direction direction,
47 struct dma_attrs *attrs)
48{
49 u64 rc;
50 u64 tce, rpn;
51
52 while (npages--) {
53 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
54 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
55
56 if (tbl->it_type == TCE_VB) {
57
58 tce |= TCE_VALID|TCE_ALLIO;
59 if (direction != DMA_TO_DEVICE)
60 tce |= TCE_VB_WRITE;
61 } else {
62
63 tce |= TCE_PCI_READ;
64 if (direction != DMA_TO_DEVICE)
65 tce |= TCE_PCI_WRITE;
66 }
67
68 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
69 if (rc)
70 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
71 rc);
72 index++;
73 uaddr += TCE_PAGE_SIZE;
74 }
75 return 0;
76}
77
78static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
79{
80 u64 rc;
81
82 while (npages--) {
83 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
84 if (rc)
85 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
86 rc);
87 index++;
88 }
89}
90
91
92
93
/*
 * Control block exchanged with the hypervisor by
 * HvCallXm_getTceTableParms().  The caller fills in itc_busno,
 * itc_slotno and itc_virtbus; the hypervisor returns the table
 * geometry (itc_size, itc_offset, itc_index are read back below).
 * Field order and sizes are part of the hypervisor ABI — do not
 * reorder or repack.
 */
struct iommu_table_cb {
	unsigned long itc_busno;	/* input: bus number for this table */
	unsigned long itc_start;	/* NOTE(review): not read here — presumably table start; confirm against HV docs */
	unsigned long itc_totalsize;	/* NOTE(review): not read here — confirm meaning against HV docs */
	unsigned long itc_offset;	/* output: first entry offset (copied to it_offset) */

	unsigned long itc_size;		/* output: table size in pages (0 => failure) */
	unsigned long itc_index;	/* output: table index passed to HvCallXm_setTce */
	unsigned short itc_maxtables;	/* NOTE(review): unused here */
	unsigned char itc_virtbus;	/* input: nonzero selects the virtual bus */
	unsigned char itc_slotno;	/* input: device slot number on the bus */
	unsigned char itc_rsvd[4];	/* reserved / padding */
};
107
108
109
110
111
112
113
114
115
116
117void iommu_table_getparms_iSeries(unsigned long busno,
118 unsigned char slotno,
119 unsigned char virtbus,
120 struct iommu_table* tbl)
121{
122 struct iommu_table_cb *parms;
123
124 parms = kzalloc(sizeof(*parms), GFP_KERNEL);
125 if (parms == NULL)
126 panic("PCI_DMA: TCE Table Allocation failed.");
127
128 parms->itc_busno = busno;
129 parms->itc_slotno = slotno;
130 parms->itc_virtbus = virtbus;
131
132 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
133
134 if (parms->itc_size == 0)
135 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
136
137
138 tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
139 tbl->it_busno = parms->itc_busno;
140 tbl->it_offset = parms->itc_offset;
141 tbl->it_index = parms->itc_index;
142 tbl->it_blocksize = 1;
143 tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
144
145 kfree(parms);
146}
147
148
149#ifdef CONFIG_PCI
150
151
152
153
154static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
155{
156 struct device_node *node;
157
158 for (node = NULL; (node = of_find_all_nodes(node)); ) {
159 struct pci_dn *pdn = PCI_DN(node);
160 struct iommu_table *it;
161
162 if (pdn == NULL)
163 continue;
164 it = pdn->iommu_table;
165 if ((it != NULL) &&
166 (it->it_type == TCE_PCI) &&
167 (it->it_offset == tbl->it_offset) &&
168 (it->it_index == tbl->it_index) &&
169 (it->it_size == tbl->it_size)) {
170 of_node_put(node);
171 return it;
172 }
173 }
174 return NULL;
175}
176
177
178static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
179{
180 struct iommu_table *tbl;
181 struct device_node *dn = pci_device_to_OF_node(pdev);
182 struct pci_dn *pdn = PCI_DN(dn);
183 const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
184
185 BUG_ON(lsn == NULL);
186
187 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
188
189 iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
190
191
192 pdn->iommu_table = iommu_table_find(tbl);
193 if (pdn->iommu_table == NULL)
194 pdn->iommu_table = iommu_init_table(tbl, -1);
195 else
196 kfree(tbl);
197 set_iommu_table_base(&pdev->dev, pdn->iommu_table);
198}
199#else
200#define pci_dma_dev_setup_iseries NULL
201#endif
202
203static struct iommu_table veth_iommu_table;
204static struct iommu_table vio_iommu_table;
205
/*
 * Allocate coherent DMA memory from the shared virtual-bus iommu
 * table for hypervisor communication.  Mirrors iseries_hv_free().
 */
void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
				DMA_BIT_MASK(32), flag, -1);
}
EXPORT_SYMBOL_GPL(iseries_hv_alloc);
212
/*
 * Free coherent DMA memory obtained from iseries_hv_alloc().
 */
void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
}
EXPORT_SYMBOL_GPL(iseries_hv_free);
218
219dma_addr_t iseries_hv_map(void *vaddr, size_t size,
220 enum dma_data_direction direction)
221{
222 return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
223 (unsigned long)vaddr % PAGE_SIZE, size,
224 DMA_BIT_MASK(32), direction, NULL);
225}
226
/*
 * Undo a mapping created by iseries_hv_map().
 */
void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
}
232
233void __init iommu_vio_init(void)
234{
235 iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
236 veth_iommu_table.it_size /= 2;
237 vio_iommu_table = veth_iommu_table;
238 vio_iommu_table.it_offset += veth_iommu_table.it_size;
239
240 if (!iommu_init_table(&veth_iommu_table, -1))
241 printk("Virtual Bus VETH TCE table failed.\n");
242 if (!iommu_init_table(&vio_iommu_table, -1))
243 printk("Virtual Bus VIO TCE table failed.\n");
244}
245
246struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
247{
248 if (strcmp(dev->type, "network") == 0)
249 return &veth_iommu_table;
250 return &vio_iommu_table;
251}
252
253void iommu_init_early_iSeries(void)
254{
255 ppc_md.tce_build = tce_build_iSeries;
256 ppc_md.tce_free = tce_free_iSeries;
257
258 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
259 set_pci_dma_ops(&dma_iommu_ops);
260}
261