/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * Routines for PCI DMA mapping on SGI SN (Altix) platforms.  See
 * Documentation/DMA-API.txt for a description of how these routines
 * should be used.
 */
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)	virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.  Of course, SN only supports devices that have 32 or more
 * address bits when using the PMU.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (mask < 0x7fffffff)
                return 0;
        return 1;
}

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hw supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (!sn_dma_supported(dev, dma_mask))
                return 0;

        *dev->dma_mask = dma_mask;
        return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);

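/*
 * Illustrative usage (a sketch, not part of the original file): a driver
 * never calls sn_dma_set_mask() directly; it negotiates its mask through
 * the generic DMA API, which checks the mask against sn_dma_supported()
 * above.  "my_probe" is a hypothetical PCI probe hook:
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		// masks narrower than 31 bits are rejected on SN
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 *			return -EIO;
 *		...
 *	}
 */
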
/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 * @attrs: optional dma attributes
 *
 * dma_alloc_coherent() returns a pointer to a memory region suitable for
 * coherent DMA traffic to/from a PCI device.
 *
 * This interface is usually used for "command" streams (e.g. the command
 * queue for a SCSI controller).  See Documentation/DMA-API.txt for
 * more information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t flags,
                                   struct dma_attrs *attrs)
{
        void *cpuaddr;
        unsigned long phys_addr;
        int node;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Allocate the memory, preferring the node closest to the
         * device's bus.
         */
        node = pcibus_to_node(pdev->bus);
        if (likely(node >= 0)) {
                struct page *p = alloc_pages_exact_node(node,
                                                flags, get_order(size));

                if (likely(p))
                        cpuaddr = page_address(p);
                else
                        return NULL;
        } else
                cpuaddr = (void *)__get_free_pages(flags, get_order(size));

        if (unlikely(!cpuaddr))
                return NULL;

        memset(cpuaddr, 0x0, size);

        /* physical addr. of the memory we just got */
        phys_addr = __pa(cpuaddr);

        /*
         * 64 bit address translations should never fail.
         * 32 bit translations can fail if there are insufficient mapping
         * resources.
         */
        *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                                                   SN_DMA_ADDR_PHYS);
        if (!*dma_handle) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                free_pages((unsigned long)cpuaddr, get_order(size));
                return NULL;
        }

        return cpuaddr;
}

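/*
 * Illustrative usage (a sketch, not part of the original file): drivers
 * reach this routine through the generic API via sn_dma_ops below.
 * "ring" and RING_BYTES are hypothetical:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 */
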
/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 * @attrs: optional dma attributes
 *
 * Frees the memory allocated by dma_alloc_coherent(), potentially unmapping
 * any associated IOMMU mappings.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                                 dma_addr_t dma_handle, struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_handle, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page to map
 * @offset: offset into @page at which to start the mapping
 * @size: size of the region to map
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region for DMA and return the DMA address.
 *
 * We map this to the one step pcibr_dmamap_trans interface rather than
 * the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
 * no way of saving the dmamap handle from the alloc to later free
 * (which is pretty much unacceptable).
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute are mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * TODO: simplify our interface;
 *       figure out how to save dmamap handle so can use two step.
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        void *cpu_addr = page_address(page) + offset;
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        phys_addr = __pa(cpu_addr);
        if (dmabarr)
                dma_addr = provider->dma_map_consistent(pdev, phys_addr,
                                                        size, SN_DMA_ADDR_PHYS);
        else
                dma_addr = provider->dma_map(pdev, phys_addr, size,
                                             SN_DMA_ADDR_PHYS);

        if (!dma_addr) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                return 0;
        }
        return dma_addr;
}

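/*
 * Illustrative usage (a sketch under this era's dma_attrs API, not part
 * of the original file): a driver wanting barrier semantics on a status
 * buffer maps it with DMA_ATTR_WRITE_BARRIER, which takes the
 * dma_map_consistent() path above.  "status" and STATUS_BYTES are
 * hypothetical:
 *
 *	DEFINE_DMA_ATTRS(attrs);
 *	dma_addr_t d;
 *
 *	dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 *	d = dma_map_single_attrs(&pdev->dev, status, STATUS_BYTES,
 *				 DMA_FROM_DEVICE, &attrs);
 */
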
/**
 * sn_dma_unmap_page - unmap a page
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size to unmap
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * This routine is supposed to sync the DMA region specified
 * by @dma_addr into the coherence domain.  On SN, we're always cache
 * coherent, so we just need to free any ATEs associated with this mapping.
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                              size_t size, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                            int nhwentries, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        struct scatterlist *sg;

        BUG_ON(dev->bus != &pci_bus_type);

        for_each_sg(sgl, sg, nhwentries, i) {
                provider->dma_unmap(pdev, sg->dma_address, dir);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
        }
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Mappings with the DMA_ATTR_WRITE_BARRIER attribute are mapped with
 * dma_map_consistent() so that writes force a flush of pending DMA.
 * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
 * Document Number: 007-4763-001)
 *
 * Maps each entry of @sgl for DMA.
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                         int nhwentries, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Setup a DMA address for each entry in the scatterlist.
         */
        for_each_sg(sgl, sg, nhwentries, i) {
                dma_addr_t dma_addr;

                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                if (dmabarr)
                        dma_addr = provider->dma_map_consistent(pdev,
                                                        phys_addr,
                                                        sg->length,
                                                        SN_DMA_ADDR_PHYS);
                else
                        dma_addr = provider->dma_map(pdev, phys_addr,
                                                     sg->length,
                                                     SN_DMA_ADDR_PHYS);

                sg->dma_address = dma_addr;
                if (!sg->dma_address) {
                        printk(KERN_ERR "%s: out of ATEs\n", __func__);

                        /*
                         * Free any successfully allocated entries.
                         */
                        if (i > 0)
                                sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
                        return 0;
                }

                sg->dma_length = sg->length;
        }

        return nhwentries;
}

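/*
 * Illustrative usage (a sketch, not part of the original file): callers
 * go through the generic scatterlist API and must honor the returned
 * count, which is 0 when the ATEs are exhausted.  "program_hw_entry" is
 * a hypothetical hardware setup helper:
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 */
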
/*
 * SN is fully cache coherent, so the streaming sync operations have no
 * cache work to do; they only sanity-check the bus type.
 */
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                       size_t size, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                          size_t size,
                                          enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                   int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                      int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

/*
 * The map routines above report failure synchronously (a zero DMA
 * address plus a printk), so there is never a deferred mapping error
 * to report here.
 */
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

u64 sn_dma_get_required_mask(struct device *dev)
{
        return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

/* Return a CPU-usable address for the legacy (VGA) memory space of @bus. */
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
        if (!SN_PCIBUS_BUSSOFT(bus))
                return ERR_PTR(-ENODEV);

        return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
                        __IA64_UNCACHED_OFFSET);
}

int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        unsigned long addr;
        int ret;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hw issues at the pci bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 0, /* read */
                 port, size, __pa(val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call, which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms, and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus))
                return -ENODEV;

        addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        addr += port;

        ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

        if (ret == 2)
                return -EINVAL;

        if (ret == 1)
                *val = -1;

        return size;
}

int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;
        unsigned long paddr;
        unsigned long *addr;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hw issues at the pci bus level.  SGI proms older than
         * 4.10 don't implement this.
         */

        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 1, /* write */
                 port, size, __pa(&val));

        if (isrv.status == 0)
                return size;

        /*
         * If the above failed, retry using the SAL_PROBE call, which should
         * be present in all proms (but which cannot work around PCI chipset
         * bugs).  This code is retained for compatibility with old
         * pre-4.10 proms, and should be removed at some point in the future.
         */

        if (!SN_PCIBUS_BUSSOFT(bus)) {
                ret = -ENODEV;
                goto out;
        }

        /* Put the phys addr in uncached space */
        paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        paddr += port;
        addr = (unsigned long *)paddr;

        switch (size) {
        case 1:
                *(volatile u8 *)(addr) = (u8)(val);
                break;
        case 2:
                *(volatile u16 *)(addr) = (u16)(val);
                break;
        case 4:
                *(volatile u32 *)(addr) = (u32)(val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
 out:
        return ret;
}

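/*
 * Illustrative usage (a sketch, not part of the original file): the
 * platform PCI layer uses the legacy hooks for port I/O behind a bridge,
 * e.g. reading the one-byte VGA miscellaneous output register at port
 * 0x3cc on @bus:
 *
 *	u32 val;
 *
 *	if (sn_pci_legacy_read(bus, 0x3cc, &val, 1) != 1)
 *		return -EIO;
 */
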
static struct dma_map_ops sn_dma_ops = {
        .alloc = sn_dma_alloc_coherent,
        .free = sn_dma_free_coherent,
        .map_page = sn_dma_map_page,
        .unmap_page = sn_dma_unmap_page,
        .map_sg = sn_dma_map_sg,
        .unmap_sg = sn_dma_unmap_sg,
        .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
        .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
        .sync_single_for_device = sn_dma_sync_single_for_device,
        .sync_sg_for_device = sn_dma_sync_sg_for_device,
        .mapping_error = sn_dma_mapping_error,
        .dma_supported = sn_dma_supported,
};

/* Install the SN-specific operations as the global ia64 dma_ops. */
void sn_dma_init(void)
{
        dma_ops = &sn_dma_ops;
}