/*
 * Routines for PCI DMA mapping on SGI SN (Altix) systems.  See
 * Documentation/DMA-API.txt for an explanation of how these routines
 * should be used.
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>

#define SG_ENT_VIRT_ADDRESS(sg)        (sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(SG)        virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))

/**
 * sn_dma_supported - test a DMA mask
 * @dev: device to test
 * @mask: DMA mask to test
 *
 * Return whether the given PCI device DMA address mask can be supported.
 * SN only supports devices that can drive 32 or more address bits during
 * PCI bus mastering, so smaller masks are rejected.
 */
static int sn_dma_supported(struct device *dev, u64 mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (mask < 0x7fffffff)
                return 0;
        return 1;
}

/**
 * sn_dma_set_mask - set the DMA mask
 * @dev: device to set
 * @dma_mask: new mask
 *
 * Set @dev's DMA mask if the hardware supports it.
 */
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
        BUG_ON(dev->bus != &pci_bus_type);

        if (!sn_dma_supported(dev, dma_mask))
                return 0;

        *dev->dma_mask = dma_mask;
        return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);

/**
 * sn_dma_alloc_coherent - allocate memory for coherent DMA
 * @dev: device to allocate for
 * @size: size of the region
 * @dma_handle: DMA (bus) address
 * @flags: memory allocation flags
 *
 * Returns a pointer to a memory region suitable for coherent DMA traffic
 * to/from a PCI device, with the bus address stored in @dma_handle.  This
 * interface is usually used for "command" streams (e.g. the command queue
 * for a SCSI controller).  See Documentation/DMA-API.txt for more
 * information.
 */
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle, gfp_t flags)
{
        void *cpuaddr;
        unsigned long phys_addr;
        int node;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Allocate the memory, preferably on the node that the device's
         * bus lives on.
         */
        node = pcibus_to_node(pdev->bus);
        if (likely(node >= 0)) {
                struct page *p = alloc_pages_exact_node(node,
                                                flags, get_order(size));

                if (likely(p))
                        cpuaddr = page_address(p);
                else
                        return NULL;
        } else
                cpuaddr = (void *)__get_free_pages(flags, get_order(size));

        if (unlikely(!cpuaddr))
                return NULL;

        memset(cpuaddr, 0x0, size);

        /* physical address of the memory we just allocated */
        phys_addr = __pa(cpuaddr);

        /*
         * 64 bit address translations should never fail.
         * 32 bit translations can fail if there are insufficient mapping
         * resources.
         */
        *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
                                                   SN_DMA_ADDR_PHYS);
        if (!*dma_handle) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                free_pages((unsigned long)cpuaddr, get_order(size));
                return NULL;
        }

        return cpuaddr;
}

/**
 * sn_dma_free_coherent - free memory associated with coherent DMAable region
 * @dev: device to free for
 * @size: size to free
 * @cpu_addr: kernel virtual address to free
 * @dma_handle: DMA address associated with this region
 *
 * Frees the memory allocated by sn_dma_alloc_coherent(), tearing down any
 * associated bus provider mapping.
 */
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
                                 dma_addr_t dma_handle)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_handle, 0);
        free_pages((unsigned long)cpu_addr, get_order(size));
}

/**
 * sn_dma_map_page - map a single page for DMA
 * @dev: device to map for
 * @page: page to map
 * @offset: offset into @page at which the region starts
 * @size: size of the region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Map the region starting at @offset within @page for DMA and return the
 * DMA address.
 *
 * Mappings requested with the DMA_ATTR_WRITE_BARRIER attribute are set up
 * through the bus provider's dma_map_consistent() interface so that writes
 * force a flush of any pending DMA; all other mappings use dma_map().
 */
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        void *cpu_addr = page_address(page) + offset;
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        phys_addr = __pa(cpu_addr);
        if (dmabarr)
                dma_addr = provider->dma_map_consistent(pdev, phys_addr,
                                                        size, SN_DMA_ADDR_PHYS);
        else
                dma_addr = provider->dma_map(pdev, phys_addr, size,
                                             SN_DMA_ADDR_PHYS);

        if (!dma_addr) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                return 0;
        }
        return dma_addr;
}

/**
 * sn_dma_unmap_page - unmap a page mapped for DMA
 * @dev: device to unmap for
 * @dma_addr: DMA address to unmap
 * @size: size of the mapped region
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Tear down the bus provider mapping created by sn_dma_map_page().
 */
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
                              size_t size, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);

        BUG_ON(dev->bus != &pci_bus_type);

        provider->dma_unmap(pdev, dma_addr, dir);
}

/**
 * sn_dma_unmap_sg - unmap a DMA scatterlist
 * @dev: device to unmap for
 * @sgl: scatterlist to unmap
 * @nhwentries: number of scatterlist entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Unmap a set of streaming mode DMA translations.
 */
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                            int nhwentries, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        struct scatterlist *sg;

        BUG_ON(dev->bus != &pci_bus_type);

        for_each_sg(sgl, sg, nhwentries, i) {
                provider->dma_unmap(pdev, sg->dma_address, dir);
                sg->dma_address = (dma_addr_t) NULL;
                sg->dma_length = 0;
        }
}

/**
 * sn_dma_map_sg - map a scatterlist for DMA
 * @dev: device to map for
 * @sgl: scatterlist to map
 * @nhwentries: number of entries
 * @dir: DMA direction
 * @attrs: optional dma attributes
 *
 * Maps each entry of @sgl for DMA.  Mappings requested with the
 * DMA_ATTR_WRITE_BARRIER attribute use dma_map_consistent(); all others
 * use dma_map().
 *
 * Returns the number of entries mapped, or 0 if any entry could not be
 * mapped.
 */
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                         int nhwentries, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
        int dmabarr;

        dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);

        BUG_ON(dev->bus != &pci_bus_type);

        /*
         * Setup a DMA address for each entry in the scatterlist.
         */
        for_each_sg(sgl, sg, nhwentries, i) {
                dma_addr_t dma_addr;
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
                if (dmabarr)
                        dma_addr = provider->dma_map_consistent(pdev,
                                                                phys_addr,
                                                                sg->length,
                                                                SN_DMA_ADDR_PHYS);
                else
                        dma_addr = provider->dma_map(pdev, phys_addr,
                                                     sg->length,
                                                     SN_DMA_ADDR_PHYS);

                sg->dma_address = dma_addr;
                if (!sg->dma_address) {
                        printk(KERN_ERR "%s: out of ATEs\n", __func__);

                        /*
                         * Free any successfully allocated entries.
                         */
                        if (i > 0)
                                sn_dma_unmap_sg(dev, saved_sg, i, dir, attrs);
                        return 0;
                }

                sg->dma_length = sg->length;
        }

        return nhwentries;
}

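/*
 * DMA to and from SN PCI devices is cache-coherent, so the dma_sync_*
 * operations below have no work to do beyond sanity-checking that the
 * device really is a PCI device.
 */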
static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                       size_t size, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
                                          size_t size,
                                          enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                   int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                      int nelems, enum dma_data_direction dir)
{
        BUG_ON(dev->bus != &pci_bus_type);
}

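/*
 * Mapping failures are signalled by the map routines returning a 0 DMA
 * address, so there is no separate error cookie to check here; always
 * report success.
 */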
static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

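/*
 * The SN DMA interfaces can always provide a 64-bit direct-mapped DMA
 * address, so report a full 64-bit required mask.
 */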
u64 sn_dma_get_required_mask(struct device *dev)
{
        return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);

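/*
 * Return a CPU-accessible (uncached) address for the legacy PCI memory
 * space of @bus, or an ERR_PTR if the bus has no SN bus-soft structure.
 */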
char *sn_pci_get_legacy_mem(struct pci_bus *bus)
{
        if (!SN_PCIBUS_BUSSOFT(bus))
                return ERR_PTR(-ENODEV);

        return (char *)(SN_PCIBUS_BUSSOFT(bus)->bs_legacy_mem |
                        __IA64_UNCACHED_OFFSET);
}

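/*
 * sn_pci_legacy_read - read from the legacy I/O space of @bus
 *
 * Returns the number of bytes read on success, or a negative errno.
 */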
int sn_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
        unsigned long addr;
        int ret;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hardware issues at the PCI bus level.  Older SGI proms
         * do not implement it.
         */
        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 0, /* read */
                 port, size, __pa(val));

        if (isrv.status == 0)
                return size;

        /*
         * If the SAL call failed (e.g. the prom does not implement
         * SN_SAL_IOIF_PCI_SAFE), fall back to probing the legacy I/O
         * address directly.  This path cannot work around PCI chipset
         * bugs.
         */
        if (!SN_PCIBUS_BUSSOFT(bus))
                return -ENODEV;

        addr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        addr += port;

        ret = ia64_sn_probe_mem(addr, (long)size, (void *)val);

        if (ret == 2)
                return -EINVAL;

        if (ret == 1)
                *val = -1;

        return size;
}

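/*
 * sn_pci_legacy_write - write to the legacy I/O space of @bus
 *
 * Returns the number of bytes written on success, or a negative errno.
 */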
int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
        int ret = size;
        unsigned long paddr;
        unsigned long *addr;
        struct ia64_sal_retval isrv;

        /*
         * First, try the SN_SAL_IOIF_PCI_SAFE SAL call, which can work
         * around hardware issues at the PCI bus level.  Older SGI proms
         * do not implement it.
         */
        SAL_CALL(isrv, SN_SAL_IOIF_PCI_SAFE,
                 pci_domain_nr(bus), bus->number,
                 0, /* io */
                 1, /* write */
                 port, size, __pa(&val));

        if (isrv.status == 0)
                return size;

        /*
         * If the SAL call failed (e.g. the prom does not implement
         * SN_SAL_IOIF_PCI_SAFE), fall back to writing the legacy I/O
         * address directly.
         */
        if (!SN_PCIBUS_BUSSOFT(bus)) {
                ret = -ENODEV;
                goto out;
        }

        /* Put the physical address in uncached space */
        paddr = SN_PCIBUS_BUSSOFT(bus)->bs_legacy_io | __IA64_UNCACHED_OFFSET;
        paddr += port;
        addr = (unsigned long *)paddr;

        switch (size) {
        case 1:
                *(volatile u8 *)(addr) = (u8)(val);
                break;
        case 2:
                *(volatile u16 *)(addr) = (u16)(val);
                break;
        case 4:
                *(volatile u32 *)(addr) = (u32)(val);
                break;
        default:
                ret = -EINVAL;
                break;
        }
 out:
        return ret;
}

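/*
 * sn_dma_ops wires the SN-specific implementations above into the
 * generic ia64 DMA API.
 */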
static struct dma_map_ops sn_dma_ops = {
        .alloc_coherent = sn_dma_alloc_coherent,
        .free_coherent = sn_dma_free_coherent,
        .map_page = sn_dma_map_page,
        .unmap_page = sn_dma_unmap_page,
        .map_sg = sn_dma_map_sg,
        .unmap_sg = sn_dma_unmap_sg,
        .sync_single_for_cpu = sn_dma_sync_single_for_cpu,
        .sync_sg_for_cpu = sn_dma_sync_sg_for_cpu,
        .sync_single_for_device = sn_dma_sync_single_for_device,
        .sync_sg_for_device = sn_dma_sync_sg_for_device,
        .mapping_error = sn_dma_mapping_error,
        .dma_supported = sn_dma_supported,
};

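/*
 * sn_dma_init - install the SN DMA operations as the platform dma_ops
 */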
void sn_dma_init(void)
{
        dma_ops = &sn_dma_ops;
}