/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR NON INFRINGEMENT.  See the GNU General Public
 *   License for more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory.  On TILEPro this is
 * uncached memory; on TILE-Gx it is hash-for-home memory, which the
 * I/O hardware keeps coherent.
 */
#ifdef __tilepro__
#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
#else
#define PAGE_HOME_DMA PAGE_HOME_HASH
#endif

static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t gfp,
                                     unsigned long attrs)
{
        u64 dma_mask = (dev && dev->coherent_dma_mask) ?
                dev->coherent_dma_mask : DMA_BIT_MASK(32);
        int node = dev ? dev_to_node(dev) : 0;
        int order = get_order(size);
        struct page *pg;
        dma_addr_t addr;

        gfp |= __GFP_ZERO;

        /*
         * If the mask requires that the memory be in the first 4 GB,
         * force the allocation to come from the DMA zone, and from
         * node 0, since that is the only node where the DMA zone is
         * non-empty.  For masks smaller than 32 bits we may still fail
         * to get a suitable address, in which case we return NULL
         * below; such devices are uncommon.
         */
        if (dma_mask <= DMA_BIT_MASK(32)) {
                gfp |= GFP_DMA;
                node = 0;
        }

        pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
        if (pg == NULL)
                return NULL;

        addr = page_to_phys(pg);
        if (addr + size > dma_mask) {
                __homecache_free_pages(pg, order);
                return NULL;
        }

        *dma_handle = addr;

        return page_address(pg);
}

/* Free memory that was allocated with tile_dma_alloc_coherent. */
static void tile_dma_free_coherent(struct device *dev, size_t size,
                                   void *vaddr, dma_addr_t dma_handle,
                                   unsigned long attrs)
{
        homecache_free_pages((unsigned long)vaddr, get_order(size));
}

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until the unmap call.
 *
 * There is no hardware mapping to set up; we simply flush the
 * address range out of the cache so that a DMA sees (or produces)
 * the right data.
 */
static void __dma_prep_page(struct page *page, unsigned long offset,
                            size_t size, enum dma_data_direction direction)
{
        /*
         * Flush the page from cache if necessary.
         * On tilegx, data is delivered to hash-for-home L3; on tilepro,
         * data is delivered directly to memory.
         *
         * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
         * this to be a "flush" rather than a "finv" and keep some of
         * the state in cache across the DMA operation, but it doesn't
         * seem worth creating the necessary flush_buffer_xxx()
         * infrastructure.
         */
        int home = page_home(page);
        switch (home) {
        case PAGE_HOME_HASH:
#ifdef __tilegx__
                return;
#endif
                break;
        case PAGE_HOME_UNCACHED:
#ifdef __tilepro__
                return;
#endif
                break;
        case PAGE_HOME_IMMUTABLE:
                /* Immutable pages should only be going to the device. */
                BUG_ON(direction == DMA_FROM_DEVICE ||
                       direction == DMA_BIDIRECTIONAL);
                return;
        case PAGE_HOME_INCOHERENT:
                /* Incoherent anyway, so no need to work hard here. */
                return;
        default:
                BUG_ON(home < 0 || home >= NR_CPUS);
                break;
        }
        homecache_finv_page(page);

#ifdef DEBUG_ALIGNMENT
        /* Warn if the region isn't cacheline aligned. */
        if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
                pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
                        PFN_PHYS(page_to_pfn(page)) + offset, size);
#endif
}

/* Make the page ready to be read by the core. */
static void __dma_complete_page(struct page *page, unsigned long offset,
                                size_t size, enum dma_data_direction direction)
{
#ifdef __tilegx__
        switch (page_home(page)) {
        case PAGE_HOME_HASH:
                /* I/O device delivered data the way the cpu wanted it. */
                break;
        case PAGE_HOME_INCOHERENT:
                /* Incoherent anyway, so no need to work hard here. */
                break;
        case PAGE_HOME_IMMUTABLE:
                /* Extra read-only copies are not a problem. */
                break;
        default:
                /* Flush the stale hash-for-home I/O entries to memory. */
                homecache_finv_map_page(page, PAGE_HOME_HASH);
                break;
        }
#endif
}

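/*
 * Walk a physical address range page by page, preparing each page
 * for device access ("mapping" it in the TILE sense, i.e. cache
 * flushing only).
 */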
static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
                                enum dma_data_direction direction)
{
        struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
        unsigned long offset = dma_addr & (PAGE_SIZE - 1);
        size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

        while (size != 0) {
                __dma_prep_page(page, offset, bytes, direction);
                size -= bytes;
                ++page;
                offset = 0;
                bytes = min((size_t)PAGE_SIZE, size);
        }
}

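/*
 * Walk a physical address range page by page, making each page
 * coherent for the CPU again once the device is done with it.
 */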
static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
                                    enum dma_data_direction direction)
{
        struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
        unsigned long offset = dma_addr & (PAGE_SIZE - 1);
        size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));

        while (size != 0) {
                __dma_complete_page(page, offset, bytes, direction);
                size -= bytes;
                ++page;
                offset = 0;
                bytes = min((size_t)PAGE_SIZE, size);
        }
}

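/*
 * There is no IOMMU in this path, so "mapping" a scatterlist just
 * records each segment's physical address and flushes the covered
 * pages out of the cache.
 */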
static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                           int nents, enum dma_data_direction direction,
                           unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));

        WARN_ON(nents == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;
                __dma_prep_pa_range(sg->dma_address, sg->length, direction);
        }

        return nents;
}

static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
                              int nents, enum dma_data_direction direction,
                              unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;
                __dma_complete_pa_range(sg->dma_address, sg->length,
                                        direction);
        }
}

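/*
 * Map a single page: as above, the DMA address is just the physical
 * address; the cache flush is the only real work.
 */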
static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction direction,
                                    unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        BUG_ON(offset + size > PAGE_SIZE);
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_prep_page(page, offset, size, direction);

        return page_to_pa(page) + offset;
}

static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                size_t size, enum dma_data_direction direction,
                                unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
                            dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_dma_sync_single_for_cpu(struct device *dev,
                                         dma_addr_t dma_handle,
                                         size_t size,
                                         enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));

        __dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_single_for_device(struct device *dev,
                                            dma_addr_t dma_handle, size_t size,
                                            enum dma_data_direction direction)
{
        __dma_prep_pa_range(dma_handle, size, direction);
}

static void tile_dma_sync_sg_for_cpu(struct device *dev,
                                     struct scatterlist *sglist, int nelems,
                                     enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_cpu(dev, sg->dma_address,
                                        sg_dma_len(sg), direction);
        }
}

static void tile_dma_sync_sg_for_device(struct device *dev,
                                        struct scatterlist *sglist, int nelems,
                                        enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_device(dev, sg->dma_address,
                                           sg_dma_len(sg), direction);
        }
}

static const struct dma_map_ops tile_default_dma_map_ops = {
        .alloc = tile_dma_alloc_coherent,
        .free = tile_dma_free_coherent,
        .map_page = tile_dma_map_page,
        .unmap_page = tile_dma_unmap_page,
        .map_sg = tile_dma_map_sg,
        .unmap_sg = tile_dma_unmap_sg,
        .sync_single_for_cpu = tile_dma_sync_single_for_cpu,
        .sync_single_for_device = tile_dma_sync_single_for_device,
        .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
        .sync_sg_for_device = tile_dma_sync_sg_for_device,
};

const struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
EXPORT_SYMBOL(tile_dma_map_ops);
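
/*
 * Drivers never call these ops directly; they are reached through the
 * generic DMA API.  A typical (illustrative) driver sequence:
 *
 *	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...point the device at "handle" and let it DMA...
 *	dma_free_coherent(dev, size, buf, handle);
 */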

/* Generic PCI DMA mapping functions */

static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t gfp,
                                         unsigned long attrs)
{
        int node = dev_to_node(dev);
        int order = get_order(size);
        struct page *pg;
        dma_addr_t addr;

        gfp |= __GFP_ZERO;

        pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
        if (pg == NULL)
                return NULL;

        addr = page_to_phys(pg);

        *dma_handle = addr + get_dma_offset(dev);

        return page_address(pg);
}

/*
 * Free memory that was allocated with tile_pci_dma_alloc_coherent.
 */
static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_handle,
                                       unsigned long attrs)
{
        homecache_free_pages((unsigned long)vaddr, get_order(size));
}

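/*
 * PCI DMA addresses are CPU physical addresses shifted by a per-device
 * offset (get_dma_offset()), so each map routine adds the offset after
 * the cache-flush work and each unmap/sync routine subtracts it first.
 */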
static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                               int nents, enum dma_data_direction direction,
                               unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));

        WARN_ON(nents == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_prep_pa_range(sg->dma_address, sg->length, direction);

                sg->dma_address = sg->dma_address + get_dma_offset(dev);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
        }

        return nents;
}

static void tile_pci_dma_unmap_sg(struct device *dev,
                                  struct scatterlist *sglist, int nents,
                                  enum dma_data_direction direction,
                                  unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        for_each_sg(sglist, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_complete_pa_range(sg->dma_address, sg->length,
                                        direction);
        }
}

static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
                                        unsigned long offset, size_t size,
                                        enum dma_data_direction direction,
                                        unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        BUG_ON(offset + size > PAGE_SIZE);
        __dma_prep_page(page, offset, size, direction);

        return page_to_pa(page) + offset + get_dma_offset(dev);
}

static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                    size_t size,
                                    enum dma_data_direction direction,
                                    unsigned long attrs)
{
        BUG_ON(!valid_dma_direction(direction));

        dma_address -= get_dma_offset(dev);

        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
                            dma_address & (PAGE_SIZE - 1), size, direction);
}

static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
                                             dma_addr_t dma_handle,
                                             size_t size,
                                             enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));

        dma_handle -= get_dma_offset(dev);

        __dma_complete_pa_range(dma_handle, size, direction);
}

static void tile_pci_dma_sync_single_for_device(struct device *dev,
                                                dma_addr_t dma_handle,
                                                size_t size,
                                                enum dma_data_direction direction)
{
        dma_handle -= get_dma_offset(dev);

        __dma_prep_pa_range(dma_handle, size, direction);
}

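/*
 * The scatterlist sync routines hand each segment's dma_address back
 * to the single-entry sync hooks above, which strip the per-device
 * DMA offset before touching the caches.
 */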
static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
                                         struct scatterlist *sglist,
                                         int nelems,
                                         enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_cpu(dev, sg->dma_address,
                                        sg_dma_len(sg), direction);
        }
}

static void tile_pci_dma_sync_sg_for_device(struct device *dev,
                                            struct scatterlist *sglist,
                                            int nelems,
                                            enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(!valid_dma_direction(direction));
        WARN_ON(nelems == 0 || sglist->length == 0);

        for_each_sg(sglist, sg, nelems, i) {
                dma_sync_single_for_device(dev, sg->dma_address,
                                           sg_dma_len(sg), direction);
        }
}

static const struct dma_map_ops tile_pci_default_dma_map_ops = {
        .alloc = tile_pci_dma_alloc_coherent,
        .free = tile_pci_dma_free_coherent,
        .map_page = tile_pci_dma_map_page,
        .unmap_page = tile_pci_dma_unmap_page,
        .map_sg = tile_pci_dma_map_sg,
        .unmap_sg = tile_pci_dma_unmap_sg,
        .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
        .sync_single_for_device = tile_pci_dma_sync_single_for_device,
        .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
        .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
};

const struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
EXPORT_SYMBOL(gx_pci_dma_map_ops);

/* PCI DMA mapping functions for legacy PCI devices */

#ifdef CONFIG_SWIOTLB
static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
                                         dma_addr_t *dma_handle, gfp_t gfp,
                                         unsigned long attrs)
{
        /* Legacy devices can only DMA below 4 GB, so allocate from ZONE_DMA. */
        gfp |= GFP_DMA;
        return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}

static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
                                       void *vaddr, dma_addr_t dma_addr,
                                       unsigned long attrs)
{
        swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}

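/*
 * Ops for 32-bit-only ("legacy") PCI devices: streaming mappings may
 * bounce through the swiotlb buffer below 4 GB.
 */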
static const struct dma_map_ops pci_swiotlb_dma_ops = {
        .alloc = tile_swiotlb_alloc_coherent,
        .free = tile_swiotlb_free_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
        .unmap_sg = swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};

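/*
 * Hybrid ops: coherent allocations still come from the swiotlb pool
 * below 4 GB, but streaming mappings go straight to the direct-mapped
 * PCI routines and can therefore use 64-bit DMA addresses.
 */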
static const struct dma_map_ops pci_hybrid_dma_ops = {
        .alloc = tile_swiotlb_alloc_coherent,
        .free = tile_swiotlb_free_coherent,
        .map_page = tile_pci_dma_map_page,
        .unmap_page = tile_pci_dma_unmap_page,
        .map_sg = tile_pci_dma_map_sg,
        .unmap_sg = tile_pci_dma_unmap_sg,
        .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
        .sync_single_for_device = tile_pci_dma_sync_single_for_device,
        .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
        .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
};

const struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
const struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
#else
const struct dma_map_ops *gx_legacy_pci_dma_map_ops;
const struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
#endif
EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
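
/*
 * The arch overrides dma_set_mask() so that a device running the
 * bounce-buffering legacy ops can be promoted to the hybrid ops when
 * it asks for a 64-bit mask, and so that other masks are capped at
 * the range that can be direct-mapped.
 */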
int dma_set_mask(struct device *dev, u64 mask)
{
        const struct dma_map_ops *dma_ops = get_dma_ops(dev);

        /*
         * For PCI devices with 64-bit DMA addressing capability, promote
         * the dma_ops to hybrid, with the consistent memory DMA space
         * limited to 32-bit.  For 32-bit capable devices, limit the
         * streaming DMA address range to max_direct_dma_addr.
         */
        if (dma_ops == gx_pci_dma_map_ops ||
            dma_ops == gx_hybrid_pci_dma_map_ops ||
            dma_ops == gx_legacy_pci_dma_map_ops) {
                if (mask == DMA_BIT_MASK(64) &&
                    dma_ops == gx_legacy_pci_dma_map_ops)
                        set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
                else if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
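
/*
 * Illustrative driver-side usage (generic DMA API; "pdev" is a
 * hypothetical pci_dev):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	// no usable DMA addressing
 */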

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        const struct dma_map_ops *dma_ops = get_dma_ops(dev);

        /*
         * For PCI devices with 64-bit DMA addressing capability, promote
         * the dma_ops to full capability for both streaming and consistent
         * memory access.  For 32-bit capable devices, limit the consistent
         * memory DMA range to max_direct_dma_addr.
         */
        if (dma_ops == gx_pci_dma_map_ops ||
            dma_ops == gx_hybrid_pci_dma_map_ops ||
            dma_ops == gx_legacy_pci_dma_map_ops) {
                if (mask == DMA_BIT_MASK(64))
                        set_dma_ops(dev, gx_pci_dma_map_ops);
                else if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }

        if (!dma_supported(dev, mask))
                return -EIO;
        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
/*
 * The generic dma_get_required_mask() derives its hint from the
 * highest physical address (max_pfn), which can steer 64-bit-capable
 * drivers toward 32-bit DMA.  Always report a 64-bit required mask
 * here instead, so that capable devices use 64-bit DMA addressing
 * rather than swiotlb bounce buffering.
 */
u64 dma_get_required_mask(struct device *dev)
{
        return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
#endif