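/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 */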
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
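
/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */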
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)
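
/*
 * Enumeration for sync targets
 */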
enum dma_sync_target {
        SYNC_FOR_CPU = 0,
        SYNC_FOR_DEVICE = 1,
};

int swiotlb_force;
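
/*
 * Used to do a quick range check in unmap_single and
 * sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */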
static char *io_tlb_start, *io_tlb_end;
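
/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */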
static unsigned long io_tlb_nslabs;
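
/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */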
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;
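
/*
 * This is a free list describing the number of free entries available from
 * each index.
 */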
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;
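
/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */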
static phys_addr_t *io_tlb_orig_addr;
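
/*
 * Protect the above data structures in the map and unmap calls.
 */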
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                io_tlb_nslabs = simple_strtoul(str, &str, 0);
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force"))
                swiotlb_force = 1;
        return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
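
/* Note that this doesn't work with highmem page */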
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
{
        return phys_to_dma(hwdev, virt_to_phys(address));
}

static void swiotlb_print_info(unsigned long bytes)
{
        phys_addr_t pstart, pend;

        pstart = virt_to_phys(io_tlb_start);
        pend = virt_to_phys(io_tlb_end);

        printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n",
               bytes >> 20, io_tlb_start, io_tlb_end);
        printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n",
               (unsigned long long)pstart,
               (unsigned long long)pend);
}
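
/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */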
void __init
swiotlb_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        /*
         * Get IO TLB memory from the low pages.
         */
        io_tlb_start = alloc_bootmem_low_pages(bytes);
        if (!io_tlb_start)
                panic("Cannot allocate SWIOTLB buffer");
        io_tlb_end = io_tlb_start + bytes;

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;
        io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));

        /*
         * Get the overflow emergency buffer.
         */
        io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
        if (!io_tlb_overflow_buffer)
                panic("Cannot allocate SWIOTLB overflow buffer!\n");

        swiotlb_print_info(bytes);
}

void __init
swiotlb_init(void)
{
        swiotlb_init_with_default_size(64 * (1<<20));   /* default to 64MB */
}
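
/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */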
int
swiotlb_late_init_with_default_size(size_t default_size)
{
        unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
        unsigned int order;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        /*
         * Get IO TLB memory from the low pages.
         */
        order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
        io_tlb_nslabs = SLABS_PER_PAGE << order;
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                io_tlb_start = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
                                                        order);
                if (io_tlb_start)
                        break;
                order--;
        }

        if (!io_tlb_start)
                goto cleanup1;

        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
                bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        }
        io_tlb_end = io_tlb_start + bytes;
        memset(io_tlb_start, 0, bytes);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                                      get_order(io_tlb_nslabs * sizeof(int)));
        if (!io_tlb_list)
                goto cleanup2;

        for (i = 0; i < io_tlb_nslabs; i++)
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
        io_tlb_index = 0;

        io_tlb_orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
                                           sizeof(phys_addr_t)));
        if (!io_tlb_orig_addr)
                goto cleanup3;

        memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

        /*
         * Get the overflow emergency buffer.
         */
        io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
                                          get_order(io_tlb_overflow));
        if (!io_tlb_overflow_buffer)
                goto cleanup4;

        swiotlb_print_info(bytes);

        return 0;

cleanup4:
        free_pages((unsigned long)io_tlb_orig_addr,
                   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
        io_tlb_orig_addr = NULL;
cleanup3:
        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                         sizeof(int)));
        io_tlb_list = NULL;
cleanup2:
        io_tlb_end = NULL;
        free_pages((unsigned long)io_tlb_start, order);
        io_tlb_start = NULL;
cleanup1:
        io_tlb_nslabs = req_nslabs;
        return -ENOMEM;
}
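
/* Does the given physical address fall within the software IO TLB? */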
static int is_swiotlb_buffer(phys_addr_t paddr)
{
        return paddr >= virt_to_phys(io_tlb_start) &&
                paddr < virt_to_phys(io_tlb_end);
}
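
/*
 * Bounce: copy the swiotlb buffer back to the original dma location.
 */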
static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
                           enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(phys);

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = phys & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn),
                                             KM_BOUNCE_READ);
                        if (dir == DMA_TO_DEVICE)
                                memcpy(dma_addr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, dma_addr, sz);
                        kunmap_atomic(buffer, KM_BOUNCE_READ);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        dma_addr += sz;
                        offset = 0;
                }
        } else {
                if (dir == DMA_TO_DEVICE)
                        memcpy(dma_addr, phys_to_virt(phys), size);
                else
                        memcpy(phys_to_virt(phys), dma_addr, size);
        }
}
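
/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */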
static void *
map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
{
        unsigned long flags;
        char *dma_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long start_dma_addr;
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;

        mask = dma_get_seg_boundary(hwdev);
        start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;

        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find suitable number of IO TLB entries size that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        index = ALIGN(io_tlb_index, stride);
        if (index >= io_tlb_nslabs)
                index = 0;
        wrap = index;

        do {
                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                        if (index == wrap)
                                goto not_found;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (io_tlb_list[index] >= nslots) {
                        int count = 0;

                        for (i = index; i < (int) (index + nslots); i++)
                                io_tlb_list[i] = 0;
                        for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
                        dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
                        io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                        ? (index + nslots) : 0);

                        goto found;
                }
                index += stride;
                if (index >= io_tlb_nslabs)
                        index = 0;
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        return NULL;
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);

        return dma_addr;
}
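
/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */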
static void
do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry.
         */
        if (phys && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);

                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with succeeding slots.
                 */
                for (i = index + nslots - 1; i >= index; i--)
                        io_tlb_list[i] = ++count;

                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero).
                 */
                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
            int dir, int target)
{
        int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t phys = io_tlb_orig_addr[index];

        phys += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(phys, dma_addr, size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(phys, dma_addr, size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
        default:
                BUG();
        }
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        dma_addr_t dev_addr;
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        ret = (void *)__get_free_pages(flags, order);
        if (ret && swiotlb_virt_to_bus(hwdev, ret) + size > dma_mask) {
                /*
                 * The allocated memory isn't reachable by the device.
                 */
                free_pages((unsigned long) ret, order);
                ret = NULL;
        }
        if (!ret) {
                /*
                 * We are either out of memory or the device can't DMA
                 * to GFP_DMA memory; fall back on map_single(), which
                 * will grab memory from the lowest available address range.
                 */
                ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
                if (!ret)
                        return NULL;
        }

        memset(ret, 0, size);
        dev_addr = swiotlb_virt_to_bus(hwdev, ret);

        /* Confirm address can be DMA'd by device */
        if (dev_addr + size > dma_mask) {
                printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                       (unsigned long long)dma_mask,
                       (unsigned long long)dev_addr);

                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                return NULL;
        }
        *dma_handle = dev_addr;
        return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dev_addr)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        WARN_ON(irqs_disabled());
        if (!is_swiotlb_buffer(paddr))
                free_pages((unsigned long)vaddr, get_order(size));
        else
                /* DMA_TO_DEVICE to avoid memcpy in do_unmap_single */
                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly,
         * unless they check for dma_mapping_error (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
               "device %s\n", size, dev ? dev_name(dev) : "?");

        if (size <= io_tlb_overflow || !do_panic)
                return;

        if (dir == DMA_BIDIRECTIONAL)
                panic("DMA: Random memory could be DMA accessed\n");
        if (dir == DMA_FROM_DEVICE)
                panic("DMA: Random memory could be DMA written\n");
        if (dir == DMA_TO_DEVICE)
                panic("DMA: Random memory could be DMA read\n");
}
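
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_sync_single_* is performed.
 */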
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = phys_to_dma(dev, phys);
        void *map;

        BUG_ON(dir == DMA_NONE);

        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
                return dev_addr;

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        map = map_single(dev, phys, size, dir);
        if (!map) {
                swiotlb_full(dev, size, dir, 1);
                map = io_tlb_overflow_buffer;
        }

        dev_addr = swiotlb_virt_to_bus(dev, map);

        /*
         * Ensure that the address returned is DMA'ble.
         */
        if (!dma_capable(dev, dev_addr, size))
                panic("map_single: bounce buffer is not DMA'ble");

        return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
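
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */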
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, int dir)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(paddr)) {
                do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * The buffer was not bounced; let the architecture mark the
         * memory clean before the CPU reads it.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        struct dma_attrs *attrs)
{
        unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
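
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */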
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, int dir, int target)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(paddr)) {
                sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
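
/*
 * Same as above, but for a sub-range of the mapping.
 */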
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
                          unsigned long offset, size_t size,
                          int dir, int target)
{
        swiotlb_sync_single(hwdev, dev_addr + offset, size, dir, target);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                  unsigned long offset, size_t size,
                                  enum dma_data_direction dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir)
{
        swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
                                  SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
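
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */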
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

                if (swiotlb_force ||
                    !dma_capable(hwdev, dev_addr, sg->length)) {
                        void *map = map_single(hwdev, sg_phys(sg),
                                               sg->length, dir);
                        if (!map) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                       attrs);
                                sgl[0].dma_length = 0;
                                return 0;
                        }
                        sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg->dma_length = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
               int dir)
{
        return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);
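
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */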
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                 int dir)
{
        return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);
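
/*
 * Make physical memory consistent for a set of streaming mode DMA
 * translations after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */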
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                int nelems, int dir, int target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                swiotlb_sync_single(hwdev, sg->dma_address,
                                    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

/* A mapping that landed in the overflow buffer indicates failure. */
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return (dma_addr == swiotlb_virt_to_bus(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
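
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */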
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return swiotlb_virt_to_bus(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);