/*
 * Dynamic DMA mapping support: software IO TLB ("swiotlb").
 *
 * This is a fallback for platforms whose devices cannot address all of
 * physical memory and that lack an IOMMU: DMA to and from memory the
 * device cannot reach is staged ("bounced") through a buffer allocated
 * in low memory.
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

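/* Offset of @val within an @align-sized, power-of-two aligned chunk. */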
#define OFFSET(val,align) ((unsigned long)	\
	( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable devices will only lightly use the swiotlb.  If we
 * can't allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated
 * by this API.
 */
static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB slabs between io_tlb_start and io_tlb_end.
 * This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer.  This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

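/* Physical address of the emergency buffer handed out when mapping fails. */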
static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available
 * from each index.
 */
static unsigned int *io_tlb_list;
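/* Index at which the next search for free slots begins. */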
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped
 * entry for the sync operations.
 */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls.
 */
static DEFINE_SPINLOCK(io_tlb_lock);

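/*
 * Nonzero if the IO TLB was allocated late from the page allocator
 * rather than from memblock at boot; swiotlb_free() must then release
 * it with free_pages() instead of memblock_free_late().
 */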
static int late_alloc;

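/*
 * Parse the "swiotlb=" kernel parameter: an optional slab count,
 * optionally followed by ",force" to bounce all DMA through the IO TLB.
 */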
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

unsigned long swiotlb_nr_tbl(void)
{
	return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
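/* Return the configured IO TLB size in bytes, or the 64MB default. */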
unsigned long swiotlb_size_or_default(void)
{
	unsigned long size;

	size = io_tlb_nslabs << IO_TLB_SHIFT;

	return size ? size : (IO_TLB_DEFAULT_SIZE);
}

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

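/* Set when the boot-time IO TLB allocation fails; mapping attempts then panic. */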
static bool no_iotlb_memory;

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	unsigned char *vstart, *vend;

	if (no_iotlb_memory) {
		pr_warn("software IO TLB: No low mem\n");
		return;
	}

	vstart = phys_to_virt(io_tlb_start);
	vend = phys_to_virt(io_tlb_end);

	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
	       (unsigned long long)io_tlb_start,
	       (unsigned long long)io_tlb_end,
	       bytes >> 20, vstart, vend - 1);
}

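/*
 * Set up the software IO TLB in the caller-supplied buffer @tlb of
 * @nslabs slabs, allocating the bookkeeping arrays from memblock.
 */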
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	void *v_overflow_buffer;
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
						PAGE_ALIGN(io_tlb_overflow),
						PAGE_SIZE);
	if (!v_overflow_buffer)
		return -ENOMEM;

	io_tlb_overflow_buffer = __pa(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to indicate the size of the allocation at each possible index,
	 * plus io_tlb_orig_addr which keeps a record of the original
	 * physical address of the bounced buffer.
	 */
	io_tlb_list = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
				PAGE_SIZE);
	io_tlb_orig_addr = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
				PAGE_SIZE);
	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	if (verbose)
		swiotlb_print_info();

	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		memblock_free_early(io_tlb_start,
				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long bytes, req_nslabs = io_tlb_nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart) {
		io_tlb_nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);
	return rc;
}

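/*
 * Late counterpart of swiotlb_init_with_tbl(): identical setup, but the
 * overflow buffer and bookkeeping arrays come from the page allocator.
 */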
int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long i, bytes;
	unsigned char *v_overflow_buffer;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

	memset(tlb, 0, bytes);

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
						     get_order(io_tlb_overflow));
	if (!v_overflow_buffer)
		goto cleanup2;

	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to indicate the size of the allocation at each possible index,
	 * plus io_tlb_orig_addr which keeps a record of the original
	 * physical address of the bounced buffer.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
				      get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup3;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup4;

	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	swiotlb_print_info();

	late_alloc = 1;

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
							 sizeof(int)));
	io_tlb_list = NULL;
cleanup3:
	free_pages((unsigned long)v_overflow_buffer,
		   get_order(io_tlb_overflow));
	io_tlb_overflow_buffer = 0;
cleanup2:
	io_tlb_end = 0;
	io_tlb_start = 0;
	io_tlb_nslabs = 0;
	return -ENOMEM;
}

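/*
 * Tear down the software IO TLB, releasing the bounce buffer, overflow
 * buffer and bookkeeping arrays the same way they were allocated
 * (memblock at boot time, or the page allocator when late_alloc is set).
 */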
void __init swiotlb_free(void)
{
	if (!io_tlb_orig_addr)
		return;

	if (late_alloc) {
		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)phys_to_virt(io_tlb_start),
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		memblock_free_late(io_tlb_overflow_buffer,
				   PAGE_ALIGN(io_tlb_overflow));
		memblock_free_late(__pa(io_tlb_orig_addr),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		memblock_free_late(__pa(io_tlb_list),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		memblock_free_late(io_tlb_start,
				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
	io_tlb_nslabs = 0;
}

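/* Return whether @paddr lies inside the software IO TLB bounce buffer. */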
int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

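/*
 * Allocate a bounce-buffer slot of at least @size bytes that respects the
 * device's segment-boundary mask, record the original address for later
 * syncs, and copy the data in for DMA_TO_DEVICE/DMA_BIDIRECTIONAL maps.
 */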
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long flags;
	phys_addr_t tlb_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	if (no_iotlb_memory)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1;
			     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
			     io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	if (printk_ratelimit())
		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
	return SWIOTLB_MAP_ERROR;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

	return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
	   enum dma_data_direction dir)
{
	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);

	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t size, enum dma_data_direction dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr != INVALID_PHYS_ADDR &&
	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);

		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--) {
			io_tlb_list[i] = ++count;
			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
		}

		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1;
		     (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
		     io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);

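/*
 * Copy between the original buffer and the bounce buffer to keep the two
 * coherent around a transfer: SYNC_FOR_CPU copies out after the device
 * has written, SYNC_FOR_DEVICE copies in before the device reads.
 */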
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	if (orig_addr == INVALID_PHYS_ADDR)
		return;
	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret) {
		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
		if (dev_addr + size - 1 > dma_mask) {
			/*
			 * The allocated memory isn't reachable by the device.
			 */
			free_pages((unsigned long) ret, order);
			ret = NULL;
		}
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA to
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (paddr == SWIOTLB_MAP_ERROR)
			goto err_warn;

		ret = phys_to_virt(paddr);
		dev_addr = phys_to_dma(hwdev, paddr);

		/* Confirm address can be DMA'd by device */
		if (dev_addr + size - 1 > dma_mask) {
			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
			       (unsigned long long)dma_mask,
			       (unsigned long long)dev_addr);

			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
			swiotlb_tbl_unmap_single(hwdev, paddr,
						 size, DMA_TO_DEVICE);
			goto err_warn;
		}
	}

	*dma_handle = dev_addr;
	memset(ret, 0, size);

	return ret;

err_warn:
	pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
		dev_name(hwdev), size);
	dump_stack();

	return NULL;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
	     int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation.  This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);

	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	/* Oh well, have to allocate and map a bounce buffer. */
	map = map_single(dev, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	dev_addr = phys_to_dma(dev, map);

	/* Ensure that the address returned is DMA'ble */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to not
 * see any changes that the device may have made to it.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * For DMA_FROM_DEVICE mappings that were not bounced, give the
	 * architecture a chance to mark the pages clean; dma_mark_clean()
	 * is a no-op on most architectures.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_sync_single_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			phys_addr_t map = map_single(hwdev, sg_phys(sg),
						     sg->length, dir);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			sg->dma_address = phys_to_dma(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       enum dma_data_direction dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 enum dma_data_direction dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA
 * translations after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list,
 * same rules and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg_dma_len(sg), dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

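/*
 * A failed mapping is signalled by returning the DMA address of the
 * overflow buffer, which is what dma_mapping_error checks against.
 */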
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);