/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1 << 20) >> IO_TLB_SHIFT)

enum swiotlb_force swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated
 * by this API.
 */
static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer.  This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index.
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
unsigned int max_segment;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls.
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force")) {
		swiotlb_force = SWIOTLB_FORCE;
	} else if (!strcmp(str, "noforce")) {
		swiotlb_force = SWIOTLB_NO_FORCE;
		io_tlb_nslabs = 1;
	}

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
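
/*
 * Example (illustrative, not taken from kernel documentation): given the
 * parser above, "swiotlb=32768" on the kernel command line reserves 32768
 * slabs for the bounce pool (64MB with the usual 2KB slab size), and
 * "swiotlb=32768,force" additionally bounces all DMA through it.
 * "swiotlb=noforce" disables forced bouncing and shrinks the pool to a
 * single slab.
 */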

unsigned long swiotlb_nr_tbl(void)
{
	return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

unsigned int swiotlb_max_segment(void)
{
	return max_segment;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
	unsigned long size;

	size = io_tlb_nslabs << IO_TLB_SHIFT;

	return size ? size : (IO_TLB_DEFAULT_SIZE);
}
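
/*
 * Worked example (illustrative): with the usual IO_TLB_SHIFT of 11 each
 * slab is 2KB, so the 64MB default corresponds to 64MB >> 11 = 32768
 * slabs, matching the "swiotlb=32768" example above.
 */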

void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }

/* For swiotlb, clear memory encryption mask from dma addresses */
static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
				      phys_addr_t address)
{
	return __sme_clr(phys_to_dma(hwdev, address));
}

/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

static bool no_iotlb_memory;

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	unsigned char *vstart, *vend;

	if (no_iotlb_memory) {
		pr_warn("software IO TLB: No low mem\n");
		return;
	}

	vstart = phys_to_virt(io_tlb_start);
	vend = phys_to_virt(io_tlb_end);

	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
	       (unsigned long long)io_tlb_start,
	       (unsigned long long)io_tlb_end,
	       bytes >> 20, vstart, vend - 1);
}

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	void *vaddr;
	unsigned long bytes;

	if (no_iotlb_memory || late_alloc)
		return;

	vaddr = phys_to_virt(io_tlb_start);
	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
	swiotlb_set_mem_attributes(vaddr, bytes);
	memset(vaddr, 0, bytes);

	vaddr = phys_to_virt(io_tlb_overflow_buffer);
	bytes = PAGE_ALIGN(io_tlb_overflow);
	swiotlb_set_mem_attributes(vaddr, bytes);
	memset(vaddr, 0, bytes);
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	void *v_overflow_buffer;
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
						PAGE_ALIGN(io_tlb_overflow),
						PAGE_SIZE);
	if (!v_overflow_buffer)
		return -ENOMEM;

	io_tlb_overflow_buffer = __pa(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
				PAGE_SIZE);
	io_tlb_orig_addr = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
				PAGE_SIZE);
	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	if (verbose)
		swiotlb_print_info();

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
	return 0;
}
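
/*
 * Worked example (illustrative): with IO_TLB_SEGSIZE of 128, the loop above
 * initializes the free list to 128, 127, ..., 1, 128, 127, ... so that
 * io_tlb_list[i] holds the number of contiguous free slabs from index i up
 * to the end of its 128-slab segment.
 */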

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		memblock_free_early(io_tlb_start,
				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long bytes, req_nslabs = io_tlb_nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart) {
		io_tlb_nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long i, bytes;
	unsigned char *v_overflow_buffer;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

	swiotlb_set_mem_attributes(tlb, bytes);
	memset(tlb, 0, bytes);

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
						     get_order(io_tlb_overflow));
	if (!v_overflow_buffer)
		goto cleanup2;

	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
	memset(v_overflow_buffer, 0, io_tlb_overflow);
	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
				      get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup3;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup4;

	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	swiotlb_print_info();

	late_alloc = 1;

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
							 sizeof(int)));
	io_tlb_list = NULL;
cleanup3:
	free_pages((unsigned long)v_overflow_buffer,
		   get_order(io_tlb_overflow));
	io_tlb_overflow_buffer = 0;
cleanup2:
	io_tlb_end = 0;
	io_tlb_start = 0;
	io_tlb_nslabs = 0;
	max_segment = 0;
	return -ENOMEM;
}

void __init swiotlb_free(void)
{
	if (!io_tlb_orig_addr)
		return;

	if (late_alloc) {
		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)phys_to_virt(io_tlb_start),
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		memblock_free_late(io_tlb_overflow_buffer,
				   PAGE_ALIGN(io_tlb_overflow));
		memblock_free_late(__pa(io_tlb_orig_addr),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		memblock_free_late(__pa(io_tlb_list),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		memblock_free_late(io_tlb_start,
				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
	io_tlb_nslabs = 0;
	max_segment = 0;
}

int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	unsigned long flags;
	phys_addr_t tlb_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	if (no_iotlb_memory)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (sme_active())
		pr_warn_once("SME is active and system is using DMA bounce buffers\n");

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than or equal to a page, we limit the stride
	 * (and hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size >= PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	if (printk_ratelimit())
		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
	return SWIOTLB_MAP_ERROR;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

	return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
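
/*
 * Illustrative sketch (not part of this file's API): a caller such as an
 * IOMMU backend pairs swiotlb_tbl_map_single() with
 * swiotlb_tbl_unmap_single().  example_bounce_map() is hypothetical.
 */
#if 0	/* usage sketch only, not compiled */
static dma_addr_t example_bounce_map(struct device *dev, phys_addr_t phys,
				     size_t size, enum dma_data_direction dir)
{
	dma_addr_t tbl_dma = swiotlb_phys_to_dma(dev, io_tlb_start);
	phys_addr_t tlb_addr;

	tlb_addr = swiotlb_tbl_map_single(dev, tbl_dma, phys, size, dir, 0);
	if (tlb_addr == SWIOTLB_MAP_ERROR)
		return 0;	/* 0 stands in for the caller's error cookie */

	/* The device is handed the bounce buffer, not the original page. */
	return swiotlb_phys_to_dma(dev, tlb_addr);
}
#endif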

/*
 * Allocates bounce buffer and returns its physical address.
 */
static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
	   enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start_dma_addr;

	if (swiotlb_force == SWIOTLB_NO_FORCE) {
		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
				     &phys);
		return SWIOTLB_MAP_ERROR;
	}

	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
				      dir, attrs);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr != INVALID_PHYS_ADDR &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);

		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with superseding slots
		 */
		for (i = index + nslots - 1; i >= index; i--) {
			io_tlb_list[i] = ++count;
			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
		}
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);

void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	if (orig_addr == INVALID_PHYS_ADDR)
		return;
	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret) {
		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
		if (dev_addr + size - 1 > dma_mask) {
			/*
			 * The allocated memory isn't reachable by the device.
			 */
			free_pages((unsigned long) ret, order);
			ret = NULL;
		}
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA to
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		phys_addr_t paddr = map_single(hwdev, 0, size,
					       DMA_FROM_DEVICE, 0);
		if (paddr == SWIOTLB_MAP_ERROR)
			goto err_warn;

		ret = phys_to_virt(paddr);
		dev_addr = swiotlb_phys_to_dma(hwdev, paddr);

		/* Confirm address can be DMA'd by device */
		if (dev_addr + size - 1 > dma_mask) {
			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
			       (unsigned long long)dma_mask,
			       (unsigned long long)dev_addr);

			/*
			 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
			 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
			 */
			swiotlb_tbl_unmap_single(hwdev, paddr,
						 size, DMA_TO_DEVICE,
						 DMA_ATTR_SKIP_CPU_SYNC);
			goto err_warn;
		}
	}

	*dma_handle = dev_addr;
	memset(ret, 0, size);

	return ret;

err_warn:
	pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
		dev_name(hwdev), size);
	dump_stack();

	return NULL;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/*
		 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
		 * DMA_ATTR_SKIP_CPU_SYNC is optional.
		 */
		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
					 DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
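
/*
 * Illustrative sketch: a hypothetical driver allocating and releasing a
 * coherent DMA buffer through the two helpers above.  my_setup_ring() and
 * the one-page size are assumptions for the example.
 */
#if 0	/* usage sketch only, not compiled */
static int my_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring = swiotlb_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
					    GFP_KERNEL);

	if (!ring)
		return -ENOMEM;
	/* ... point the hardware at ring_dma, use "ring" from the CPU ... */
	swiotlb_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
#endif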

static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
	     int do_panic)
{
	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;

	/*
	 * Ran out of IOMMU space for this operation.  This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly,
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
			    size);

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
		return dev_addr;

	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	/* Oh well, have to allocate and map a bounce buffer. */
	map = map_single(dev, phys, size, dir, attrs);
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	dev_addr = swiotlb_phys_to_dma(dev, map);

	/* Ensure that the address returned is DMA'ble */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);
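
/*
 * Illustrative sketch: streaming DMA through swiotlb_map_page() with the
 * mapping-error check that swiotlb_full() assumes most drivers skip.
 * my_start_tx() and its buffer are assumptions for the example.
 */
#if 0	/* usage sketch only, not compiled */
static int my_start_tx(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t dma = swiotlb_map_page(dev, page, 0, len,
					  DMA_TO_DEVICE, 0);

	if (swiotlb_dma_mapping_error(dev, dma))
		return -ENOMEM;	/* the overflow-buffer cookie came back */
	/* ... kick the hardware with "dma" ... */
	swiotlb_unmap_page(dev, dma, len, DMA_TO_DEVICE, 0);
	return 0;
}
#endif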

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to not
 * see any changes to the memory written by the device.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir,
			 unsigned long attrs)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem page but we could
	 * call dma_mark_clean() with highmem page here.  However, we
	 * are fine since dma_mark_clean() is null on POWERPC.  We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
{
	unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
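
/*
 * Illustrative sketch: the ownership hand-off pattern for a long-lived
 * streaming mapping.  my_poll_rx() and its arguments are assumptions for
 * the example.
 */
#if 0	/* usage sketch only, not compiled */
static void my_poll_rx(struct device *dev, dma_addr_t dma, void *buf,
		       size_t len)
{
	/* Hand ownership to the CPU before reading what the device wrote. */
	swiotlb_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* ... inspect buf ... */
	/* Hand ownership back to the device for the next transfer. */
	swiotlb_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}
#endif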

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			phys_addr_t map = map_single(hwdev, sg_phys(sg),
						     sg->length, dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);
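
/*
 * Illustrative sketch: mapping a scatterlist and handling the
 * zero-on-failure convention of swiotlb_map_sg_attrs().  The names are
 * assumptions for the example.
 */
#if 0	/* usage sketch only, not compiled */
static int my_map_fragments(struct device *dev, struct scatterlist *sgl,
			    int nents, enum dma_data_direction dir)
{
	int mapped = swiotlb_map_sg_attrs(dev, sgl, nents, dir, 0);

	if (!mapped)
		return -ENOMEM;	/* nothing stays mapped on failure */
	/* ... program each sg_dma_address()/sg_dma_len() pair ... */
	swiotlb_unmap_sg_attrs(dev, sgl, mapped, dir, 0);
	return 0;
}
#endif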

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg_dma_len(sg), dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);