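/*
 * Software IO TLB (swiotlb): bounce-buffer support for the DMA API, used
 * when a device cannot directly address the memory handed to dma_map_*().
 */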
#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
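
/*
 * Minimum IO TLB size to bother booting with: the late allocator will not
 * shrink the pool below a contiguous 1MB, on the theory that anything
 * smaller is not worth setting up.
 */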
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

enum swiotlb_force swiotlb_force;

struct io_tlb_mem io_tlb_default_mem;
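
/*
 * Largest contiguous segment that can be mapped without being bounce
 * buffered.  Reported as 0 until the default pool is initialized, and
 * forced to 1 while swiotlb=force is in effect.
 */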
static unsigned int max_segment;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
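
/*
 * Parse the "swiotlb=" early parameter: swiotlb=<nslabs>[,force|noforce].
 */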
static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
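		/* avoid a tail segment smaller than IO_TLB_SEGSIZE */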
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = SWIOTLB_FORCE;
	else if (!strcmp(str, "noforce"))
		swiotlb_force = SWIOTLB_NO_FORCE;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

unsigned int swiotlb_max_segment(void)
{
	return io_tlb_default_mem.nslabs ? max_segment : 0;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
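	/*
	 * If a "swiotlb=" kernel parameter was supplied, default_nslabs no
	 * longer holds its built-in default; in that case the user's choice
	 * wins and the architecture's adjustment is ignored.
	 */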
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;
	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
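
/*
 * Remap the default IO TLB pool once the architecture can change memory
 * attributes, e.g. to mark the bounce buffer decrypted when memory
 * encryption is active.  Late-allocated pools already do this in
 * swiotlb_late_init_with_tbl().
 */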
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
				    unsigned long nslabs, bool late_alloc)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->index = 0;
	mem->late_alloc = late_alloc;

	if (swiotlb_force == SWIOTLB_FORCE)
		mem->force_bounce = true;

	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}
	memset(vaddr, 0, bytes);
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	size_t alloc_size;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;
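
	/* protect against double initialization */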
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false);

	if (verbose)
		swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}
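
/*
 * Statically reserve bounce-buffer space from the memblock allocator and
 * initialize the default IO TLB pool used to implement the DMA API.
 */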
void __init
swiotlb_init(int verbose)
{
	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
	void *tlb;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;
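
	/* Get IO TLB memory from the low pages. */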
	tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb)
		goto fail;
	if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
		goto fail_free_mem;
	return;

fail_free_mem:
	memblock_free_early(__pa(tlb), bytes);
fail:
	pr_warn("Cannot allocate buffer");
}
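
/*
 * Set up the IO TLB late, from the page allocator, for systems that could
 * not (or chose not to) reserve the buffer at boot.  The allocation is
 * retried with smaller orders on failure, but never below IO_TLB_MIN_SLABS.
 */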
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long nslabs =
		ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned long bytes;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;
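
	/*
	 * Round the request up to a power-of-two number of pages, then retry
	 * with progressively smaller orders if the allocation fails.
	 */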
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;
	bytes = nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart)
		return -ENOMEM;

	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes = nslabs << IO_TLB_SHIFT;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;
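
	/* protect against double initialization */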
	if (WARN_ON_ONCE(mem->nslabs))
		return -ENOMEM;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		return -ENOMEM;

	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(tlb), nslabs, true);

	swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}
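
/*
 * Return the offset into an IO TLB slot needed to preserve the device's
 * minimum DMA alignment for the given address.
 */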
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
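
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location.
 */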
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
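		/*
		 * The original buffer is in highmem and may not be mapped;
		 * copy it one page at a time via kmap_atomic().
		 */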
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
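
/*
 * Return the number of IO TLB slots that fit within the device's segment
 * boundary, taking care not to overflow when the boundary mask is ~0UL.
 */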
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->nslabs)
		return 0;
	return index;
}
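
/*
 * Find a run of IO TLB slots that can hold this request and reserve it.
 * Returns the index of the first slot, or -1 if no suitable run was found.
 */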
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
			      size_t alloc_size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned long flags;

	BUG_ON(!nslots);
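
	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once an aligned one has been found.  For mappings
	 * of PAGE_SIZE or larger only look at page-aligned slots.
	 */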
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));

	spin_lock_irqsave(&mem->lock, flags);
	if (unlikely(nslots > mem->nslabs - mem->used))
		goto not_found;

	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
	do {
		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
			    (orig_addr & iotlb_align_mask)) {
			index = wrap_index(mem, index + 1);
			continue;
		}
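
		/*
		 * If this run of slots does not cross the segment boundary
		 * and its head slot advertises at least nslots contiguous
		 * free slots, claim it.
		 */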
		if (!iommu_is_span_boundary(index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[index].list >= nslots)
				goto found;
		}
		index = wrap_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&mem->lock, flags);
	return -1;

found:
	for (i = index; i < index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size =
			alloc_size - (offset + ((i - index) << IO_TLB_SHIFT));
	}
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;
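
	/*
	 * Update the index so the next search starts after the slots just
	 * allocated.
	 */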
	if (index + nslots < mem->nslabs)
		mem->index = index + nslots;
	else
		mem->index = 0;
	mem->used += nslots;

	spin_unlock_irqrestore(&mem->lock, flags);
	return index;
}

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (mem_encrypt_active())
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr, alloc_size + offset);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem->used);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}
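
	/*
	 * Save the original address of each slot so the data can be bounced
	 * back on sync/unmap, then copy the data over for DMA_TO_DEVICE
	 * unless the caller asked to skip the CPU sync.
	 */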
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}

static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int count, i;
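
	/*
	 * Return the slots to the free list, merging them with free slots
	 * that follow within the same IO_TLB_SEGSIZE segment.
	 */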
	spin_lock_irqsave(&mem->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;
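
	/*
	 * Step 1: mark the released slots as free, counting up the length of
	 * the contiguous free run from the tail.
	 */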
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}
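
	/*
	 * Step 2: propagate the new free-run length into any free slots that
	 * precede the released range within the same segment.
	 */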
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	mem->used -= nslots;
	spin_unlock_irqrestore(&mem->lock, flags);
}
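
/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */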
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
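	/*
	 * First, sync the memory back to the original buffer before
	 * releasing the entry.
	 */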
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}
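
/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */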
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
			      swiotlb_force);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;
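
	/* Ensure that the returned address is usable by the device. */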
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

#ifdef CONFIG_DEBUG_FS
static struct dentry *debugfs_dir;

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem)
{
	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
}

static int __init swiotlb_create_default_debugfs(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	debugfs_dir = debugfs_create_dir("swiotlb", NULL);
	if (mem->nslabs) {
		mem->debugfs = debugfs_dir;
		swiotlb_create_debugfs_files(mem);
	}
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#endif

#ifdef CONFIG_DMA_RESTRICTED_POOL

#ifdef CONFIG_DEBUG_FS
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
	struct io_tlb_mem *mem = rmem->priv;

	mem->debugfs = debugfs_create_dir(rmem->name, debugfs_dir);
	swiotlb_create_debugfs_files(mem);
}
#else
static void rmem_swiotlb_debugfs_init(struct reserved_mem *rmem)
{
}
#endif

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
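
	/*
	 * Several devices may share the same restricted pool; only the first
	 * device attached initializes the io_tlb_mem stored in rmem->priv.
	 */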
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kzalloc(array_size(sizeof(*mem->slots), nslabs),
				     GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false);
		mem->force_bounce = true;
		mem->for_alloc = true;

		rmem->priv = mem;

		rmem_swiotlb_debugfs_init(rmem);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif