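/*
 * Xen SWIOTLB: bounce-buffer based DMA support for Xen guests.
 *
 * This code provides a software IOMMU for Xen PV guests with PCI device
 * passthrough: streaming DMA mappings that are not machine contiguous,
 * that cross a Xen page boundary, or that fall outside the device's DMA
 * mask are bounced through a swiotlb buffer which has been exchanged with
 * the hypervisor for machine-contiguous memory below the DMA limit.
 */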
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-direct.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>

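/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated
 * by this API.
 */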
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;
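
/*
 * Quick lookup value of the bus address of the IOTLB.
 */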
static u64 start_dma_addr;
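
/*
 * Both of these functions should avoid XEN_PFN_PHYS because phys_addr_t
 * can be 32bit when dma_addr_t is 64bit leading to a loss in
 * information if the shift is done before casting to 64bit.
 */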
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;

	dma |= paddr & ~XEN_PAGE_MASK;

	return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
	phys_addr_t paddr = dma;

	paddr |= baddr & ~XEN_PAGE_MASK;

	return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}

static int check_pages_physically_contiguous(unsigned long xen_pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_bfn;
	int i;
	int nr_pages;

	next_bfn = pfn_to_bfn(xen_pfn);
	nr_pages = (offset + length + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 0;
	}
	return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long xen_pfn = XEN_PFN_DOWN(p);
	unsigned int offset = p & ~XEN_PAGE_MASK;

	if (offset + size <= XEN_PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(xen_pfn, offset, size))
		return 0;
	return 1;
}

static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
	phys_addr_t paddr = XEN_PFN_PHYS(xen_pfn);

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(PFN_DOWN(paddr))) {
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;
	dma_addr_t dma_handle;
	phys_addr_t p = virt_to_phys(buf);

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				p + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits, &dma_handle);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}

static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}

int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);

	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);

	if (!rc)
		swiotlb_set_max_segment(PAGE_SIZE);

	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}

static void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's ideas of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	/* On ARM this function returns an ioremap'ped virtual address for
	 * which virt_to_phys doesn't return the actual physical address.
	 * In fact on ARM virt_to_phys only works for kernel direct mapped
	 * RAM memory.
	 */
	ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* At this point dma_handle is the physical address, next we are
	 * going to set it to the machine address.
	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
	 * to *dma_handle. */
	phys = *dma_handle;
	dev_addr = xen_phys_to_bus(phys);
	if (((dev_addr + size - 1 <= dma_mask)) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(phys, order,
						 fls64(dma_mask), dma_handle) != 0) {
			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
			return NULL;
		}
	}
	memset(ret, 0, size);
	return ret;
}

static void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, unsigned long attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	/* do not use virt_to_phys because on ARM it doesn't return you the
	 * physical address */
	phys = xen_bus_to_phys(dev_addr);

	if (((dev_addr + size - 1 <= dma_mask)) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region(phys, order);

	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}

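/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */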
static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				       unsigned long offset, size_t size,
				       enum dma_data_direction dir,
				       unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) &&
	    !xen_arch_need_swiotlb(dev, phys, dev_addr) &&
	    (swiotlb_force != SWIOTLB_FORCE)) {
		/* we are not interested in the dma_addr returned by
		 * xen_dma_map_page, only in the potential cache flushes executed
		 * by the function. */
		xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
		return dev_addr;
	}

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir,
				     attrs);
	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	dev_addr = xen_phys_to_bus(map);
	xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return DMA_MAPPING_ERROR;
}

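/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */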
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir,
			     unsigned long attrs)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
}

static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}

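/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must call
 * this function before doing so.  At the next point you give the dma address
 * back to the card, you must first perform a
 * xen_swiotlb_sync_single_for_device, and then the device again owns the
 * buffer.
 */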
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (target == SYNC_FOR_CPU)
		xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr))
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

	if (target == SYNC_FOR_DEVICE)
		xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

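/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for xen_swiotlb_unmap_page() above.
 */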
static void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}

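/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are
 * the same here.
 */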
static int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    xen_arch_need_swiotlb(hwdev, paddr, dev_addr) ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir, attrs);
			if (map == (phys_addr_t)DMA_MAPPING_ERROR) {
				dev_warn(hwdev, "swiotlb buffer is full\n");
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			dev_addr = xen_phys_to_bus(map);
			xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
					 dev_addr,
					 map & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		} else {
			/* we are not interested in the dma_addr returned by
			 * xen_dma_map_page, only in the potential cache flushes executed
			 * by the function. */
			xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
					 dev_addr,
					 paddr & ~PAGE_MASK,
					 sg->length,
					 dir,
					 attrs);
			sg->dma_address = dev_addr;
		}
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}

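/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.  The same as the single-buffer sync above, but for a
 * scatter-gather list, same rules and usage.
 */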
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

static void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

static void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

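/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */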
static int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}

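/*
 * Create userspace mapping for the DMA-coherent memory.
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */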
static int
xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t dma_addr, size_t size,
		     unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->mmap)
		return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr,
						  dma_addr, size, attrs);
#endif
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

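/*
 * This function should be called with the pages from the current domain only,
 * passing pages mapped from other domains would lead to memory corruption.
 */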
static int
xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			unsigned long attrs)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	if (xen_get_dma_ops(dev)->get_sgtable) {
#if 0
	/*
	 * This check verifies that the page belongs to the current domain and
	 * is not one mapped from another domain.
	 * This check is for debug only, and should not go to production build
	 */
		unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
		BUG_ON(!page_is_ram(bfn));
#endif
		return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
							 handle, size, attrs);
	}
#endif
	return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs);
}

const struct dma_map_ops xen_swiotlb_dma_ops = {
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.mmap = xen_swiotlb_dma_mmap,
	.get_sgtable = xen_swiotlb_get_sgtable,
};