/*
 * Dynamic DMA mapping support (software IO TLB).
 *
 * This implementation is a fallback for platforms whose devices cannot
 * address all of physical memory and that lack an IOMMU (DMA address
 * translation hardware).  DMA to or from memory outside a device's
 * addressable range is bounced through buffers reserved in low memory.
 */
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ctype.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>

#define OFFSET(val, align) ((unsigned long)((val) & ((align) - 1)))

#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
#define SG_ENT_PHYS_ADDRESS(sg)	virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))

/*
 * Maximum number of contiguous slabs a single mapping may span; must be
 * a power of 2.  The complexity of {map,unmap}_single grows linearly
 * with this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log2 of the size of each IO TLB slab: 2^11 = 2KB per slab.  The total
 * number of slabs is command line controllable.
 */
#define IO_TLB_SHIFT 11

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
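
/*
 * Worked example (assuming 4KB pages, i.e. PAGE_SHIFT == 12): each slab
 * is 1 << 11 = 2KB, so SLABS_PER_PAGE == 2, and one IO_TLB_SEGSIZE
 * segment of 128 slabs covers 128 * 2KB = 256KB, the largest single
 * mapping map_single() can satisfy.
 */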

/*
 * Minimum number of slabs worth falling back to when allocating the
 * buffer late via swiotlb_late_init_with_default_size(): no less than
 * 1MB worth.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

/*
 * Targets for the sync operations below.
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

/*
 * Set non-zero by "swiotlb=<n>,force" to bounce even buffers the device
 * could address directly; useful for testing.
 */
int swiotlb_force;

/*
 * Used to do a quick range check in unmap_single and sync_single, to see
 * if the memory was in fact allocated by this API.
 */
static char *io_tlb_start, *io_tlb_end;

/*
 * The number of IO TLB slabs between io_tlb_start and io_tlb_end.  This
 * is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer.  This sets the
 * size of that buffer.
 */
static unsigned long io_tlb_overflow = 32*1024;

void *io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available
 * from each index.
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped
 * entry for the sync operations.
 */
static unsigned char **io_tlb_orig_addr;

/*
 * Protects the above data structures in the map and unmap calls.
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;
	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
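
/*
 * Example (hypothetical values): booting with "swiotlb=65536,force"
 * reserves 65536 slabs (65536 * 2KB = 128MB with IO_TLB_SHIFT == 11)
 * and forces all streaming DMA through the bounce buffers, even for
 * memory the device could have addressed directly.
 */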

/*
 * Statically reserve bounce buffer space and initialize bounce buffer
 * data structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages.
	 */
	io_tlb_start = alloc_bootmem_low_pages(bytes);
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to maintain the list of currently free IO TLB entries: initially
	 * each slab sees the remainder of its segment as free after it.
	 */
	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer.
	 */
	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
}

void __init
swiotlb_init(void)
{
	swiotlb_init_with_default_size(64 * (1<<20));	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like the above, except with a little less work.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
	unsigned int order;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages, retrying with successively
	 * smaller orders but never below IO_TLB_MIN_SLABS worth.
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
							order);
		if (io_tlb_start)
			break;
		order--;
	}

	if (!io_tlb_start)
		goto cleanup1;

	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
		bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	}
	io_tlb_end = io_tlb_start + bytes;
	memset(io_tlb_start, 0, bytes);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to maintain the list of currently free IO TLB entries, exactly as
	 * in swiotlb_init_with_default_size() above.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
				get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup2;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
				get_order(io_tlb_nslabs * sizeof(char *)));
	if (!io_tlb_orig_addr)
		goto cleanup3;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));

	/*
	 * Get the overflow emergency buffer.
	 */
	io_tlb_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
					get_order(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		goto cleanup4;

	printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
	       "0x%lx\n", bytes >> 20,
	       virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
							      sizeof(char *)));
	io_tlb_orig_addr = NULL;
cleanup3:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
							 sizeof(int)));
	io_tlb_list = NULL;
cleanup2:
	io_tlb_end = NULL;
	free_pages((unsigned long)io_tlb_start, order);
	io_tlb_start = NULL;
cleanup1:
	io_tlb_nslabs = req_nslabs;
	return -ENOMEM;
}
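
/*
 * Example (hypothetical architecture code): an arch that discovers at
 * runtime that it needs bounce buffering might do
 *
 *	if (!iommu_detected && max_pfn > MAX_DMA32_PFN)
 *		swiotlb_late_init_with_default_size(64 * (1<<20));
 *
 * where iommu_detected and MAX_DMA32_PFN stand in for whatever the
 * architecture actually uses to make that decision.
 */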

/*
 * Return non-zero if the given bus address is not reachable through the
 * device's DMA mask.
 */
static int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
	dma_addr_t mask = 0xffffffff;

	/* If the device has a mask, use it; otherwise assume 32 bits. */
	if (hwdev && hwdev->dma_mask)
		mask = *hwdev->dma_mask;
	return (addr & ~mask) != 0;
}
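
/*
 * Worked example: for an ISA-style device with a 24-bit mask
 * (*hwdev->dma_mask == 0x00ffffff), a buffer at bus address 0x01400000
 * has bits set above the mask (0x01400000 & ~0x00ffffff != 0), so it
 * needs bouncing; a buffer at 0x00c00000 does not.
 */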

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */
static void *
map_single(struct device *hwdev, char *buffer, size_t size, int dir)
{
	unsigned long flags;
	char *dma_addr;
	unsigned int nslots, stride, index, wrap;
	int i;

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find a run of IO TLB entries that will fit this request and
	 * allocate a buffer from that part of the IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		wrap = index = ALIGN(io_tlb_index, stride);

		if (index >= io_tlb_nslabs)
			wrap = index = 0;

		do {
			/*
			 * If we find a slot that indicates we have 'nslots'
			 * number of contiguous buffers, we allocate the
			 * buffers from that slot and mark the entries as '0'
			 * indicating unavailable.
			 */
			if (io_tlb_list[index] >= nslots) {
				int count = 0;

				for (i = index; i < (int) (index + nslots); i++)
					io_tlb_list[i] = 0;
				for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
					io_tlb_list[i] = ++count;
				dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);

				/*
				 * Update the indices to avoid searching in
				 * the next round.
				 */
				io_tlb_index = ((index + nslots) < io_tlb_nslabs
						? (index + nslots) : 0);

				goto found;
			}
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
		} while (index != wrap);

		spin_unlock_irqrestore(&io_tlb_lock, flags);
		return NULL;
	}
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA
	 * address.  This is needed when we sync the memory.  Then we sync
	 * the buffer if needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = buffer + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		memcpy(dma_addr, buffer, size);

	return dma_addr;
}
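
/*
 * Free-list accounting, worked through (hypothetical numbers): a fresh
 * segment holds io_tlb_list[] = { 128, 127, 126, ... }.  A 3-slot
 * allocation at index 0 zeroes entries 0..2, leaving { 0, 0, 0, 125,
 * 124, ... }, so the next search sees immediately that only index 3
 * onward can satisfy a request.
 */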

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
static void
unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		/*
		 * bounce... copy the data back into the original buffer,
		 * then delete the bounce buffer.
		 */
		memcpy(buffer, dma_addr, size);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the
	 * entries with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
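
/*
 * Merge example (hypothetical state): freeing the 3-slot allocation
 * described above, while the next 125 slots of the segment are free,
 * turns { 0, 0, 0, 125, ... } back into { 128, 127, 126, 125, ... }:
 * step 1 counts up from the successor's 125, and step 2 would extend
 * the run into any free predecessors within the same IO_TLB_SEGSIZE
 * segment.
 */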

static void
sync_single(struct device *hwdev, char *dma_addr, size_t size,
	    int dir, int target)
{
	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
	char *buffer = io_tlb_orig_addr[index];

	buffer += ((unsigned long)dma_addr & ((1 << IO_TLB_SHIFT) - 1));

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(buffer, dma_addr, size);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			memcpy(dma_addr, buffer, size);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);

	/*
	 * XXX fix me: the DMA API should pass us an explicit DMA mask
	 * instead of forcing every allocation into the GFP_DMA zone.
	 */
	flags |= GFP_DMA;

	ret = (void *)__get_free_pages(flags, order);
	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
		/*
		 * The allocated memory isn't reachable by the device;
		 * free it and fall back on map_single() below.
		 */
		free_pages((unsigned long) ret, order);
		ret = NULL;
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA
		 * to GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		dma_addr_t handle;
		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
		if (swiotlb_dma_mapping_error(handle))
			return NULL;

		ret = bus_to_virt(handle);
	}

	memset(ret, 0, size);
	dev_addr = virt_to_bus(ret);

	/* Confirm the address can be DMA'd by the device. */
	if (address_needs_mapping(hwdev, dev_addr)) {
		printk(KERN_ERR "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
		       (unsigned long long)*hwdev->dma_mask,
		       (unsigned long long)dev_addr);
		panic("swiotlb_alloc_coherent: allocated memory is out of "
		      "range for device");
	}
	*dma_handle = dev_addr;
	return ret;
}
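
/*
 * Example (hypothetical driver): allocating a descriptor ring that both
 * the CPU and the device access concurrently:
 *
 *	struct my_desc *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = swiotlb_alloc_coherent(&pdev->dev, RING_BYTES,
 *				      &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, ioaddr + RING_BASE);	// tell the device
 *	...
 *	swiotlb_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * struct my_desc, RING_BYTES, ioaddr and RING_BASE are illustrative
 * names, not part of this API; drivers normally reach this code through
 * the generic dma_alloc_coherent() interface.
 */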

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dma_handle)
{
	WARN_ON(irqs_disabled());
	if (!(vaddr >= (void *)io_tlb_start
	      && vaddr < (void *)io_tlb_end))
		free_pages((unsigned long) vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
		swiotlb_unmap_single(hwdev, dma_handle, size, DMA_TO_DEVICE);
}

static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation.  This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev->bus_id : "?");

	if (size > io_tlb_overflow && do_panic) {
		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Memory would be corrupted\n");
		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
			panic("DMA: Random memory would be DMAed\n");
	}
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.
 * The bus address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory
 * until either swiotlb_unmap_single or swiotlb_sync_single_for_cpu is
 * performed.
 */
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
	dma_addr_t dev_addr = virt_to_bus(ptr);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the buffer passed in happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = map_single(hwdev, ptr, size, dir);
	if (!map) {
		swiotlb_full(hwdev, size, dir, 1);
		map = io_tlb_overflow_buffer;
	}

	dev_addr = virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (address_needs_mapping(hwdev, dev_addr))
		panic("map_single: bounce buffer is not DMA'ble");

	return dev_addr;
}
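
/*
 * Example (hypothetical driver, DMA_TO_DEVICE transfer):
 *
 *	dma_addr_t bus = swiotlb_map_single(&pdev->dev, skb->data,
 *					    skb->len, DMA_TO_DEVICE);
 *	if (swiotlb_dma_mapping_error(bus))
 *		return -ENOMEM;
 *	program_device_tx(ioaddr, bus, skb->len);	// illustrative
 *	...
 *	swiotlb_unmap_single(&pdev->dev, bus, skb->len, DMA_TO_DEVICE);
 *
 * program_device_tx and ioaddr are stand-ins for device-specific code;
 * drivers normally reach this path through dma_map_single().
 */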

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size
 * must match what was provided for in a previous swiotlb_map_single call.
 * All other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
		     int dir)
{
	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		unmap_single(hwdev, dma_addr, size, dir);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

/*
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the
 * buffer using the cpu, yet do not wish to teardown the dma mapping, you
 * must call this function before doing so.  At the next point you give
 * the dma address back to the card, you must first perform a
 * swiotlb_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, int dir, int target)
{
	char *dma_addr = bus_to_virt(dev_addr);

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, int dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
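
/*
 * Example (hypothetical polling loop): to peek at a status word the
 * device writes into a mapped DMA_FROM_DEVICE buffer without unmapping:
 *
 *	swiotlb_sync_single_for_cpu(dev, bus, sizeof(u32), DMA_FROM_DEVICE);
 *	status = *(volatile u32 *)buf;
 *	swiotlb_sync_single_for_device(dev, bus, sizeof(u32),
 *				       DMA_FROM_DEVICE);
 *
 * The for_cpu call copies the bounce buffer back so the CPU read sees
 * the device's data; the for_device call returns ownership to the card.
 */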

/*
 * Same as above, but for a sub-range of the mapping.
 */
static void
swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
			  unsigned long offset, size_t size,
			  int dir, int target)
{
	char *dma_addr = bus_to_virt(dev_addr) + offset;

	BUG_ON(dir == DMA_NONE);
	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
		sync_single(hwdev, dma_addr, size, dir, target);
	else if (dir == DMA_FROM_DEVICE)
		dma_mark_clean(dma_addr, size);
}

void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_CPU);
}

void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size, int dir)
{
	swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir,
				  SYNC_FOR_DEVICE);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for
 * DMA.  This is the scatter-gather version of the above
 * swiotlb_map_single interface.  Here the scatter gather list elements
 * are each tagged with the appropriate dma address and length.  They are
 * obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_single are
 * the same here.
 */
int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       int dir)
{
	struct scatterlist *sg;
	void *addr;
	dma_addr_t dev_addr;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		addr = SG_ENT_VIRT_ADDRESS(sg);
		dev_addr = virt_to_bus(addr);
		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
			void *map = map_single(hwdev, addr, sg->length, dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg(hwdev, sgl, i, dir);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
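
/*
 * Example (hypothetical driver): mapping and walking a scatterlist:
 *
 *	struct scatterlist *sg;
 *	int i, n = swiotlb_map_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, n, i)
 *		program_device_seg(ioaddr, i, sg->dma_address,
 *				   sg->dma_length);	// illustrative
 *	...
 *	swiotlb_unmap_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
 *
 * program_device_seg and ioaddr are stand-ins for device-specific code.
 */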

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read
 * rules concerning calls here are the same as for swiotlb_unmap_single()
 * above.
 */
void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 int dir)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			unmap_single(hwdev, bus_to_virt(sg->dma_address),
				     sg->dma_length, dir);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
	}
}

/*
 * Make physical memory consistent for a set of streaming mode DMA
 * translations after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same
 * rules and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, int dir, int target)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
			sync_single(hwdev, bus_to_virt(sg->dma_address),
				    sg->dma_length, dir, target);
		else if (dir == DMA_FROM_DEVICE)
			dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
	}
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, int dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

/*
 * Return non-zero if the given DMA address is that of the overflow
 * buffer, i.e. the corresponding map operation failed.
 */
int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return virt_to_bus(io_tlb_end - 1) <= mask;
}
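
/*
 * Example (illustrative; drivers normally go through dma_set_mask(),
 * which consults this via the DMA API): a driver probing for 24-bit
 * support before programming its mask might do
 *
 *	if (!swiotlb_dma_supported(&pdev->dev, 0x00ffffffULL))
 *		return -EIO;
 */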

EXPORT_SYMBOL(swiotlb_map_single);
EXPORT_SYMBOL(swiotlb_unmap_single);
EXPORT_SYMBOL(swiotlb_map_sg);
EXPORT_SYMBOL(swiotlb_unmap_sg);
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_single_for_device);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu);
EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
EXPORT_SYMBOL(swiotlb_alloc_coherent);
EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);