// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/swiotlb.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions.  In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

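/*
 * Translate a CPU physical address to a device (bus) address for the
 * direct-mapping case.  When the device is forced to use unencrypted
 * memory (e.g. under AMD SME/SEV), __phys_to_dma() is used so that the
 * memory encryption bit is not set in the returned address.
 */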
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

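/*
 * Report the smallest DMA mask that covers all of system memory.  The result
 * is the next power of two above the highest reachable DMA address, minus
 * one.  E.g. (hypothetical numbers) if the last page of memory maps to DMA
 * address 0x1_2345_6fff, fls64() returns 33 and the required mask is
 * (1ULL << 33) - 1 = 0x1_ffff_ffff.
 */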
u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);

	if (force_dma_unencrypted(dev))
		*phys_limit = __dma_to_phys(dev, dma_limit);
	else
		*phys_limit = dma_to_phys(dev, dma_limit);

	/*
	 * Optimistically try the zone that the physical address limit falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

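/*
 * Check that the whole allocation, up to and including its last byte, is
 * addressable under both the device's coherent DMA mask and any bus limit.
 */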
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

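/*
 * Allocate pages that the device can actually reach: first try the
 * CMA/contiguous allocator in the zone suggested by
 * __dma_direct_optimal_gfp_mask(), then the plain page allocator, retrying
 * with GFP_DMA32 and finally GFP_DMA if the pages that came back are not
 * addressable by the device.
 */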
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, unsigned long attrs)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_limit);
	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, alloc_size);
		page = NULL;
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

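/*
 * Four paths, in order of preference:
 *  1. non-blocking allocations that need a remapped (uncached) mapping are
 *     served from the atomic pool,
 *  2. DMA_ATTR_NO_KERNEL_MAPPING returns the struct page as an opaque cookie
 *     without creating a kernel mapping,
 *  3. allocations that need an uncached mapping (or highmem pages with
 *     CONFIG_DMA_REMAP) are remapped with vmap using dma_pgprot(),
 *  4. everything else uses the kernel linear mapping, optionally marked
 *     decrypted and/or uncached by the architecture.
 */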
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs) &&
	    !gfpflags_allow_blocking(gfp)) {
		ret = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, gfp, attrs);
	if (!page)
		return NULL;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* remove any dirty cache lines on the kernel alias */
		if (!PageHighMem(page))
			arch_dma_prep_coherent(page, size);
		/* return the page pointer as the opaque cookie */
		ret = page;
		goto done;
	}

	if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	     dma_alloc_need_uncached(dev, attrs)) ||
	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, PAGE_ALIGN(size));

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
				dma_pgprot(dev, PAGE_KERNEL, attrs),
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
		memset(ret, 0, size);
		goto done;
	}

	if (PageHighMem(page)) {
		/*
		 * Depending on the cma= arguments and per-arch setup
		 * dma_alloc_contiguous could return highmem pages.
		 * Without remapping there is no way to return them here,
		 * so log an error and fail.
		 */
		dev_info(dev, "Rejecting highmem page from CMA.\n");
		goto out_free_pages;
	}

	ret = page_address(page);
	if (force_dma_unencrypted(dev))
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));

	memset(ret, 0, size);

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    dma_alloc_need_uncached(dev, attrs)) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_free_pages;
	}
done:
	if (force_dma_unencrypted(dev))
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	else
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
out_free_pages:
	dma_free_contiguous(dev, page, size);
	return NULL;
}

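/*
 * Undo dma_direct_alloc_pages(): tear down the kernel mapping (if any),
 * re-encrypt the memory if it was decrypted for the device, and give the
 * pages back to CMA or the page allocator.
 */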
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, PAGE_ALIGN(size)))
		return;

	if (force_dma_unencrypted(dev))
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
		vunmap(cpu_addr);
	else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
		arch_dma_clear_uncached(cpu_addr, size);

	dma_free_contiguous(dev, dma_direct_to_page(dev, dma_addr), size);
}

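/*
 * Fall back to the architecture's own coherent allocator when an uncached
 * mapping is required but the kernel can provide it neither by remapping
 * (CONFIG_DMA_DIRECT_REMAP) nor via arch_dma_set_uncached()
 * (CONFIG_ARCH_HAS_DMA_SET_UNCACHED).
 */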
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_alloc_need_uncached(dev, attrs))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

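/*
 * Streaming DMA sync helpers.  For the device they copy data into any
 * swiotlb bounce buffer first and then write back CPU caches; for the CPU
 * they invalidate caches first and then copy back out of the bounce buffer,
 * so that the copy observes what the device actually wrote.
 */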
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_device);

void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length, dir);
	}
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
	defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);

void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
					SYNC_FOR_CPU);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);

void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_page);

void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
				attrs);
}
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif

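/*
 * Map a single page for streaming DMA.  The dma-mapping core typically ends
 * up here (e.g. via dma_map_page() or dma_map_single()) when the device has
 * no IOMMU-backed dma_map_ops.  Buffers the device cannot address directly
 * are bounced through swiotlb; otherwise the caches are made coherent and
 * the translated bus address is returned.
 */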
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_page);

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
EXPORT_SYMBOL(dma_direct_map_sg);

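/*
 * Map a physical resource (e.g. MMIO for peer-to-peer DMA).  The physical
 * address is used as the bus address as-is: there is no struct page behind
 * it, so it can be neither bounced through swiotlb nor cache-maintained.
 */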
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			"DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
EXPORT_SYMBOL(dma_direct_map_resource);

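/*
 * Direct-mapped coherent allocations are always physically contiguous, so
 * the scatterlist describing them for dma_get_sgtable() needs just one
 * entry covering the whole (page-aligned) buffer.
 */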
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

#ifdef CONFIG_MMU
bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}
#else /* CONFIG_MMU */
bool dma_direct_can_mmap(struct device *dev)
{
	return false;
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
#endif /* CONFIG_MMU */

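/*
 * A device is supported by dma-direct if its DMA mask covers all memory the
 * kernel may hand it.  Masks of 32 bits or more are always accepted; e.g. a
 * (hypothetical) device advertising a 30-bit mask is only accepted when all
 * of memory, or at least ZONE_DMA, sits below 1 GiB in bus address terms.
 */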
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * __phys_to_dma() here so that the SME encryption mask isn't part of
	 * the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= __phys_to_dma(dev, min_mask);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active() &&
	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}