// SPDX-License-Identifier: GPL-2.0
/*
 * DMA operations that map physical memory directly without using an IOMMU or
 * flushing caches.
 */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

#define DIRECT_MAPPING_ERROR		0

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx\n",
				caller, &dma_addr, size, *dev->dma_mask);
		}
		return false;
	}
	return true;
}
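
/*
 * Illustrative example (not from the original source): for a device with
 * *dev->dma_mask == DMA_BIT_MASK(32), a mapping of dma_addr 0xfffff000
 * with size 0x2000 ends at 0x100000fff, exceeds the mask, and logs the
 * overflow message above; for masks below 32 bits the failure is reported
 * only through the return value, since such overflows are routine there.
 */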

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t addr = force_dma_unencrypted() ?
		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
	return addr + size - 1 <= dev->coherent_dma_mask;
}
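
/*
 * Illustrative example (not from the original source): assuming an identity
 * phys-to-DMA translation and dev->coherent_dma_mask == DMA_BIT_MASK(24)
 * (0xffffff), a 0x1000-byte buffer at phys 0xfff000 passes
 * (0xfff000 + 0x1000 - 1 == 0xffffff), while the same buffer at phys
 * 0x1000000 fails, which is what drives the retry-in-a-lower-zone logic
 * in dma_direct_alloc() below.
 */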

void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	void *ret;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;

	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		gfp |= GFP_DMA;
	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;

again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}
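
/*
 * Usage sketch (illustrative, not part of this file): a driver reaches this
 * allocator through the generic DMA API rather than by calling
 * dma_direct_alloc() directly.  Assuming a hypothetical device limited to
 * 32-bit DMA:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *	buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, handle);
 */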

/*
 * NOTE: this function must never look at the dma_addr argument, because we
 * want to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;
	return dma_addr;
}
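
/*
 * Usage sketch (illustrative, not part of this file): streaming mappings
 * arrive here via the generic dma_map_page()/dma_map_single() wrappers, and
 * the result must be checked with dma_mapping_error():
 *
 *	dma_addr_t dma = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */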

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}
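
/*
 * Usage sketch (illustrative, not part of this file): drivers call
 * dma_map_sg() on a populated scatterlist and must treat a return of 0 as
 * failure.  program_hw() below is a hypothetical stand-in for whatever
 * device-specific setup consumes the DMA addresses:
 *
 *	struct scatterlist *sg;
 *	int i, mapped;
 *
 *	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, mapped, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 */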

int dma_direct_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_ZONE_DMA
	if (mask < phys_to_dma(dev, DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)))
		return 0;
#else
	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
		return 0;
#endif
	/*
	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
	 * as many DMA address bits as the device itself supports.
	 */
	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
		return 0;
	return 1;
}
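
/*
 * Usage sketch (illustrative, not part of this file): dma_direct_supported()
 * is what decides whether dma_set_mask() succeeds for a device using these
 * ops, so a driver typically tries a wide mask first and falls back:
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */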

int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
};
EXPORT_SYMBOL(dma_direct_ops);
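
/*
 * Integration sketch (illustrative, not part of this file): an architecture
 * opts in by returning these ops from its get_arch_dma_ops() hook when no
 * IOMMU is in use, e.g.:
 *
 *	static inline const struct dma_map_ops *
 *	get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &dma_direct_ops;
 *	}
 */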