// SPDX-License-Identifier: GPL-2.0
/*
 * DMA mapping support for the Intel IPU3 (ImgU) driver: buffer allocation,
 * IOVA management and mapping of buffers through the ImgU MMU.
 */

#include <linux/vmalloc.h>

#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"

/*
 * Free a buffer previously allocated by imgu_dmamap_alloc_buffer().
 */
static void imgu_dmamap_free_buffer(struct page **pages,
				    size_t size)
{
	int count = size >> PAGE_SHIFT;

	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

/*
 * Allocate the array of struct page pointers and the pages backing a buffer
 * of @size bytes.  Based on the implementation of __iommu_dma_alloc_pages()
 * defined in drivers/iommu/dma-iommu.c.
 */
static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, count = size >> PAGE_SHIFT;
	unsigned int order_mask = 1;
	const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;

	/* Allocate memory for the array of page pointers */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);

	if (!pages)
		return NULL;

	gfp |= __GFP_HIGHMEM | __GFP_ZERO;

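	/*
	 * Fill the buffer chunk by chunk: for each remaining chunk, try the
	 * highest order allowed by order_mask that still fits in @count,
	 * falling back to lower orders (without warnings or retries) until
	 * an allocation succeeds.  Higher-order pages are split so the array
	 * always ends up holding order-0 pages.  With order_mask fixed at 1
	 * this currently degenerates to allocating one page at a time.
	 */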
	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | high_order_gfp : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			}

			__free_pages(page, order);
		}
		if (!page) {
			imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}

	return pages;
}

/**
 * imgu_dmamap_alloc - allocate a buffer and map it into the MMU and kernel VA
 * @imgu: imgu device
 * @map: imgu_css_map to store the mapping details
 * @len: size of the buffer in bytes
 *
 * Returns:
 *  kernel virtual address of the mapped buffer on success
 *  %NULL on failure
 */
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
			size_t len)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct device *dev = &imgu->pci_dev->dev;
	size_t size = PAGE_ALIGN(len);
	int count = size >> PAGE_SHIFT;
	struct page **pages;
	dma_addr_t iovaddr;
	struct iova *iova;
	int i, rval;

	dev_dbg(dev, "%s: allocating %zu\n", __func__, size);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return NULL;

	pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
	if (!pages)
		goto out_free_iova;

	/* Map each page into the ImgU MMU at consecutive IOVA addresses */
	iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
	for (i = 0; i < count; ++i) {
		rval = imgu_mmu_map(imgu->mmu, iovaddr,
				    page_to_phys(pages[i]), PAGE_SIZE);
		if (rval)
			goto out_unmap;

		iovaddr += PAGE_SIZE;
	}

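	/*
	 * Create a contiguous kernel mapping of the pages.  VM_USERMAP marks
	 * the area as one that may later be remapped to user space with
	 * remap_vmalloc_range().
	 */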
	map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!map->vaddr)
		goto out_unmap;

	map->pages = pages;
	map->size = size;
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);

	dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
		size, &map->daddr, map->vaddr);

	return map->vaddr;

out_unmap:
	imgu_dmamap_free_buffer(pages, size);
	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       i * PAGE_SIZE);

out_free_iova:
	__free_iova(&imgu->iova_domain, iova);

	return NULL;
}

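/*
 * Unmap a mapping established by imgu_dmamap_alloc() or imgu_dmamap_map_sg()
 * from the ImgU MMU and release its IOVA range.  The backing pages and the
 * kernel vmap, if any, are left to the caller (see imgu_dmamap_free()).
 */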
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
	struct iova *iova;

	iova = find_iova(&imgu->iova_domain,
			 iova_pfn(&imgu->iova_domain, map->daddr));
	if (WARN_ON(!iova))
		return;

	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       iova_size(iova) << iova_shift(&imgu->iova_domain));

	__free_iova(&imgu->iova_domain, iova);
}

/*
 * Counterpart of imgu_dmamap_alloc(): unmap, then free the vmap and pages.
 */
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
	dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
		__func__, map->size, &map->daddr, map->vaddr);

	if (!map->vaddr)
		return;

	imgu_dmamap_unmap(imgu, map);

	vunmap(map->vaddr);
	imgu_dmamap_free_buffer(map->pages, map->size);
	map->vaddr = NULL;
}

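/*
 * Map an already-allocated scatterlist into a contiguous IOVA range.  Every
 * entry must start at a page boundary, and all but the last must cover a
 * whole number of pages, so that the device sees one contiguous range.
 */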
int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
		       int nents, struct imgu_css_map *map)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct scatterlist *sg;
	struct iova *iova;
	size_t size = 0;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset)
			return -EINVAL;

		if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
			return -EINVAL;

		size += sg->length;
	}

	size = iova_align(&imgu->iova_domain, size);
	dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
		nents, size >> shift);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return -ENOMEM;

	dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);

	if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
			    sglist, nents) < size)
		goto out_fail;

	memset(map, 0, sizeof(*map));
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
	map->size = size;

	return 0;

out_fail:
	__free_iova(&imgu->iova_domain, iova);

	return -EFAULT;
}

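/*
 * Set up the IOVA domain used for all ImgU DMA mappings, covering the MMU
 * aperture with IPU3_PAGE_SIZE granularity.
 */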
int imgu_dmamap_init(struct imgu_device *imgu)
{
	unsigned long order, base_pfn;
	int ret = iova_cache_get();

	if (ret)
		return ret;

	order = __ffs(IPU3_PAGE_SIZE);
	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);

	return 0;
}

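/*
 * Counterpart of imgu_dmamap_init(): tear down the IOVA domain.
 */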
void imgu_dmamap_exit(struct imgu_device *imgu)
{
	put_iova_domain(&imgu->iova_domain);
	iova_cache_put();
}