/*
 * DMA region bookkeeping routines
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>

#include "dma.h"

/* dma_prog_region: a small, physically contiguous buffer allocated with
 * pci_alloc_consistent() (coherent DMA memory) */

void dma_prog_region_init(struct dma_prog_region *prog)
{
	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
			  struct pci_dev *dev)
{
	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	prog->n_pages = n_bytes >> PAGE_SHIFT;

	prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
	if (!prog->kvirt) {
		printk(KERN_ERR
		       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
		dma_prog_region_free(prog);
		return -ENOMEM;
	}

	prog->dev = dev;

	return 0;
}

void dma_prog_region_free(struct dma_prog_region *prog)
{
	if (prog->kvirt) {
		pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
				    prog->kvirt, prog->bus_addr);
	}

	prog->kvirt = NULL;
	prog->dev = NULL;
	prog->n_pages = 0;
	prog->bus_addr = 0;
}
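
/*
 * Illustrative usage only (not part of this file): a caller might pair the
 * three dma_prog_region helpers roughly as below; "ohci" and PROG_BYTES are
 * hypothetical names.
 *
 *	struct dma_prog_region prog;
 *
 *	dma_prog_region_init(&prog);
 *	if (dma_prog_region_alloc(&prog, PROG_BYTES, ohci->pdev))
 *		return -ENOMEM;
 *	// the CPU writes through prog.kvirt, the device is handed
 *	// prog.bus_addr; coherent memory needs no explicit sync calls
 *	dma_prog_region_free(&prog);
 */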

/* dma_region: a large, non-contiguous buffer allocated with vmalloc_32() and
 * mapped for streaming DMA through a scatter/gather list */

/* dma_region_init - clear out all fields but do not allocate anything */
void dma_region_init(struct dma_region *dma)
{
	dma->kvirt = NULL;
	dma->dev = NULL;
	dma->n_pages = 0;
	dma->n_dma_pages = 0;
	dma->sglist = NULL;
}

/* dma_region_alloc - allocate the buffer and map it to the IOMMU */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
		     struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* round up to page size */
	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
		goto err;
	}

	/* clear the buffer so no stale data reaches the user */
	memset(dma->kvirt, 0, n_bytes);

	/* allocate the scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
		goto err;
	}

	sg_init_table(dma->sglist, dma->n_pages);

	/* fill the scatter/gather list with the buffer's pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
			    PAGE_SIZE, 0);
	}

	/* map the scatter/gather list to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

      err:
	dma_region_free(dma);
	return -ENOMEM;
}
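
/*
 * Illustrative usage only (not part of this file): allocating a 64 KiB
 * receive buffer mapped for device-to-memory DMA; "recv" and "pdev" are
 * hypothetical names and error handling is abbreviated.
 *
 *	struct dma_region recv;
 *
 *	dma_region_init(&recv);
 *	if (dma_region_alloc(&recv, 64 * 1024, pdev, PCI_DMA_FROMDEVICE))
 *		return -ENOMEM;
 *	// program the device with bus addresses obtained from
 *	// dma_region_offset_to_bus(&recv, offset) ...
 *	dma_region_free(&recv);
 */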

/* dma_region_free - unmap and free the buffer */
void dma_region_free(struct dma_region *dma)
{
	if (dma->n_dma_pages) {
		pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
			     dma->direction);
		dma->n_dma_pages = 0;
		dma->dev = NULL;
	}

	vfree(dma->sglist);
	dma->sglist = NULL;

	vfree(dma->kvirt);
	dma->kvirt = NULL;
	dma->n_pages = 0;
}

/*
 * Find the scatterlist entry that contains the byte at @offset (starting the
 * search at entry @start).  Returns the entry's index and stores the byte's
 * remaining offset within that entry in *rem.
 */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
				  unsigned int start, unsigned long *rem)
{
	int i;
	unsigned long off = offset;

	for (i = start; i < dma->n_dma_pages; i++) {
		if (off < sg_dma_len(&dma->sglist[i])) {
			*rem = off;
			break;
		}

		off -= sg_dma_len(&dma->sglist[i]);
	}

	BUG_ON(i >= dma->n_dma_pages);

	return i;
}

/*
 * dma_region_offset_to_bus - get the bus address of the byte at @offset
 * relative to the beginning of the region
 */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
				    unsigned long offset)
{
	unsigned long rem = 0;

	struct scatterlist *sg =
	    &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
	return sg_dma_address(sg) + rem;
}
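
/*
 * Illustrative usage only (not part of this file): because the region is
 * built from individually mapped pages, bus addresses are not contiguous
 * across page boundaries, so each address handed to the device must be
 * looked up through the scatterlist.  "desc" and "recv" are hypothetical.
 *
 *	// point a hypothetical device descriptor at the third page + 16 bytes
 *	desc->data_address =
 *		cpu_to_le32(dma_region_offset_to_bus(&recv,
 *						     3 * PAGE_SIZE + 16));
 */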

/*
 * dma_region_sync_for_cpu - sync the CPU's view of part of the buffer
 * (call before the CPU reads data written by the device)
 */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
			     unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
				dma->direction);
}

/*
 * dma_region_sync_for_device - sync the device's view of part of the buffer
 * (call after the CPU has written data the device will read)
 */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
				unsigned long len)
{
	int first, last;
	unsigned long rem = 0;

	if (!len)
		len = 1;

	first = dma_region_find(dma, offset, 0, &rem);
	last = dma_region_find(dma, rem + len - 1, first, &rem);

	pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
				   last - first + 1, dma->direction);
}
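
/*
 * Illustrative usage only (not part of this file): for a streaming mapping
 * the sync calls hand ownership of a byte range back and forth between CPU
 * and device; "recv", "process_packet" and the pkt_* variables are
 * hypothetical.
 *
 *	// device has DMA'd a packet into the buffer; claim it for the CPU
 *	dma_region_sync_for_cpu(&recv, pkt_offset, pkt_len);
 *	process_packet(recv.kvirt + pkt_offset, pkt_len);
 *	// give the range back to the device for the next transfer
 *	dma_region_sync_for_device(&recv, pkt_offset, pkt_len);
 */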

#ifdef CONFIG_MMU

/* fault handler: back the mmap'ed region with the vmalloc'ed pages */
static int dma_region_pagefault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct dma_region *dma = (struct dma_region *)vma->vm_private_data;

	if (!dma->kvirt)
		return VM_FAULT_SIGBUS;

	if (vmf->pgoff >= dma->n_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct dma_region_vm_ops = {
	.fault = dma_region_pagefault,
};

/* dma_region_mmap - map the buffer into a user space process */
int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	unsigned long size;

	if (!dma->kvirt)
		return -EINVAL;

	/* the region must be mapped starting at offset 0 */
	if (vma->vm_pgoff != 0)
		return -EINVAL;

	/* the mapping must not be larger than the region */
	size = vma->vm_end - vma->vm_start;
	if (size > (dma->n_pages << PAGE_SHIFT))
		return -EINVAL;

	vma->vm_ops = &dma_region_vm_ops;
	vma->vm_private_data = dma;
	vma->vm_file = file;
	vma->vm_flags |= VM_RESERVED | VM_ALWAYSDUMP;

	return 0;
}
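
/*
 * Illustrative usage only (not part of this file): a character device could
 * expose the buffer by calling dma_region_mmap() from its .mmap hook; the
 * "mydev" struct and its fields below are hypothetical.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *dev = file->private_data;
 *
 *		return dma_region_mmap(&dev->recv, file, vma);
 *	}
 *
 * Pages are then faulted in on demand by dma_region_pagefault() rather than
 * being inserted into the page tables up front.
 */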

#else

/* without an MMU the buffer cannot be mapped into user space */
int dma_region_mmap(struct dma_region *dma, struct file *file,
		    struct vm_area_struct *vma)
{
	return -EINVAL;
}

#endif				/* CONFIG_MMU */