/*
 * dmabounce: special dma_{map/unmap/dma_sync}_* routines for systems
 * with limited DMA windows.  When a driver hands us a buffer the
 * device cannot address, the data is copied through a "safe" bounce
 * buffer that lies inside the device's DMA range.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

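/*
 * One live bounce mapping: ptr/size/direction describe the caller's
 * original (unsafe) buffer, while safe/safe_dma_addr describe the
 * DMA-able copy handed to the device.  pool points at the dmabounce
 * pool the safe copy came from, or is NULL if it was allocated with
 * dma_alloc_coherent().
 */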
struct safe_buffer {
	struct list_head node;

	void *ptr;
	size_t size;
	int direction;

	struct dmabounce_pool *pool;
	void *safe;
	dma_addr_t safe_dma_addr;
};

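/* A dma_pool of fixed-size safe buffers, plus allocation statistics. */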
struct dmabounce_pool {
	unsigned long size;
	struct dma_pool *pool;
#ifdef STATS
	unsigned long allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool small;
	struct dmabounce_pool large;

	rwlock_t lock;
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif

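/*
 * Allocate a safe buffer for the given request and add it to the
 * device's list of outstanding bounce buffers.
 */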
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

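/* Find the safe buffer previously handed out for this DMA address. */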
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr == safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

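/* Remove a safe buffer from the tracking list and release its memory. */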
static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

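/*
 * Look up the safe buffer (if any) behind a dma_addr_t for a device,
 * complaining if the address is not a valid mapping.
 */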
static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	dma_addr_t dma_addr;
	int needs_bounce = 0;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	dma_addr = virt_to_dma(dev, ptr);

	if (dev->dma_mask) {
		unsigned long mask = *dev->dma_mask;
		unsigned long limit;

		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return ~0;
		}

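		/* Bounce if any part of the buffer falls outside the mask. */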
		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
	}

	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
		struct safe_buffer *buf;

		buf = alloc_safe_buffer(device_info, ptr, size, dir);
		if (!buf) {
			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
				__func__, ptr);
			return ~0;
		}

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		if ((dir == DMA_TO_DEVICE) ||
		    (dir == DMA_BIDIRECTIONAL)) {
			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
				__func__, ptr, buf->safe, size);
			memcpy(buf->safe, ptr, size);
		}
		ptr = buf->safe;

		dma_addr = buf->safe_dma_addr;
	} else {
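		/*
		 * No bouncing for this mapping: just do the normal
		 * CPU-to-device cache maintenance.
		 */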
		__dma_single_cpu_to_dev(ptr, size, dir);
	}

	return dma_addr;
}

static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");

	if (buf) {
		BUG_ON(buf->size != size);
		BUG_ON(buf->direction != dir);

		dev_dbg(dev,
			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
			buf->safe, buf->safe_dma_addr);

		DO_STATS(dev->archdata.dmabounce->bounce_count++);

		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
			void *ptr = buf->ptr;

			dev_dbg(dev,
				"%s: copy back safe %p to unsafe %p size %d\n",
				__func__, buf->safe, ptr, size);
			memcpy(ptr, buf->safe, size);

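			/*
			 * The copy back was done with the CPU, so flush the
			 * dcache for the original buffer to make the new
			 * data visible to its other mappings.
			 */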
			__cpuc_flush_dcache_area(ptr, size);
		}
		free_safe_buffer(dev->archdata.dmabounce, buf);
	} else {
		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
	}
}

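/*
 * Map a single buffer for DMA, bouncing it through a safe buffer if the
 * device cannot address it directly.
 */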
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, ptr, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(__dma_map_single);

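/*
 * Tear down a mapping created by __dma_map_single().  For a bounced
 * DMA_FROM_DEVICE/DMA_BIDIRECTIONAL mapping this copies the data back
 * into the original buffer and frees the safe buffer.
 */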
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_single);

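/*
 * Map part of a page for DMA.  Highmem pages cannot be bounced because
 * map_single() works on kernel virtual addresses.
 */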
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	BUG_ON(!valid_dma_direction(dir));

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
			"is not supported\n");
		return ~0;
	}

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(__dma_map_page);

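/*
 * Tear down a mapping created by __dma_map_page(), copying back and
 * freeing any safe buffer that was used.
 */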
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
		__func__, (void *) dma_addr, size, dir);

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(__dma_unmap_page);

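/*
 * Sync (part of) a mapping for the CPU: if the mapping was bounced, copy
 * the requested window back from the safe buffer and return 0.  Return 1
 * if the mapping was not bounced, so the caller can fall back to the
 * normal cache maintenance.
 */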
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_cpu);

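/*
 * Sync (part of) a mapping for the device: if the mapping was bounced,
 * copy the requested window out to the safe buffer and return 0.  Return
 * 1 if the mapping was not bounced.
 */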
int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
		__func__, addr, off, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}
EXPORT_SYMBOL(dmabounce_sync_for_device);

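/* Create one fixed-size dma_pool of safe buffers. */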
static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size, 0, 0);

	return pool->pool ? 0 : -ENOMEM;
}

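/*
 * Register a device with dmabounce: create its small and (optionally)
 * large safe-buffer pools and attach the bookkeeping to dev->archdata.
 */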
int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size)
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kzalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

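/*
 * Unregister a device and destroy its pools.  All mappings must have
 * been unmapped first; outstanding safe buffers trigger a BUG().
 */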
void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");