/*
 * DMA Pool allocator
 *
 * This allocator returns small blocks of a given size which are DMA-able
 * by the given device.  It allocates a dma_page each time it needs more
 * than one of these blocks.  Each dma_page keeps a simple linked list of
 * free blocks within the page; used blocks aren't tracked, but we keep a
 * count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not
 * crossing boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;

		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
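
/*
 * Illustrative sketch (not part of this file): a typical caller creates
 * one pool per fixed-size hardware structure at probe time.  The names
 * "foo_desc" and "foo->pool" below are hypothetical.
 *
 *	struct foo_desc { __le32 status; __le32 next; u8 data[56]; };
 *
 *	foo->pool = dma_pool_create("foo-desc", &pdev->dev,
 *				    sizeof(struct foo_desc), 64, 0);
 *	if (!foo->pool)
 *		return -ENOMEM;
 *
 * A nonzero last argument would additionally keep each block from
 * crossing that power-of-two boundary.
 */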

/*
 * Initialise the page's free-block chain: each free block stores the
 * offset of the next free block, never letting a block extend to or
 * past a pool->boundary mark.
 */
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
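
/*
 * Worked example (illustrative numbers, not from this file): with
 * size = 64, boundary = 256 and allocation = 4096, the loop above chains
 * offsets 0 -> 64 -> 128 -> 256 -> 320 -> ...; at offset 128 the test
 * (192 + 64) >= 256 fires, so the chain jumps straight to the next
 * 256-byte boundary instead of handing out a block that would extend to
 * or past it.  The last block's stored "next" equals the allocation
 * size, which dma_pool_alloc() reads as "this page is full".
 */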

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	if (unlikely(!pool))
		return;

	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in the first 4 bytes of the block */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (mem_flags & __GFP_ZERO)
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_free %s, dma %Lx already free\n",
					pool->name, (unsigned long long)dma);
			else
				printk(KERN_ERR
				       "dma_pool_free %s, dma %Lx already free\n",
				       pool->name, (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
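
/*
 * Illustrative usage sketch (hypothetical names, not part of this file):
 *
 *	dma_addr_t dma;
 *	struct foo_desc *desc;
 *
 *	desc = dma_pool_alloc(foo->pool, GFP_KERNEL, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	desc->next = cpu_to_le32(dma);	// the device sees the dma address
 *	...
 *	dma_pool_free(foo->pool, desc, dma);
 *
 * The caller must pass back both the kernel virtual address and the dma
 * address that dma_pool_alloc() reported for that block.
 */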

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
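
/*
 * Illustrative sketch (hypothetical names): a driver using the managed
 * variant can skip explicit cleanup; the pool is torn down automatically
 * on driver detach.
 *
 *	foo->pool = dmam_pool_create("foo-desc", &pdev->dev,
 *				     sizeof(struct foo_desc), 64, 0);
 *	if (!foo->pool)
 *		return -ENOMEM;
 *	// no dma_pool_destroy() needed in the remove path
 */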

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);