// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR_RO(pools);
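
/*
 * For illustration only: given one pool named "buffer-2048" with 2048-byte
 * blocks, one page and one block in use, reading the "pools" attribute
 * would print something along these lines (hypothetical values, following
 * the format string above: name, blocks in use, total blocks, block size,
 * pages):
 *
 *	poolinfo - 0.1
 *	buffer-2048         1    2 2048  1
 */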

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate
 * memory.  Such memory will all have "consistent" DMA mappings,
 * accessible by the device and its driver without using cache flushing
 * primitives.  The actual size of blocks allocated may be larger than
 * requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as 4KiB
 * segments.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
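
/*
 * A minimal usage sketch (not part of this file; "mydev", the pool name
 * and the sizes are assumptions for illustration).  A driver typically
 * creates one pool per fixed-size hardware descriptor at probe time:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydesc", &mydev->dev, 64, 8, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * Blocks are then 64 bytes, aligned to 8 bytes, and guaranteed not to
 * cross a 4 KiB boundary; the pool is torn down with dma_pool_destroy()
 * once every block has been freed.
 */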

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	/* Chain all free blocks in the page into a singly-linked list. */
	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			/* next block would reach or cross the boundary: skip to it */
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
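
/*
 * A worked example of the chaining above, with hypothetical parameters:
 * for size = 96 and boundary = 1024, the free list runs 0 -> 96 -> ...
 * -> 768 -> 864 -> 1024 -> 1120 -> ...; the block at 864 links straight
 * to 1024 because a block starting at 960 would cross the 1024-byte
 * boundary.  The loop ends once the offset reaches pool->allocation.
 */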

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
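
/*
 * A minimal alloc/free sketch (illustrative only; "pool" is assumed to
 * come from a successful dma_pool_create()).  The returned virtual
 * address is for CPU use and the dma_addr_t handle is what the device
 * gets to see:
 *
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_pool_free(pool, cpu_addr, dma);
 */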

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
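
/*
 * A minimal sketch of the managed variant (illustrative; "pdev" is an
 * assumed platform device).  No explicit destroy call is needed, since
 * devres tears the pool down automatically on driver detach:
 *
 *	struct dma_pool *pool;
 *
 *	pool = dmam_pool_create("mydesc", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */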

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);