/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages:
 * - The pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - It tracks pages that are currently in use.
 * - It tracks whether a page is WC, UC or cached (and reverts it to WB
 *   when freed).
 */
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
#define VADDR_FLAG_HUGE_POOL		1UL
#define VADDR_FLAG_UPDATED_COUNT	2UL
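
/*
 * With the common 4 KiB PAGE_SIZE and 8-byte pointers, NUM_PAGES_TO_ALLOC
 * works out to 4096 / 8 = 512 pages per batch, i.e. one page's worth of
 * 'struct page' pointers.
 *
 * The two VADDR_FLAG_* values are stashed in the low bits of
 * dma_page->vaddr. This relies on dma_alloc_attrs() returning page-aligned
 * addresses, so the low bits of a valid vaddr are always zero and can carry
 * state; the real address is recovered by masking the flags off before
 * dma_free_coherent().
 */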

enum pool_type {
	IS_UNDEFINED	= 0,
	IS_WC		= 1 << 1,
	IS_UC		= 1 << 2,
	IS_CACHED	= 1 << 3,
	IS_DMA32	= 1 << 4,
	IS_HUGE		= 1 << 5
};

/*
 * The pool structure. There are up to nine pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 *  - huge (not restricted to DMA32):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when the pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info may be unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* e.g. "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page, with the VADDR_FLAG_* bits
 * stashed in its low bits.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page.
 */
struct dma_page {
	struct list_head page_list;
	unsigned long vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only
 * place they may change is the sysfs store path; readers tolerate a
 * transiently stale value.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'.
 * @dev: The 'struct device' associated with the 'pool'.
 * @pool: The 'struct dma_pool' associated with the 'dev'.
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools.
 * @pools: List of all pools.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker.
 * @kobj: Sysfs anchor for the pool_* tunables.
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max) {
		m->options.max_size = val;
	} else if (attr == &ttm_page_pool_small) {
		m->options.small = val;
	} else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
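
/*
 * Example (path is illustrative; the kobject is added as "dma_pool" under
 * the TTM memory-accounting kobject in ttm_dma_page_alloc_init()): with a
 * 4 KiB PAGE_SIZE, writing "16384" to .../dma_pool/pool_max_size stores
 * 16384 KiB, which the store handler above converts to 16384 / 4 = 4096
 * pages; ttm_pool_show() below performs the inverse conversion on read.
 */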

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* !CONFIG_X86 */
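
/*
 * On !CONFIG_X86 the helpers above approximate the x86 set_memory API with
 * AGP: both the WC and UC variants map the page into the AGP aperture and
 * the WB variant unmaps it again, which is the closest portable analogue to
 * changing the kernel mapping's caching attributes. Without AGP they are
 * no-ops that report success, so caching transitions silently degrade to
 * plain cached mappings on such configurations.
 */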

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;
	/* Mask off *both* flag bits (not just VADDR_FLAG_HUGE_POOL) to
	 * recover the address handed out by dma_alloc_attrs(). */
	void *vaddr = (void *)(d_page->vaddr & PAGE_MASK);

	dma_free_coherent(pool->dev, pool->size, vaddr, dma);

	kfree(d_page);
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;
	unsigned long attrs = 0;
	void *vaddr;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	if (pool->type & IS_HUGE)
		attrs = DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma,
				pool->gfp_flags, attrs);
	if (vaddr) {
		if (is_vmalloc_addr(vaddr))
			d_page->p = vmalloc_to_page(vaddr);
		else
			d_page->p = virt_to_page(vaddr);
		d_page->vaddr = (unsigned long)vaddr;
		if (pool->type & IS_HUGE)
			d_page->vaddr |= VADDR_FLAG_HUGE_POOL;
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
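
/*
 * For example, a write-combined allocation restricted to DMA32
 * (TTM_PAGE_FLAG_DMA32 set, caching state tt_wc) maps to IS_WC | IS_DMA32,
 * which selects the pool named "wc dma32"; ttm_dma_populate() additionally
 * ORs in IS_HUGE when probing for the transparent-huge-page variant of the
 * same pool.
 */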

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	struct page *page = d_page->p;
	unsigned i, num_pages;

	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED)) {
		num_pages = pool->size / PAGE_SIZE;
		for (i = 0; i < num_pages; ++i, ++page) {
			if (set_pages_array_wb(&page, 1)) {
				pr_err("%s: Failed to set %d pages to wb!\n",
				       pool->dev_name, 1);
			}
		}
	}

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	if (pool->type & IS_HUGE) {
		list_for_each_entry_safe(d_page, tmp, d_pages, page_list)
			ttm_dma_page_put(pool, d_page);

		return;
	}

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

/*
 * Free pages from the pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages at a time, and the pool doesn't need to hold any excessive amount of
 * pages.
 *
 * @pool: to free the pages from.
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool.
 * @use_static: safe to use the static buffer (caller holds _manager->lock).
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);

	if (!pages_to_free) {
		pr_debug("%s: Failed to allocate memory for pool free operation\n",
			 pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We pick the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
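
/*
 * Note the return convention above: ttm_dma_page_pool_free() returns how
 * many of the requested pages it did *not* free. ttm_dma_pool_shrink_scan()
 * below relies on this, computing the freed count as nr_free - shrink_pages.
 */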

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;

		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(pool->npages_in_use + pool->npages_free != 0);
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * On free-ing of the 'struct device' this deconstructor is run,
 * albeit the pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	/* Leading space in " dma32" makes concatenated names read naturally,
	 * e.g. "cached dma32". */
	const char *n[] = {"wc", "uc", "cached", " dma32", "huge"};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_HUGE};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	if (type & IS_HUGE)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pool->size = HPAGE_PMD_SIZE;
#else
		BUG();
#endif
	else
		pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < ARRAY_SIZE(t); i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* Copy the device name now: by the time pool teardown messages are
	 * printed, the 'struct device' may already be gone. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
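
/*
 * The devres entry registered above ties the pool's lifetime to its 'struct
 * device': when the device goes away, ttm_dma_pool_release() runs and tears
 * the pool down via ttm_dma_free_pool(), so drivers need no explicit
 * per-pool cleanup call.
 */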

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp;

	if (type == IS_UNDEFINED)
		return NULL;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM
	 * (put_pages) and then call the dev_res deconstructors:
	 * ttm_dma_pool_release. The nice thing is that at that point of time
	 * there are no pages associated with the driver, so this function
	 * will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools)
		if (pool->type == type)
			return pool;
	return NULL;
}

/*
 * Free the pages that failed to change their caching state. Pages whose
 * caching state did change are left on 'd_pages' and returned to the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress to the next failed page. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages from the DMA API, link them on 'd_pages' and move
 * them to the pool's caching state in NUM_PAGES_TO_ALLOC sized batches.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, j, npages, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_debug("%s: Unable to allocate table for new pages\n",
			 pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1)
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_debug("%s: Unable to get page %u\n",
				 pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
		list_add(&dma_p->page_list, d_pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;
#endif

		npages = pool->size / PAGE_SIZE;
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p + j;
			if (cpages == max_cpages) {
				/* The batch is full; change the caching
				 * state of these pages now. */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return the number of free pages the caller may take from the pool,
 * after refilling it to at least _manager->options.small pages if it had
 * fewer than that (0 on complete allocation failure).
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns 0 on success or a negative error code. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* All requested pages were allocated; add them. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_debug("%s: Failed to fill %s pool (r:%d)!\n",
				 pool->dev_name, pool->name, r);

			/* Keep whatever was allocated before the failure. */
			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return a 'struct dma_page' on success, NULL otherwise. The populate list
 * is actually a stack (not that it matters, as TTM allocates one page at a
 * time).
 */
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
					       struct ttm_dma_tt *ttm_dma,
					       unsigned index)
{
	struct dma_page *d_page = NULL;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page,
					  page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}

static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	gfp_t gfp_flags;

	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (huge) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	return gfp_flags;
}
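
/*
 * The huge-pool flags above trade reliability for latency: GFP_TRANSHUGE_LIGHT
 * with __GFP_NORETRY gives up quickly rather than stalling in compaction,
 * while __GFP_KSWAPD_RECLAIM still lets background reclaim make progress.
 * __GFP_MOVABLE and __GFP_COMP are cleared because (presumably) the region
 * is handed out and freed as individual pages rather than one compound page,
 * and a DMA-mapped allocation must not be migrated.
 */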

/*
 * On success the pages list will hold 'num_pages' pages of the requested
 * type, allocated in the correct caching state.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
		     struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	unsigned long num_pages = ttm->num_pages;
	struct dma_pool *pool;
	struct dma_page *d_page;
	enum pool_type type;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
		return -ENOMEM;

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	i = 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		goto skip_huge;

	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);

		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
		if (IS_ERR_OR_NULL(pool))
			goto skip_huge;
	}

	while (num_pages >= HPAGE_PMD_NR) {
		unsigned j;

		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page)
			break;

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
			ttm->pages[j] = ttm->pages[j - 1] + 1;
			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
				PAGE_SIZE;
		}

		i += HPAGE_PMD_NR;
		num_pages -= HPAGE_PMD_NR;
	}

skip_huge:
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	while (num_pages) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						pool->size, ctx);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
		++i;
		--num_pages;
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
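
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a driver's ttm_tt_populate() backend would typically just forward to this
 * allocator, e.g.
 *
 *	static int foo_ttm_populate(struct ttm_tt *ttm,
 *				    struct ttm_operation_ctx *ctx)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		return ttm_dma_populate(dma_tt, foo_device(ttm), ctx);
 *	}
 *
 * with the matching unpopulate hook calling ttm_dma_unpopulate(). The foo_*
 * names are illustrative only; foo_device() stands in for however the
 * driver looks up the 'struct device' used for DMA mapping.
 */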

/* Put all pages in the pages list into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
	if (pool) {
		count = 0;
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
				continue;

			count++;
			if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
				ttm_mem_global_free_page(mem_glob, d_page->p,
							 pool->size);
				d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
			}
			ttm_dma_page_put(pool, d_page);
		}

		spin_lock_irqsave(&pool->lock, irq_flags);
		pool->npages_in_use -= count;
		pool->nfrees += count;
		spin_unlock_irqrestore(&pool->lock, irq_flags);
	}
#endif

	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array matches list and count number of pages */
	count = 0;
	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
				 page_list) {
		ttm->pages[count] = d_page->p;
		count++;

		if (d_page->vaddr & VADDR_FLAG_UPDATED_COUNT) {
			ttm_mem_global_free_page(mem_glob, d_page->p,
						 pool->size);
			d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
		}

		if (is_cached)
			ttm_dma_page_put(pool, d_page);
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/*
 * Callback for the mm shrinker to reduce the number of pages held by the
 * pools.
 *
 * Uses mutex_trylock() on _manager->lock: bailing out with SHRINK_STOP
 * instead of sleeping avoids deadlocking against a pool operation that is
 * itself triggering reclaim while holding the lock.
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}
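
/*
 * When the trylock in ttm_dma_pool_shrink_count() fails, returning 0 makes
 * the shrinker core treat the pools as empty for that pass only; the next
 * reclaim round retries, so freeable pages are never permanently hidden.
 */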

static int ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = &ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	return register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0))
		goto error;

	ret = ttm_dma_pool_mm_shrink_init(_manager);
	if (unlikely(ret != 0))
		goto error;
	return 0;

error:
	kobject_put(&_manager->kobj);
	_manager = NULL;
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "         pool      refills   pages freed    inuse available     name\n");
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);

#endif /* CONFIG_SWIOTLB || CONFIG_INTEL_IOMMU */