1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31#include "drmP.h"
32
33#if defined(CONFIG_X86)
34static void
35drm_clflush_page(struct page *page)
36{
37 uint8_t *page_virtual;
38 unsigned int i;
39
40 if (unlikely(page == NULL))
41 return;
42
43 page_virtual = kmap_atomic(page, KM_USER0);
44 for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
45 clflush(page_virtual + i);
46 kunmap_atomic(page_virtual, KM_USER0);
47}
48
/*
 * clflush every page in the array, with memory barriers on both sides
 * so the flushes are ordered against surrounding accesses.
 */
static void drm_cache_flush_clflush(struct page *pages[],
				    unsigned long num_pages)
{
	unsigned long n;

	mb();	/* order prior stores before the flush loop */
	for (n = 0; n < num_pages; n++)
		drm_clflush_page(pages[n]);
	mb();	/* flushes visible before we return */
}
59
/*
 * IPI callback run on each CPU: write back and invalidate the entire
 * cache with wbinvd. Heavyweight fallback for CPUs lacking clflush.
 */
static void
drm_clflush_ipi_handler(void *unused)
{
	wbinvd();
}
65#endif
66
/**
 * drm_clflush_pages - flush the contents of the given pages out of the
 * CPU data caches.
 * @pages: array of page pointers; NULL entries are skipped.
 * @num_pages: number of entries in @pages.
 *
 * On x86, uses clflush per cache line when the CPU supports it,
 * otherwise falls back to a full cache writeback+invalidate (wbinvd)
 * broadcast to every CPU via IPI. On PowerPC, flushes the dcache range
 * covering each page. Other architectures get a one-time warning.
 */
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		drm_cache_flush_clflush(pages, num_pages);
		return;
	}

	/* No clflush: wbinvd the whole cache on every CPU instead. */
	if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");

#elif defined(__powerpc__)
	unsigned long i;
	for (i = 0; i < num_pages; i++) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		/* Map the page, flush its dcache range, unmap. */
		page_virtual = kmap_atomic(page, KM_USER0);
		flush_dcache_range((unsigned long)page_virtual,
				   (unsigned long)page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual, KM_USER0);
	}
#else
	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
	WARN_ON_ONCE(1);
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
100