1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23#include <linux/kernel.h>
24#include <linux/types.h>
25#include <linux/mm.h>
26
27#include <asm/set_memory.h>
28
29#include "atomisp_internal.h"
30
31#include "hmm/hmm_pool.h"
32
33
34
35
36static unsigned int get_pages_from_dynamic_pool(void *pool,
37 struct hmm_page_object *page_obj,
38 unsigned int size, bool cached)
39{
40 struct hmm_page *hmm_page;
41 unsigned long flags;
42 unsigned int i = 0;
43 struct hmm_dynamic_pool_info *dypool_info = pool;
44
45 if (!dypool_info)
46 return 0;
47
48 spin_lock_irqsave(&dypool_info->list_lock, flags);
49 if (dypool_info->initialized) {
50 while (!list_empty(&dypool_info->pages_list)) {
51 hmm_page = list_entry(dypool_info->pages_list.next,
52 struct hmm_page, list);
53
54 list_del(&hmm_page->list);
55 dypool_info->pgnr--;
56 spin_unlock_irqrestore(&dypool_info->list_lock, flags);
57
58 page_obj[i].page = hmm_page->page;
59 page_obj[i++].type = HMM_PAGE_TYPE_DYNAMIC;
60 kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
61
62 if (i == size)
63 return i;
64
65 spin_lock_irqsave(&dypool_info->list_lock, flags);
66 }
67 }
68 spin_unlock_irqrestore(&dypool_info->list_lock, flags);
69
70 return i;
71}
72
/*
 * Return one page to the dynamic pool, or give it back to the system.
 *
 * Reserved-pool pages are never accepted here.  If the pool is already
 * at capacity, or if no tracking struct can be allocated, the page is
 * switched back to write-back caching and freed to the kernel instead
 * of being pooled.
 */
static void free_pages_to_dynamic_pool(void *pool,
				       struct hmm_page_object *page_obj)
{
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;
	struct hmm_dynamic_pool_info *dypool_info = pool;

	if (!dypool_info)
		return;

	/* Bail out if the pool has been (or is being) torn down. */
	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	/* Reserved-pool pages are managed by the reserved pool, not here. */
	if (page_obj->type == HMM_PAGE_TYPE_RESERVED)
		return;

	/*
	 * NOTE(review): pgnr is read without list_lock held, so this
	 * capacity check is racy; presumably an occasional over/undershoot
	 * of pool_size was considered acceptable — confirm against callers.
	 */
	if (dypool_info->pgnr >= dypool_info->pool_size) {
		/* Pool full: restore WB caching, then free to the system. */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err ...ret=%d\n", ret);
		/*
		 * Only free the page if the attribute change succeeded;
		 * freeing a non-WB page back to the allocator would hand
		 * out a page with unexpected caching.
		 */
		if (!ret) {
			__free_pages(page_obj->page, 0);
			hmm_mem_stat.sys_size--;
		}
		return;
	}
	hmm_page = kmem_cache_zalloc(dypool_info->pgptr_cache,
				     GFP_KERNEL);
	if (!hmm_page) {
		/* No tracking struct: fall back to freeing the page. */
		ret = set_pages_wb(page_obj->page, 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err ...ret=%d\n", ret);
		if (!ret) {
			__free_pages(page_obj->page, 0);
			hmm_mem_stat.sys_size--;
		}
		return;
	}

	hmm_page->page = page_obj->page;

	/* Queue the page for reuse by get_pages_from_dynamic_pool(). */
	spin_lock_irqsave(&dypool_info->list_lock, flags);
	list_add_tail(&hmm_page->list, &dypool_info->pages_list);
	dypool_info->pgnr++;
	spin_unlock_irqrestore(&dypool_info->list_lock, flags);
	hmm_mem_stat.dyc_size++;
}
139
140static int hmm_dynamic_pool_init(void **pool, unsigned int pool_size)
141{
142 struct hmm_dynamic_pool_info *dypool_info;
143
144 if (pool_size == 0)
145 return 0;
146
147 dypool_info = kmalloc(sizeof(struct hmm_dynamic_pool_info),
148 GFP_KERNEL);
149 if (unlikely(!dypool_info))
150 return -ENOMEM;
151
152 dypool_info->pgptr_cache = kmem_cache_create("pgptr_cache",
153 sizeof(struct hmm_page), 0,
154 SLAB_HWCACHE_ALIGN, NULL);
155 if (!dypool_info->pgptr_cache) {
156 kfree(dypool_info);
157 return -ENOMEM;
158 }
159
160 INIT_LIST_HEAD(&dypool_info->pages_list);
161 spin_lock_init(&dypool_info->list_lock);
162 dypool_info->initialized = true;
163 dypool_info->pool_size = pool_size;
164 dypool_info->pgnr = 0;
165
166 *pool = dypool_info;
167
168 return 0;
169}
170
/*
 * Tear down the dynamic pool: drain every cached page back to the
 * system, destroy the slab cache, and free the pool descriptor.
 *
 * `initialized` is cleared under the lock first so that concurrent
 * free_pages_to_dynamic_pool() calls stop adding pages while we drain.
 * The lock is dropped around set_pages_wb()/__free_pages() (which may
 * sleep or take other locks) and re-taken for each list iteration.
 */
static void hmm_dynamic_pool_exit(void **pool)
{
	struct hmm_dynamic_pool_info *dypool_info = *pool;
	struct hmm_page *hmm_page;
	unsigned long flags;
	int ret;

	if (!dypool_info)
		return;

	spin_lock_irqsave(&dypool_info->list_lock, flags);
	if (!dypool_info->initialized) {
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);
		return;
	}
	/* Block further additions before draining. */
	dypool_info->initialized = false;

	while (!list_empty(&dypool_info->pages_list)) {
		hmm_page = list_entry(dypool_info->pages_list.next,
				      struct hmm_page, list);

		list_del(&hmm_page->list);
		spin_unlock_irqrestore(&dypool_info->list_lock, flags);

		/* Restore WB caching before handing the page back. */
		ret = set_pages_wb(hmm_page->page, 1);
		if (ret)
			dev_err(atomisp_dev,
				"set page to WB err...ret=%d\n", ret);
		/* Only free pages whose attribute change succeeded. */
		if (!ret) {
			__free_pages(hmm_page->page, 0);
			hmm_mem_stat.dyc_size--;
			hmm_mem_stat.sys_size--;
		}
		kmem_cache_free(dypool_info->pgptr_cache, hmm_page);
		spin_lock_irqsave(&dypool_info->list_lock, flags);
	}

	spin_unlock_irqrestore(&dypool_info->list_lock, flags);

	kmem_cache_destroy(dypool_info->pgptr_cache);

	kfree(dypool_info);

	*pool = NULL;
}
217
218static int hmm_dynamic_pool_inited(void *pool)
219{
220 struct hmm_dynamic_pool_info *dypool_info = pool;
221
222 if (!dypool_info)
223 return 0;
224
225 return dypool_info->initialized;
226}
227
/* Operations vector hooking the dynamic pool into the HMM pool layer. */
struct hmm_pool_ops dynamic_pops = {
	.pool_init = hmm_dynamic_pool_init,
	.pool_exit = hmm_dynamic_pool_exit,
	.pool_alloc_pages = get_pages_from_dynamic_pool,
	.pool_free_pages = free_pages_to_dynamic_pool,
	.pool_inited = hmm_dynamic_pool_inited,
};
235