1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41#include <linux/slab.h>
42
43#include "ehca_tools.h"
44#include "ipz_pt_fn.h"
45#include "ehca_classes.h"
46
47#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
48
49struct kmem_cache *small_qp_cache;
50
51void *ipz_qpageit_get_inc(struct ipz_queue *queue)
52{
53 void *ret = ipz_qeit_get(queue);
54 queue->current_q_offset += queue->pagesize;
55 if (queue->current_q_offset > queue->queue_length) {
56 queue->current_q_offset -= queue->pagesize;
57 ret = NULL;
58 }
59 if (((u64)ret) % queue->pagesize) {
60 ehca_gen_err("ERROR!! not at PAGE-Boundary");
61 return NULL;
62 }
63 return ret;
64}
65
66void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
67{
68 void *ret = ipz_qeit_get(queue);
69 u64 last_entry_in_q = queue->queue_length - queue->qe_size;
70
71 queue->current_q_offset += queue->qe_size;
72 if (queue->current_q_offset > last_entry_in_q) {
73 queue->current_q_offset = 0;
74 queue->toggle_state = (~queue->toggle_state) & 1;
75 }
76
77 return ret;
78}
79
80int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
81{
82 int i;
83 for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
84 u64 page = __pa(queue->queue_pages[i]);
85 if (addr >= page && addr < page + queue->pagesize) {
86 *q_offset = addr - page + i * queue->pagesize;
87 return 0;
88 }
89 }
90 return -EINVAL;
91}
92
93#if PAGE_SHIFT < EHCA_PAGESHIFT
#error Kernel pages must be at least as large as eHCA pages (4K) !
95#endif
96
97
98
99
100
101
102static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
103{
104 int k, f = 0;
105 u8 *kpage;
106
107 while (f < nr_of_pages) {
108 kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
109 if (!kpage)
110 goto out;
111
112 for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
113 queue->queue_pages[f] = (struct ipz_page *)kpage;
114 kpage += EHCA_PAGESIZE;
115 f++;
116 }
117 }
118 return 1;
119
120out:
121 for (f = 0; f < nr_of_pages && queue->queue_pages[f];
122 f += PAGES_PER_KPAGE)
123 free_page((unsigned long)(queue->queue_pages)[f]);
124 return 0;
125}
126
/*
 * Allocate one "small queue" chunk of queue->pagesize bytes for @queue.
 *
 * Small queues share kernel pages: a kernel page is split into
 * 2^(order+9)-byte chunks tracked by an ipz_small_queue_page (fill count
 * plus a bitmap of used chunks).  Pages with free chunks live on
 * pd->free[order], fully used ones on pd->full[order]; both lists are
 * protected by pd->lock.
 *
 * Returns 1 on success, with queue->queue_pages[0], queue->small_page
 * and queue->offset set; returns 0 on allocation failure.
 */
static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
	/* chunk size is 2^(order+9) bytes, i.e. order 0 == 512 bytes */
	int order = ilog2(queue->pagesize) - 9;
	struct ipz_small_queue_page *page;
	unsigned long bit;

	mutex_lock(&pd->lock);

	/* reuse a partially filled page if one exists ... */
	if (!list_empty(&pd->free[order]))
		page = list_entry(pd->free[order].next,
				  struct ipz_small_queue_page, list);
	else {
		/* ... otherwise start a fresh kernel page for this order */
		page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
		if (!page)
			goto out;

		page->page = get_zeroed_page(GFP_KERNEL);
		if (!page->page) {
			kmem_cache_free(small_qp_cache, page);
			goto out;
		}

		list_add(&page->list, &pd->free[order]);
	}

	/* claim the first unused chunk; a page on the free list always
	 * has at least one clear bit */
	bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
	__set_bit(bit, page->bitmap);
	page->fill++;

	/* page is now completely used -> move it to the full list */
	if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
		list_move(&page->list, &pd->full[order]);

	mutex_unlock(&pd->lock);

	/* queue_pages[0] = kernel page address | byte offset of the chunk */
	queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
	queue->small_page = page;
	queue->offset = bit << (order + 9);
	return 1;

out:
	ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
	mutex_unlock(&pd->lock);
	return 0;
}
171
172static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
173{
174 int order = ilog2(queue->pagesize) - 9;
175 struct ipz_small_queue_page *page = queue->small_page;
176 unsigned long bit;
177 int free_page = 0;
178
179 bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
180 >> (order + 9);
181
182 mutex_lock(&pd->lock);
183
184 __clear_bit(bit, page->bitmap);
185 page->fill--;
186
187 if (page->fill == 0) {
188 list_del(&page->list);
189 free_page = 1;
190 }
191
192 if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
193
194 list_move_tail(&page->list, &pd->free[order]);
195
196 mutex_unlock(&pd->lock);
197
198 if (free_page) {
199 free_page(page->page);
200 kmem_cache_free(small_qp_cache, page);
201 }
202}
203
204int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
205 const u32 nr_of_pages, const u32 pagesize,
206 const u32 qe_size, const u32 nr_of_sg,
207 int is_small)
208{
209 if (pagesize > PAGE_SIZE) {
210 ehca_gen_err("FATAL ERROR: pagesize=%x "
211 "is greater than kernel page size", pagesize);
212 return 0;
213 }
214
215
216 queue->queue_length = nr_of_pages * pagesize;
217 queue->pagesize = pagesize;
218 queue->qe_size = qe_size;
219 queue->act_nr_of_sg = nr_of_sg;
220 queue->current_q_offset = 0;
221 queue->toggle_state = 1;
222 queue->small_page = NULL;
223
224
225 queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
226 GFP_KERNEL | __GFP_NOWARN);
227 if (!queue->queue_pages) {
228 queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
229 if (!queue->queue_pages) {
230 ehca_gen_err("Couldn't allocate queue page list");
231 return 0;
232 }
233 }
234
235
236 if (is_small) {
237 if (!alloc_small_queue_page(queue, pd))
238 goto ipz_queue_ctor_exit0;
239 } else
240 if (!alloc_queue_pages(queue, nr_of_pages))
241 goto ipz_queue_ctor_exit0;
242
243 return 1;
244
245ipz_queue_ctor_exit0:
246 ehca_gen_err("Couldn't alloc pages queue=%p "
247 "nr_of_pages=%x", queue, nr_of_pages);
248 if (is_vmalloc_addr(queue->queue_pages))
249 vfree(queue->queue_pages);
250 else
251 kfree(queue->queue_pages);
252
253 return 0;
254}
255
256int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
257{
258 int i, nr_pages;
259
260 if (!queue || !queue->queue_pages) {
261 ehca_gen_dbg("queue or queue_pages is NULL");
262 return 0;
263 }
264
265 if (queue->small_page)
266 free_small_queue_page(queue, pd);
267 else {
268 nr_pages = queue->queue_length / queue->pagesize;
269 for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
270 free_page((unsigned long)queue->queue_pages[i]);
271 }
272
273 if (is_vmalloc_addr(queue->queue_pages))
274 vfree(queue->queue_pages);
275 else
276 kfree(queue->queue_pages);
277
278 return 1;
279}
280
281int ehca_init_small_qp_cache(void)
282{
283 small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
284 sizeof(struct ipz_small_queue_page),
285 0, SLAB_HWCACHE_ALIGN, NULL);
286 if (!small_qp_cache)
287 return -ENOMEM;
288
289 return 0;
290}
291
/* Destroy the slab cache created by ehca_init_small_qp_cache(). */
void ehca_cleanup_small_qp_cache(void)
{
	kmem_cache_destroy(small_qp_cache);
}
296