#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
	IB_FMR_MAX_REMAPS = 32,

	IB_FMR_HASH_BITS  = 8,
	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};
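
/*
 * If an FMR is not in use, its list member links it into either its
 * pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either case
 * it is a bug if ref_count is not 0: if ref_count > 0, the list
 * member must not be linked into free_list or dirty_list.
 *
 * The cache_node member links the FMR into a cache bucket when
 * caching is enabled, independently of the reference count.  When a
 * mapped FMR is released, its ref_count is decremented; once it
 * reaches 0 the FMR is placed on free_list or dirty_list as
 * appropriate, but it stays in the cache and may be "revived" by a
 * later map request with the same parameters, which just increments
 * ref_count and unlinks the FMR from free_list/dirty_list.
 *
 * Before an FMR taken from free_list is remapped, it is removed from
 * the cache so that nobody else can obtain a stale mapping.  Released
 * FMRs are added to the tail of the free list, giving a least
 * recently used eviction policy.
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock.
 */
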
struct ib_fmr_pool {
	spinlock_t		  pool_lock;

	int			  pool_size;
	int			  max_pages;
	int			  max_remaps;
	int			  dirty_watermark;
	int			  dirty_len;
	struct list_head	  free_list;
	struct list_head	  dirty_list;
	struct hlist_head	 *cache_bucket;

	void			  (*flush_function)(struct ib_fmr_pool *pool,
						    void *arg);
	void			 *flush_arg;

	struct kthread_worker	 *worker;
	struct kthread_work	  work;

	atomic_t		  req_ser;
	atomic_t		  flush_ser;

	wait_queue_head_t	  force_wait;
};

static inline u32 ib_fmr_hash(u64 first_page)
{
	return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
		IB_FMR_HASH_MASK;
}

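/* Caller must hold pool_lock */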
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
						      u64 *page_list,
						      int page_list_len,
						      u64 io_virtual_address)
{
	struct hlist_head *bucket;
	struct ib_pool_fmr *fmr;

	if (!pool->cache_bucket)
		return NULL;

	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

	hlist_for_each_entry(fmr, bucket, cache_node)
		if (io_virtual_address == fmr->io_virtual_address &&
		    page_list_len == fmr->page_list_len &&
		    !memcmp(page_list, fmr->page_list,
			    page_list_len * sizeof *page_list))
			return fmr;

	return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int ret;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

	spin_lock_irq(&pool->pool_lock);

	list_for_each_entry(fmr, &pool->dirty_list, list) {
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
		if (fmr->ref_count != 0) {
			pr_warn(PFX "Unmapping FMR %p with ref count %d\n",
				fmr, fmr->ref_count);
		}
#endif
	}

	list_splice_init(&pool->dirty_list, &unmap_list);
	pool->dirty_len = 0;

	spin_unlock_irq(&pool->pool_lock);

	if (list_empty(&unmap_list))
		return;

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);
	spin_unlock_irq(&pool->pool_lock);
}

static void ib_fmr_cleanup_func(struct kthread_work *work)
{
	struct ib_fmr_pool *pool = container_of(work, struct ib_fmr_pool, work);

	ib_fmr_batch_release(pool);
	atomic_inc(&pool->flush_ser);
	wake_up_interruptible(&pool->force_wait);

	if (pool->flush_function)
		pool->flush_function(pool, pool->flush_arg);

	if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0)
		kthread_queue_work(pool->worker, &pool->work);
}

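/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 *
 * A minimal usage sketch (error handling elided; the parameter
 * values are illustrative, not recommendations):
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr	= 64,
 *		.page_shift		= PAGE_SHIFT,
 *		.access			= IB_ACCESS_LOCAL_WRITE,
 *		.pool_size		= 32,
 *		.dirty_watermark	= 16,
 *		.cache			= 1,
 *	};
 *	struct ib_fmr_pool *pool = ib_create_fmr_pool(pd, &params);
 */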
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
				       struct ib_fmr_pool_param *params)
{
	struct ib_device *device;
	struct ib_fmr_pool *pool;
	int i;
	int ret;
	int max_remaps;

	if (!params)
		return ERR_PTR(-EINVAL);

	device = pd->device;
	if (!device->ops.alloc_fmr    || !device->ops.dealloc_fmr  ||
	    !device->ops.map_phys_fmr || !device->ops.unmap_fmr) {
		dev_info(&device->dev, "Device does not support FMRs\n");
		return ERR_PTR(-ENOSYS);
	}

	if (!device->attrs.max_map_per_fmr)
		max_remaps = IB_FMR_MAX_REMAPS;
	else
		max_remaps = device->attrs.max_map_per_fmr;

	pool = kmalloc(sizeof *pool, GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->cache_bucket   = NULL;
	pool->flush_function = params->flush_function;
	pool->flush_arg      = params->flush_arg;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->dirty_list);

	if (params->cache) {
		pool->cache_bucket =
			kmalloc_array(IB_FMR_HASH_SIZE,
				      sizeof(*pool->cache_bucket),
				      GFP_KERNEL);
		if (!pool->cache_bucket) {
			ret = -ENOMEM;
			goto out_free_pool;
		}

		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
			INIT_HLIST_HEAD(pool->cache_bucket + i);
	}

	pool->pool_size       = 0;
	pool->max_pages       = params->max_pages_per_fmr;
	pool->max_remaps      = max_remaps;
	pool->dirty_watermark = params->dirty_watermark;
	pool->dirty_len       = 0;
	spin_lock_init(&pool->pool_lock);
	atomic_set(&pool->req_ser,   0);
	atomic_set(&pool->flush_ser, 0);
	init_waitqueue_head(&pool->force_wait);

	pool->worker =
		kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
	if (IS_ERR(pool->worker)) {
		pr_warn(PFX "couldn't start cleanup kthread worker\n");
		ret = PTR_ERR(pool->worker);
		goto out_free_pool;
	}
	kthread_init_work(&pool->work, ib_fmr_cleanup_func);

	{
		struct ib_pool_fmr *fmr;
		struct ib_fmr_attr fmr_attr = {
			.max_pages  = params->max_pages_per_fmr,
			.max_maps   = pool->max_remaps,
			.page_shift = params->page_shift
		};
		int bytes_per_fmr = sizeof *fmr;

		if (pool->cache_bucket)
			bytes_per_fmr += params->max_pages_per_fmr * sizeof(u64);

		for (i = 0; i < params->pool_size; ++i) {
			fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
			if (!fmr)
				goto out_fail;

			fmr->pool        = pool;
			fmr->remap_count = 0;
			fmr->ref_count   = 0;
			INIT_HLIST_NODE(&fmr->cache_node);

			fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
			if (IS_ERR(fmr->fmr)) {
				pr_warn(PFX "fmr_create failed for FMR %d\n",
					i);
				kfree(fmr);
				goto out_fail;
			}

			list_add_tail(&fmr->list, &pool->free_list);
			++pool->pool_size;
		}
	}

	return pool;

 out_free_pool:
	kfree(pool->cache_bucket);
	kfree(pool);

	return ERR_PTR(ret);

 out_fail:
	ib_destroy_fmr_pool(pool);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);

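/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */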
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
	LIST_HEAD(fmr_list);
	int i;

	kthread_destroy_worker(pool->worker);
	ib_fmr_batch_release(pool);

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		if (fmr->remap_count) {
			INIT_LIST_HEAD(&fmr_list);
			list_add_tail(&fmr->fmr->list, &fmr_list);
			ib_unmap_fmr(&fmr_list);
		}
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
		++i;
	}

	if (i < pool->pool_size)
		pr_warn(PFX "pool still has %d regions registered\n",
			pool->pool_size - i);

	kfree(pool->cache_bucket);
	kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

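/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are unmapped before returning.
 */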
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial;
	struct ib_pool_fmr *fmr, *next;

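	/*
	 * The free_list holds FMRs that may have been mapped before
	 * being put on the free list.  Move them to the dirty list
	 * now so that the cleanup work will reap them too.
	 */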
	spin_lock_irq(&pool->pool_lock);
	list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
		if (fmr->remap_count > 0)
			list_move(&fmr->list, &pool->dirty_list);
	}
	spin_unlock_irq(&pool->pool_lock);

	serial = atomic_inc_return(&pool->req_ser);
	kthread_queue_work(pool->worker, &pool->work);

	if (wait_event_interruptible(pool->force_wait,
				     atomic_read(&pool->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);

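/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Returns the pooled FMR on success, or an ERR_PTR on failure;
 * -EAGAIN means no unused FMR is currently available.
 */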
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
					 u64 *page_list,
					 int list_len,
					 u64 io_virtual_address)
{
	struct ib_fmr_pool *pool = pool_handle;
	struct ib_pool_fmr *fmr;
	unsigned long flags;
	int result;

	if (list_len < 1 || list_len > pool->max_pages)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&pool->pool_lock, flags);
	fmr = ib_fmr_cache_lookup(pool,
				  page_list,
				  list_len,
				  io_virtual_address);
	if (fmr) {
		/* found in cache */
		++fmr->ref_count;
		if (fmr->ref_count == 1)
			list_del(&fmr->list);

		spin_unlock_irqrestore(&pool->pool_lock, flags);

		return fmr;
	}

	if (list_empty(&pool->free_list)) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return ERR_PTR(-EAGAIN);
	}

	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
	list_del(&fmr->list);
	hlist_del_init(&fmr->cache_node);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
				 io_virtual_address);

	if (result) {
		spin_lock_irqsave(&pool->pool_lock, flags);
		list_add(&fmr->list, &pool->free_list);
		spin_unlock_irqrestore(&pool->pool_lock, flags);

		pr_warn(PFX "fmr_map returns %d\n", result);

		return ERR_PTR(result);
	}

	++fmr->remap_count;
	fmr->ref_count = 1;

	if (pool->cache_bucket) {
		fmr->io_virtual_address = io_virtual_address;
		fmr->page_list_len      = list_len;
		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

		spin_lock_irqsave(&pool->pool_lock, flags);
		hlist_add_head(&fmr->cache_node,
			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
		spin_unlock_irqrestore(&pool->pool_lock, flags);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);

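/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until a batch of
 * flushes is issued.
 */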
void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
	struct ib_fmr_pool *pool;
	unsigned long flags;

	pool = fmr->pool;

	spin_lock_irqsave(&pool->pool_lock, flags);

	--fmr->ref_count;
	if (!fmr->ref_count) {
		if (fmr->remap_count < pool->max_remaps) {
			list_add_tail(&fmr->list, &pool->free_list);
		} else {
			list_add_tail(&fmr->list, &pool->dirty_list);
			if (++pool->dirty_len >= pool->dirty_watermark) {
				atomic_inc(&pool->req_ser);
				kthread_queue_work(pool->worker, &pool->work);
			}
		}
	}

#ifdef DEBUG
	if (fmr->ref_count < 0)
		pr_warn(PFX "FMR %p has ref count %d < 0\n",
			fmr, fmr->ref_count);
#endif

	spin_unlock_irqrestore(&pool->pool_lock, flags);
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);