/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR ITS USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>

#include "rds.h"
#include "rdma.h"
#include "ib.h"

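/*
 * This is stored as mr->r_trans_private.
 */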
struct rds_ib_mr {
	struct rds_ib_device	*device;
	struct rds_ib_mr_pool	*pool;
	struct ib_fmr		*fmr;
	struct list_head	list;
	unsigned int		remap_count;

	struct scatterlist	*sg;
	unsigned int		sg_len;
	u64			*dma;
	int			sg_dma_len;
};
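/*
 * Our own little FMR pool
 */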
struct rds_ib_mr_pool {
	struct mutex		flush_lock;	/* serialize fmr invalidate */
	struct work_struct	flush_worker;	/* flush worker */

	spinlock_t		list_lock;	/* protect variables below */
	atomic_t		item_count;	/* total # of MRs */
	atomic_t		dirty_count;	/* # dirty of total */
	struct list_head	drop_list;	/* MRs that have reached their max_maps limit */
	struct list_head	free_list;	/* unused MRs */
	struct list_head	clean_list;	/* unused & unmapped MRs */
	atomic_t		free_pinned;	/* memory pinned by free MRs */
	unsigned long		max_items;
	unsigned long		max_items_soft;
	unsigned long		max_free_pinned;
	struct ib_fmr_attr	fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

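/*
 * Find the IB device that has the given IP address bound to it. Walks
 * the global rds_ib_devices list, scanning each device's ipaddr_list
 * under that device's spinlock.
 */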
static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_ipaddr *i_ipaddr;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		spin_lock_irq(&rds_ibdev->spinlock);
		list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
			if (i_ipaddr->ipaddr == ipaddr) {
				spin_unlock_irq(&rds_ibdev->spinlock);
				return rds_ibdev;
			}
		}
		spin_unlock_irq(&rds_ibdev->spinlock);
	}

	return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr;

	i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
	if (!i_ipaddr)
		return -ENOMEM;

	i_ipaddr->ipaddr = ipaddr;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
	spin_unlock_irq(&rds_ibdev->spinlock);

	return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_ipaddr *i_ipaddr, *next;

	spin_lock_irq(&rds_ibdev->spinlock);
	list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
		if (i_ipaddr->ipaddr == ipaddr) {
			list_del(&i_ipaddr->list);
			kfree(i_ipaddr);
			break;
		}
	}
	spin_unlock_irq(&rds_ibdev->spinlock);
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
	struct rds_ib_device *rds_ibdev_old;

	rds_ibdev_old = rds_ib_get_device(ipaddr);
	if (rds_ibdev_old)
		rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);

	return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}
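
/*
 * Attach a connection to its IB device: take it off the global nodev
 * list and put it on the device's conn_list.
 */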
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* conn was previously on the nodev_conns_list */
	spin_lock_irq(&ib_nodev_conns_lock);
	BUG_ON(list_empty(&ib_nodev_conns));
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);

	/* irqs are already off here; a nested _irq unlock would
	 * re-enable them while ib_nodev_conns_lock is still held */
	spin_lock(&rds_ibdev->spinlock);
	list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
	spin_unlock(&rds_ibdev->spinlock);
	spin_unlock_irq(&ib_nodev_conns_lock);

	ic->rds_ibdev = rds_ibdev;
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* place conn on nodev_conns_list */
	spin_lock(&ib_nodev_conns_lock);

	spin_lock_irq(&rds_ibdev->spinlock);
	BUG_ON(list_empty(&ic->ib_node));
	list_del(&ic->ib_node);
	spin_unlock_irq(&rds_ibdev->spinlock);

	list_add_tail(&ic->ib_node, &ib_nodev_conns);

	spin_unlock(&ib_nodev_conns_lock);

	ic->rds_ibdev = NULL;
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
	struct rds_ib_connection *ic, *_ic;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(list_lock);
	list_splice(list, &tmp_list);
	INIT_LIST_HEAD(list);
	spin_unlock_irq(list_lock);

	list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) {
		if (ic->conn->c_passive)
			rds_conn_destroy(ic->conn->c_passive);
		rds_conn_destroy(ic->conn);
	}
}

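/*
 * Create the FMR pool for an IB device. The pool limits are derived
 * from the device's max_fmrs and the module's fmr_message_size.
 */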
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->drop_list);
	INIT_LIST_HEAD(&pool->clean_list);
	mutex_init(&pool->flush_lock);
	spin_lock_init(&pool->list_lock);
	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

	pool->fmr_attr.max_pages = fmr_message_size;
	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
	pool->fmr_attr.page_shift = PAGE_SHIFT;
	pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

	/* We never allow more than max_items MRs to be allocated.
	 * When we exceed more than max_items_soft, we start freeing
	 * items more aggressively.
	 *
	 * make sure that max_items > max_items_soft > max_items / 2
	 */
	pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
	pool->max_items = rds_ibdev->max_fmrs;

	return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

	iinfo->rdma_mr_max = pool->max_items;
	iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
	flush_workqueue(rds_wq);
	rds_ib_flush_mr_pool(pool, 1);
	BUG_ON(atomic_read(&pool->item_count));
	BUG_ON(atomic_read(&pool->free_pinned));
	kfree(pool);
}

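/*
 * Grab an unused MR off the clean list, if one is available.
 */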
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
	struct rds_ib_mr *ibmr = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->list_lock, flags);
	if (!list_empty(&pool->clean_list)) {
		ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
		list_del_init(&ibmr->list);
	}
	spin_unlock_irqrestore(&pool->list_lock, flags);

	return ibmr;
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	struct rds_ib_mr *ibmr = NULL;
	int err = 0, iter = 0;

	while (1) {
		ibmr = rds_ib_reuse_fmr(pool);
		if (ibmr)
			return ibmr;

		/* No clean MRs - now we have the choice of either
		 * allocating a fresh MR up to the limit imposed by the
		 * driver, or flush any dirty unused MRs.
		 * We try to avoid stalling in the send path if possible,
		 * so we allocate as long as we're allowed to.
		 *
		 * We're fussy with enforcing the FMR limit, though. If
		 * the driver tells us we can't use more than N fmrs, we
		 * shouldn't start arguing with it */
		if (atomic_inc_return(&pool->item_count) <= pool->max_items)
			break;

		atomic_dec(&pool->item_count);

		if (++iter > 2) {
			rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
			return ERR_PTR(-EAGAIN);
		}

		/* We do have some empty MRs. Flush them out. */
		rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
		rds_ib_flush_mr_pool(pool, 0);
	}

	ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE),
			&pool->fmr_attr);
	if (IS_ERR(ibmr->fmr)) {
		err = PTR_ERR(ibmr->fmr);
		ibmr->fmr = NULL;
		printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
		goto out_no_cigar;
	}

	rds_ib_stats_inc(s_ib_rdma_mr_alloc);
	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (ibmr->fmr)
			ib_dealloc_fmr(ibmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

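/*
 * DMA-map a scatterlist and hand it to the FMR. FMRs map a contiguous
 * run of pages, so only the first entry may start at an offset within
 * a page, and only the last entry may end short of a page boundary.
 */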
static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
			  struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages = NULL;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents,
				   DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ret = -EINVAL;
				goto out_unmap;
			}
			++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ret = -EINVAL;
				goto out_unmap;
			}
			++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > fmr_message_size) {
		ret = -EINVAL;
		goto out_unmap;
	}

	dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(ibmr->fmr,
			      dma_pages, page_cnt, io_addr);
	if (ret)
		goto out_unmap;

	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping. */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	rds_ib_stats_inc(s_ib_rdma_mr_used);
	kfree(dma_pages);

	return 0;

out_unmap:
	/* On failure the MR never takes ownership of the scatterlist,
	 * so release the DMA mapping here or the pages stay pinned. */
	ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	kfree(dma_pages);

	return ret;
}

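/*
 * Make the MR's pages coherent with the CPU (DMA_FROM_DEVICE) or the
 * device (DMA_TO_DEVICE). The underlying mapping is bidirectional.
 */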
void rds_ib_sync_mr(void *trans_private, int direction)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;

	switch (direction) {
	case DMA_FROM_DEVICE:
		ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	case DMA_TO_DEVICE:
		ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
			ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
		break;
	}
}

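/*
 * Drop the MR's DMA mapping and unpin its pages. The caller is
 * responsible for the pool's free_pinned accounting (see
 * rds_ib_teardown_mr below).
 */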
static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_device *rds_ibdev = ibmr->device;

	if (ibmr->sg_dma_len) {
		ib_dma_unmap_sg(rds_ibdev->dev,
				ibmr->sg, ibmr->sg_len,
				DMA_BIDIRECTIONAL);
		ibmr->sg_dma_len = 0;
	}

	/* Release the s/g list */
	if (ibmr->sg_len) {
		unsigned int i;

		for (i = 0; i < ibmr->sg_len; ++i) {
			struct page *page = sg_page(&ibmr->sg[i]);

			/* FIXME we need a way to tell a r/w MR
			 * from a r/o MR */
			set_page_dirty(page);
			put_page(page);
		}
		kfree(ibmr->sg);

		ibmr->sg = NULL;
		ibmr->sg_len = 0;
	}
}

static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
	unsigned int pinned = ibmr->sg_len;

	__rds_ib_teardown_mr(ibmr);
	if (pinned) {
		struct rds_ib_device *rds_ibdev = ibmr->device;
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		atomic_sub(pinned, &pool->free_pinned);
	}
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
	unsigned int item_count;

	item_count = atomic_read(&pool->item_count);
	if (free_all)
		return item_count;

	return 0;
}

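/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */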
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
{
	struct rds_ib_mr *ibmr, *next;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);
	unsigned long unpinned = 0;
	unsigned long flags;
	unsigned int nfreed = 0, ncleaned = 0, free_goal;
	int ret = 0;

	rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

	mutex_lock(&pool->flush_lock);

	spin_lock_irqsave(&pool->list_lock, flags);

	/* Get the list of all MRs to be dropped. Ordering matters -
	 * we want to put drop_list ahead of free_list. */
	list_splice_init(&pool->free_list, &unmap_list);
	list_splice_init(&pool->drop_list, &unmap_list);
	if (free_all)
		list_splice_init(&pool->clean_list, &unmap_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	free_goal = rds_ib_flush_goal(pool, free_all);

	if (list_empty(&unmap_list))
		goto out;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, &unmap_list, list)
		list_add(&ibmr->fmr->list, &fmr_list);
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
		unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
			rds_ib_stats_inc(s_ib_rdma_mr_free);
			list_del(&ibmr->list);
			ib_dealloc_fmr(ibmr->fmr);
			kfree(ibmr);
			nfreed++;
		}
		ncleaned++;
	}

	spin_lock_irqsave(&pool->list_lock, flags);
	list_splice(&unmap_list, &pool->clean_list);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	atomic_sub(unpinned, &pool->free_pinned);
	atomic_sub(ncleaned, &pool->dirty_count);
	atomic_sub(nfreed, &pool->item_count);

out:
	mutex_unlock(&pool->flush_lock);
	return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);

	rds_ib_flush_mr_pool(pool, 0);
}

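/*
 * Return an MR to the pool. It goes on the drop list if it has
 * exhausted its remaps, otherwise on the free list; the actual
 * unmapping happens later, in rds_ib_flush_mr_pool().
 */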
void rds_ib_free_mr(void *trans_private, int invalidate)
{
	struct rds_ib_mr *ibmr = trans_private;
	struct rds_ib_device *rds_ibdev = ibmr->device;
	struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
	unsigned long flags;

	rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

	/* Return it to the pool's free list */
	spin_lock_irqsave(&pool->list_lock, flags);
	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		list_add(&ibmr->list, &pool->drop_list);
	else
		list_add(&ibmr->list, &pool->free_list);

	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);
	spin_unlock_irqrestore(&pool->list_lock, flags);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned
	 || atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_work(rds_wq, &pool->flush_worker);

	if (invalidate) {
		if (likely(!in_interrupt())) {
			rds_ib_flush_mr_pool(pool, 0);
		} else {
			/* We get here if the user created a MR marked
			 * as use_once and invalidate at the same time. */
			queue_work(rds_wq, &pool->flush_worker);
		}
	}
}

void rds_ib_flush_mrs(void)
{
	struct rds_ib_device *rds_ibdev;

	list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
		struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

		if (pool)
			rds_ib_flush_mr_pool(pool, 0);
	}
}

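/*
 * Pin and map a scatterlist for RDMA, returning the rkey the peer can
 * use in *key_ret. The returned rds_ib_mr is stored by the caller as
 * mr->r_trans_private; an ERR_PTR is returned on failure.
 */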
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret)
{
	struct rds_ib_device *rds_ibdev;
	struct rds_ib_mr *ibmr = NULL;
	int ret;

	rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
	if (!rds_ibdev) {
		ret = -ENODEV;
		goto out;
	}

	if (!rds_ibdev->mr_pool) {
		ret = -ENODEV;
		goto out;
	}

	ibmr = rds_ib_alloc_fmr(rds_ibdev);
	if (IS_ERR(ibmr))
		return ibmr;

	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key_ret = ibmr->fmr->rkey;
	else
		printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

	/* rds_ib_free_mr() needs the device to find its pool */
	ibmr->device = rds_ibdev;

 out:
	if (ret) {
		if (ibmr)
			rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}
	return ibmr;
}