#include "ib_mr.h"

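/* Allocate an FMR-backed MR covering up to @npages pages. An MR recycled
 * from the matching per-size pool (8K or 1M messages) is preferred; a new
 * FMR is allocated from the device only when the pool has nothing to reuse.
 */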
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
				(IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_ATOMIC),
				&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

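/* The pool's item_count was bumped when we opted to allocate a new MR
 * (in rds_ib_try_reuse_ibmr()); every failure path gives it back.
 */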
out_no_cigar:
	if (ibmr) {
		if (fmr->fmr)
			ib_dealloc_fmr(fmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}

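/* Map a scatterlist into @ibmr's FMR. The region must be page-aligned
 * except that the first entry may start, and the last entry may end, off
 * a page boundary. On success any previous mapping is torn down and the
 * new pages stay pinned until the MR is unmapped.
 */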
int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
		   struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

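	/* First pass: enforce FMR alignment (only the first entry may
	 * start, and only the last entry may end, off a page boundary)
	 * and count the pages needed.
	 */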
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1) {
				ib_dma_unmap_sg(dev, sg, nents,
						DMA_BIDIRECTIONAL);
				return -EINVAL;
			} else {
				++page_cnt;
			}
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		return -ENOMEM;
	}

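	/* Second pass: flatten the region into the page-address array
	 * that ib_map_phys_fmr() takes.
	 */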
	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret) {
		ib_dma_unmap_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
		goto out;
	}

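	/* Success - we successfully remapped the MR, so we can
	 * safely tear down the old mapping.
	 */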
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

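/* Register the pages backing @sg for RDMA: allocate (or reuse) an FMR,
 * map the scatterlist into it and return the rkey through @key.
 */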
struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
				 struct scatterlist *sg,
				 unsigned long nents,
				 u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int ret;

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->device = rds_ibdev;
	fmr = &ibmr->u.fmr;
	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0) {
		*key = fmr->fmr->rkey;
	} else {
		/* Mapping failed: recycle the MR and report the error
		 * instead of handing back an MR with no valid rkey.
		 */
		rds_ib_free_mr(ibmr, 0);
		ibmr = ERR_PTR(ret);
	}

	return ibmr;
}

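/* Invalidate a batch of FMRs off a pool's unmap list. Every MR is unmapped
 * and its pages unpinned; an MR is destroyed outright while fewer than
 * @goal have been freed, or once it has used up fmr_attr.max_maps
 * remappings. The rest stay on @list for reuse.
 */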
void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
		      unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret = 0;
	unsigned int freed = *nfreed;

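	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */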
	list_for_each_entry(ibmr, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		list_add(&fmr->fmr->list, &fmr_list);
	}

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);

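	/* Now we can destroy the DMA mapping and unpin any pages */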
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		*unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal ||
		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(fmr->fmr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}

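/* Return an MR to its pool: MRs that have used up fmr_attr.max_maps
 * remappings go on the drop list for destruction, the rest on the
 * free list for reuse.
 */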
void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}