#include <linux/module.h>
#include <linux/moduleparam.h>
#include <rdma/ib_umem.h>
#include <linux/atomic.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

int use_dsgl = 1;
module_param(use_dsgl, int, 0644);
MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1) (DEPRECATED)");

#define T4_ULPTX_MIN_IO 32
#define C4IW_MAX_INLINE_SIZE 96
#define T4_ULPTX_MAX_DMA 1024
#define C4IW_INLINE_THRESHOLD 128

static int inline_threshold = C4IW_INLINE_THRESHOLD;
module_param(inline_threshold, int, 0644);
MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");

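/*
 * T4 and T5 adapters cannot handle memory regions of 8GB or larger,
 * so reject them at registration time; later chips are not subject
 * to this check.
 */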
static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
{
	return (is_t4(dev->rdev.lldi.adapter_type) ||
		is_t5(dev->rdev.lldi.adapter_type)) &&
		length >= 8*1024*1024*1024ULL;
}

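/*
 * Write a block of adapter memory with a single ULP_TX_MEM_WRITE work
 * request carrying a DSGL that points at an already-mapped, 32-byte
 * aligned DMA buffer.  If @wr_waitp is set, the write is issued with a
 * completion and waited on; otherwise it is fire-and-forget.
 */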
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
				       u32 len, dma_addr_t data,
				       struct sk_buff *skb,
				       struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_sgl *sgl;
	u8 wr_len;
	int ret = 0;

	addr &= 0x7FFFFFF;

	if (wr_waitp)
		c4iw_init_wr_wait(wr_waitp);
	wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);

	if (!skb) {
		skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
		if (!skb)
			return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	req = __skb_put_zero(skb, wr_len);
	INIT_ULPTX_WR(req, wr_len, 0, 0);
	req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
				    (wr_waitp ? FW_WR_COMPL_F : 0));
	req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
	req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
	req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
			       T5_ULP_MEMIO_ORDER_V(1) |
			       T5_ULP_MEMIO_FID_V(rdev->lldi.rxq_ids[0]));
	req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len >> 5));
	req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
	req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));

	sgl = (struct ulptx_sgl *)(req + 1);
	sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
				    ULPTX_NSGE_V(1));
	sgl->len0 = cpu_to_be32(len);
	sgl->addr0 = cpu_to_be64(data);

	if (wr_waitp)
		ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
	else
		ret = c4iw_ofld_send(rdev, skb);
	return ret;
}

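/*
 * Write adapter memory using immediate data, splitting the buffer into
 * work requests of at most C4IW_MAX_INLINE_SIZE (96) bytes each.  Every
 * chunk is padded to a T4_ULPTX_MIN_IO (32-byte) boundary, a NULL @data
 * pointer zero-fills the target region, and only the final work request
 * asks for a completion.  @addr is in 32-byte units, so each WQE
 * advances it by 3 (96 / 32).
 */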
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
				  void *data, struct sk_buff *skb,
				  struct c4iw_wr_wait *wr_waitp)
{
	struct ulp_mem_io *req;
	struct ulptx_idata *sc;
	u8 wr_len, *to_dp, *from_dp;
	int copy_len, num_wqe, i, ret = 0;
	__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));

	if (is_t4(rdev->lldi.adapter_type))
		cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
	else
		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);

	addr &= 0x7FFFFFF;
	pr_debug("addr 0x%x len %u\n", addr, len);
	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
	c4iw_init_wr_wait(wr_waitp);
	for (i = 0; i < num_wqe; i++) {
		copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
			   len;
		wr_len = roundup(sizeof(*req) + sizeof(*sc) +
				 roundup(copy_len, T4_ULPTX_MIN_IO),
				 16);

		if (!skb) {
			skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
			if (!skb)
				return -ENOMEM;
		}
		set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

		req = __skb_put_zero(skb, wr_len);
		INIT_ULPTX_WR(req, wr_len, 0, 0);

		if (i == (num_wqe - 1)) {
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
						    FW_WR_COMPL_F);
			req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
		} else
			req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
		req->wr.wr_mid = cpu_to_be32(
				FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));

		req->cmd = cmd;
		req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
				DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
		req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len - sizeof(req->wr),
						      16));
		req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));

		sc = (struct ulptx_idata *)(req + 1);
		sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
		sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));

		to_dp = (u8 *)(sc + 1);
		from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
		if (data)
			memcpy(to_dp, from_dp, copy_len);
		else
			memset(to_dp, 0, copy_len);
		if (copy_len % T4_ULPTX_MIN_IO)
			memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
			       (copy_len % T4_ULPTX_MIN_IO));
		if (i == (num_wqe - 1))
			ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
						 __func__);
		else
			ret = c4iw_ofld_send(rdev, skb);
		if (ret)
			break;
		skb = NULL;
		len -= C4IW_MAX_INLINE_SIZE;
	}

	return ret;
}

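/*
 * DMA-map @data and copy it into adapter memory in DSGL chunks of at
 * most T4_ULPTX_MAX_DMA (1KB), each rounded down to a 32-byte multiple.
 * Any tail at or below the inline threshold is written via the
 * immediate-data path instead.  @addr is in 32-byte units, hence the
 * "dmalen >> 5" advance.
 */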
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
			       void *data, struct sk_buff *skb,
			       struct c4iw_wr_wait *wr_waitp)
{
	u32 remain = len;
	u32 dmalen;
	int ret = 0;
	dma_addr_t daddr;
	dma_addr_t save;

	daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
		return -1;
	save = daddr;

	while (remain > inline_threshold) {
		if (remain < T4_ULPTX_MAX_DMA) {
			if (remain & ~T4_ULPTX_MIN_IO)
				dmalen = remain & ~(T4_ULPTX_MIN_IO - 1);
			else
				dmalen = remain;
		} else
			dmalen = T4_ULPTX_MAX_DMA;
		remain -= dmalen;
		ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
						  skb,
						  remain ? NULL : wr_waitp);
		if (ret)
			goto out;
		addr += dmalen >> 5;
		data += dmalen;
		daddr += dmalen;
	}
	if (remain)
		ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
					     wr_waitp);
out:
	dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
	return ret;
}

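/*
 * Top-level adapter-memory writer: use immediate data when DSGL support
 * is absent or disabled, or when the write is small; otherwise try the
 * DMA path and fall back to inline writes if the mapping fails.
 */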
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
			     void *data, struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	int ret;

	if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
		goto out;
	}

	if (len <= inline_threshold) {
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
		goto out;
	}

	ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
	if (ret) {
		pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
				    pci_name(rdev->lldi.pdev));
		ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
					     wr_waitp);
	}
out:
	return ret;
}

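/*
 * Build (or clear, when @reset_tpt_entry is set) the TPT entry for a
 * stag and write it into adapter memory.  When called with
 * *stag == T4_STAG_UNSET, a new stag index is allocated and combined
 * with an 8-bit rolling key to form the stag; stag memory usage is
 * accounted in 32-byte TPT-entry units.
 */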
static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
			   u32 *stag, u8 stag_state, u32 pdid,
			   enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
			   int bind_enabled, u32 zbva, u64 to,
			   u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
			   struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
{
	int err;
	struct fw_ri_tpte tpt;
	u32 stag_idx;
	static atomic_t key;

	if (c4iw_fatal_error(rdev))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
		if (!stag_idx) {
			mutex_lock(&rdev->stats.lock);
			rdev->stats.stag.fail++;
			mutex_unlock(&rdev->stats.lock);
			return -ENOMEM;
		}
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur += 32;
		if (rdev->stats.stag.cur > rdev->stats.stag.max)
			rdev->stats.stag.max = rdev->stats.stag.cur;
		mutex_unlock(&rdev->stats.lock);
		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
	}
	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
		 stag_state, type, pdid, stag_idx);

	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
			FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
			FW_RI_TPTE_STAGSTATE_V(stag_state) |
			FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
		tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
			(bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
			FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
						      FW_RI_VA_BASED_TO)) |
			FW_RI_TPTE_PS_V(page_size));
		tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
			FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr) >> 3));
		tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
		tpt.va_hi = cpu_to_be32((u32)(to >> 32));
		tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
		tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
		tpt.len_hi = cpu_to_be32((u32)(len >> 32));
	}
	err = write_adapter_mem(rdev, stag_idx +
				(rdev->lldi.vr->stag.start >> 5),
				sizeof(tpt), &tpt, skb, wr_waitp);

	if (reset_tpt_entry) {
		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
		mutex_lock(&rdev->stats.lock);
		rdev->stats.stag.cur -= 32;
		mutex_unlock(&rdev->stats.lock);
	}
	return err;
}

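/* Write a physical buffer list (PBL) into the adapter's PBL memory region. */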
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
		     u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
{
	int err;

	pr_debug("pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		 pbl_addr, rdev->lldi.vr->pbl.start,
		 pbl_size);

	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
				wr_waitp);
	return err;
}

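/*
 * Thin wrappers around write_tpt_entry() for destroying memory regions
 * and for allocating/deallocating memory-window and non-shared memory
 * region (NSMR) stags.
 */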
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
		     u32 pbl_addr, struct sk_buff *skb,
		     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
			       pbl_size, pbl_addr, skb, wr_waitp);
}

static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			   struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
			       0UL, 0, 0, 0, 0, NULL, wr_waitp);
}

static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
			     struct sk_buff *skb,
			     struct c4iw_wr_wait *wr_waitp)
{
	return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
			       0, skb, wr_waitp);
}

static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
			 u32 pbl_size, u32 pbl_addr,
			 struct c4iw_wr_wait *wr_waitp)
{
	*stag = T4_STAG_UNSET;
	return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
			       0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
}

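/*
 * Mark the MR valid, derive lkey/rkey from the stag, and insert it into
 * the device's MR xarray keyed by mmid (stag >> 8).
 */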
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
{
	u32 mmid;

	mhp->attr.state = 1;
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	mhp->ibmr.length = mhp->attr.len;
	mhp->ibmr.iova = mhp->attr.va_fbo;
	mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
	return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
}

static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
			struct c4iw_mr *mhp, int shift)
{
	u32 stag = T4_STAG_UNSET;
	int ret;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
			      FW_RI_STAG_NSMR, mhp->attr.len ?
			      mhp->attr.perms : 0,
			      mhp->attr.mw_bind_enable, mhp->attr.zbva,
			      mhp->attr.va_fbo, mhp->attr.len ?
			      mhp->attr.len : -1, shift - 12,
			      mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
			      mhp->wr_waitp);
	if (ret)
		return ret;

	ret = finish_mem_reg(mhp, stag);
	if (ret) {
		dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
			  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
		mhp->dereg_skb = NULL;
	}
	return ret;
}

static int alloc_pbl(struct c4iw_mr *mhp, int npages)
{
	mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
						npages << 3);

	if (!mhp->attr.pbl_addr)
		return -ENOMEM;

	mhp->attr.pbl_size = npages;

	return 0;
}

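/*
 * Register a DMA MR: a stag covering the whole address space
 * (va_fbo 0, len ~0ULL), so no PBL is needed.
 */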
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	int ret;
	u32 stag = T4_STAG_UNSET;

	pr_debug("ib_pd %p\n", pd);
	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
	mhp->attr.zbva = 0;
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = 0;
	mhp->attr.len = ~0ULL;
	mhp->attr.pbl_size = 0;

	ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
			      FW_RI_STAG_NSMR, mhp->attr.perms,
			      mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
			      NULL, mhp->wr_waitp);
	if (ret)
		goto err_free_skb;

	ret = finish_mem_reg(mhp, stag);
	if (ret)
		goto err_dereg_mem;
	return &mhp->ibmr;
err_dereg_mem:
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(ret);
}

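/*
 * Register a user memory region: pin the pages via ib_umem_get(), write
 * their DMA addresses into a newly allocated PBL (one page of PBL
 * entries at a time), then write the TPT entry.
 */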
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			       u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, i;
	int err = -ENOMEM;
	struct sg_dma_page_iter sg_iter;
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;

	pr_debug("ib_pd %p\n", pd);

	if (length == ~0ULL)
		return ERR_PTR(-EINVAL);

	if ((length + start) < start)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_exceeds_hw_limits(rhp, length))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp)
		goto err_free_mhp;

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb)
		goto err_free_wr_wait;

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(udata, start, length, acc, 0);
	if (IS_ERR(mhp->umem))
		goto err_free_skb;

	shift = PAGE_SHIFT;

	n = ib_umem_num_pages(mhp->umem);
	err = alloc_pbl(mhp, n);
	if (err)
		goto err_umem_release;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl_free;
	}

	i = n = 0;

	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
		if (i == PAGE_SIZE / sizeof(*pages)) {
			err = write_pbl(&mhp->rhp->rdev, pages,
					mhp->attr.pbl_addr + (n << 3), i,
					mhp->wr_waitp);
			if (err)
				goto pbl_done;
			n += i;
			i = 0;
		}
	}

	if (i)
		err = write_pbl(&mhp->rhp->rdev, pages,
				mhp->attr.pbl_addr + (n << 3), i,
				mhp->wr_waitp);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl_free;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = length;

	err = register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl_free;

	return &mhp->ibmr;

err_pbl_free:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_umem_release:
	ib_umem_release(mhp->umem);
err_free_skb:
	kfree_skb(mhp->dereg_skb);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
	return ERR_PTR(err);
}

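/* Allocate a type-1 memory window and insert it into the MR xarray. */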
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			    struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto free_mhp;
	}

	mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
	if (!mhp->dereg_skb) {
		ret = -ENOMEM;
		goto free_wr_wait;
	}

	ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
	if (ret)
		goto free_skb;
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto dealloc_win;
	}
	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return &(mhp->ibmw);

dealloc_win:
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
free_skb:
	kfree_skb(mhp->dereg_skb);
free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
free_mhp:
	kfree(mhp);
	return ERR_PTR(ret);
}

int c4iw_dealloc_mw(struct ib_mw *mw)
{
	struct c4iw_dev *rhp;
	struct c4iw_mw *mhp;
	u32 mmid;

	mhp = to_c4iw_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
			  mhp->wr_waitp);
	kfree_skb(mhp->dereg_skb);
	c4iw_put_wr_wait(mhp->wr_waitp);
	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

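/*
 * Allocate a fast-register MR: a DMA-coherent page-list buffer for
 * c4iw_map_mr_sg(), a PBL, and an NSMR stag that starts out invalid
 * (state == 0) until it is fast-registered.
 */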
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			    u32 max_num_sg, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_pd *php;
	struct c4iw_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;
	int length = roundup(max_num_sg * sizeof(u64), 32);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
					 use_dsgl))
		return ERR_PTR(-EINVAL);

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
	if (!mhp->wr_waitp) {
		ret = -ENOMEM;
		goto err_free_mhp;
	}
	c4iw_init_wr_wait(mhp->wr_waitp);

	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
				      length, &mhp->mpl_addr, GFP_KERNEL);
	if (!mhp->mpl) {
		ret = -ENOMEM;
		goto err_free_wr_wait;
	}
	mhp->max_mpl_len = length;

	mhp->rhp = rhp;
	ret = alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err_free_dma;
	mhp->attr.pbl_size = max_num_sg;
	ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
			    mhp->attr.pbl_size, mhp->attr.pbl_addr,
			    mhp->wr_waitp);
	if (ret)
		goto err_free_pbl;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = FW_RI_STAG_NSMR;
	mhp->attr.stag = stag;
	mhp->attr.state = 0;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_dereg;
	}

	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
	return &(mhp->ibmr);
err_dereg:
	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
err_free_pbl:
	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
			  mhp->attr.pbl_size << 3);
err_free_dma:
	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
err_free_wr_wait:
	c4iw_put_wr_wait(mhp->wr_waitp);
err_free_mhp:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

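/* ib_sg_to_pages() callback: append one page address to the MR's page list. */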
static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->mpl[mhp->mpl_len++] = addr;

	return 0;
}

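/* Convert an SG list into adapter page-list entries via ib_sg_to_pages(). */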
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		   unsigned int *sg_offset)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

	mhp->mpl_len = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
}

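/*
 * Tear down an MR in the reverse order of creation: remove it from the
 * xarray, free the fast-register page list (if any), clear the TPT
 * entry, and release the PBL and umem.
 */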
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_mr *mhp;
	u32 mmid;

	pr_debug("ib_mr %p\n", ib_mr);

	mhp = to_c4iw_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	xa_erase_irq(&rhp->mrs, mmid);
	if (mhp->mpl)
		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		  mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
	if (mhp->attr.pbl_size)
		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
				  mhp->attr.pbl_size << 3);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	ib_umem_release(mhp->umem);
	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
	c4iw_put_wr_wait(mhp->wr_waitp);
	kfree(mhp);
	return 0;
}

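/* Mark the MR backing @rkey invalid, e.g. after a local or remote invalidate. */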
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey)
{
	struct c4iw_mr *mhp;
	unsigned long flags;

	xa_lock_irqsave(&rhp->mrs, flags);
	mhp = xa_load(&rhp->mrs, rkey >> 8);
	if (mhp)
		mhp->attr.state = 0;
	xa_unlock_irqrestore(&rhp->mrs, flags);
}