#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"

void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
	struct hns_roce_buf_list *trunks;
	u32 i;

	if (!buf)
		return;

	trunks = buf->trunk_list;
	if (trunks) {
		buf->trunk_list = NULL;
		for (i = 0; i < buf->ntrunks; i++)
			dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
					  trunks[i].buf, trunks[i].map);

		kfree(trunks);
	}

	kfree(buf);
}
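
/*
 * Allocate a DMA buffer made up of one or more coherent trunks.
 *
 * @size: total buffer size required
 * @page_shift: the unit size of a continuous dma address range
 * @flags: HNS_ROCE_BUF_ flags to control the allocation flow
 */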
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags)
{
	u32 trunk_size, page_size, alloced_size;
	struct hns_roce_buf_list *trunks;
	struct hns_roce_buf *buf;
	gfp_t gfp_flags;
	u32 ntrunk, i;

	/* The minimum page shift accepted by the hardware is HNS_HW_PAGE_SHIFT */
	if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
		return ERR_PTR(-EINVAL);

	gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
	buf = kzalloc(sizeof(*buf), gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->page_shift = page_shift;
	page_size = 1 << buf->page_shift;

	/* Calculate the trunk size and count from the required size and page_shift */
	if (flags & HNS_ROCE_BUF_DIRECT) {
		buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
		ntrunk = 1;
	} else {
		buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
		ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
	}

	trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
	if (!trunks) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	trunk_size = 1 << buf->trunk_shift;
	alloced_size = 0;
	for (i = 0; i < ntrunk; i++) {
		trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
						   &trunks[i].map, gfp_flags);
		if (!trunks[i].buf)
			break;

		alloced_size += trunk_size;
	}

	buf->ntrunks = i;

	/* In nofail mode, the allocation fails only if nothing could be allocated */
	if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
		for (i = 0; i < buf->ntrunks; i++)
			dma_free_coherent(hr_dev->dev, trunk_size,
					  trunks[i].buf, trunks[i].map);

		kfree(trunks);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->npages = DIV_ROUND_UP(alloced_size, page_size);
	buf->trunk_list = trunks;

	return buf;
}
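
/* Fill bufs[] with the DMA address of each (1 << page_shift) sized page of a kernel buffer */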
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift)
{
	unsigned int offset, max_size;
	int total = 0;
	int i;

	if (page_shift > buf->trunk_shift) {
		dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
			page_shift, buf->trunk_shift);
		return -EINVAL;
	}

	offset = 0;
	max_size = buf->ntrunks << buf->trunk_shift;
	for (i = 0; i < buf_cnt && offset < max_size; i++) {
		bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
		offset += (1 << page_shift);
	}

	return total;
}
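
/* Fill bufs[] with the DMA address of each block of a userspace buffer (umem) */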
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift)
{
	struct ib_block_iter biter;
	int total = 0;

	/* Walk the umem in blocks of (1 << page_shift) bytes */
	rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
		bufs[total++] = rdma_block_iter_dma_address(&biter);
		if (total >= buf_cnt)
			goto done;
	}

done:
	return total;
}
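
/* Release the index allocators and tables set up at device init time */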
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ida_destroy(&hr_dev->xrcd_ida.ida);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		ida_destroy(&hr_dev->srq_table.srq_ida.ida);
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
	ida_destroy(&hr_dev->pd_ida.ida);
	ida_destroy(&hr_dev->uar_ida.ida);
}