#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include "hns_roce_device.h"
#include <rdma/ib_umem.h>

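/*
 * Free a buffer allocated by hns_roce_buf_alloc(): release each DMA trunk,
 * then the trunk list and the buffer descriptor. Passing a NULL @buf is a
 * no-op.
 */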
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
	struct hns_roce_buf_list *trunks;
	u32 i;

	if (!buf)
		return;

	trunks = buf->trunk_list;
	if (trunks) {
		buf->trunk_list = NULL;
		for (i = 0; i < buf->ntrunks; i++)
			dma_free_coherent(hr_dev->dev, 1 << buf->trunk_shift,
					  trunks[i].buf, trunks[i].map);

		kfree(trunks);
	}

	kfree(buf);
}

/*
 * Allocate the dma buffer for storing ROCEE table entries
 *
 * @size: required size
 * @page_shift: the unit size in a continuous dma address range
 * @flags: HNS_ROCE_BUF_ flags to control the allocation flow.
 */
struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
					u32 page_shift, u32 flags)
{
	u32 trunk_size, page_size, alloced_size;
	struct hns_roce_buf_list *trunks;
	struct hns_roce_buf *buf;
	gfp_t gfp_flags;
	u32 ntrunk, i;

	/* The minimum shift of the page accessed by hw is HNS_HW_PAGE_SHIFT */
	if (WARN_ON(page_shift < HNS_HW_PAGE_SHIFT))
		return ERR_PTR(-EINVAL);

	gfp_flags = (flags & HNS_ROCE_BUF_NOSLEEP) ? GFP_ATOMIC : GFP_KERNEL;
	buf = kzalloc(sizeof(*buf), gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->page_shift = page_shift;
	page_size = 1 << buf->page_shift;

	/* Calc the trunk size and num by required size and page_shift */
	if (flags & HNS_ROCE_BUF_DIRECT) {
		buf->trunk_shift = order_base_2(ALIGN(size, PAGE_SIZE));
		ntrunk = 1;
	} else {
		buf->trunk_shift = order_base_2(ALIGN(page_size, PAGE_SIZE));
		ntrunk = DIV_ROUND_UP(size, 1 << buf->trunk_shift);
	}

	trunks = kcalloc(ntrunk, sizeof(*trunks), gfp_flags);
	if (!trunks) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	trunk_size = 1 << buf->trunk_shift;
	alloced_size = 0;
	for (i = 0; i < ntrunk; i++) {
		trunks[i].buf = dma_alloc_coherent(hr_dev->dev, trunk_size,
						   &trunks[i].map, gfp_flags);
		if (!trunks[i].buf)
			break;

		alloced_size += trunk_size;
	}

	buf->ntrunks = i;

	/* In nofail mode, it's only failed when the alloced size is 0 */
	if ((flags & HNS_ROCE_BUF_NOFAIL) ? i == 0 : i != ntrunk) {
		for (i = 0; i < buf->ntrunks; i++)
			dma_free_coherent(hr_dev->dev, trunk_size,
					  trunks[i].buf, trunks[i].map);

		kfree(trunks);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->npages = DIV_ROUND_UP(alloced_size, page_size);
	buf->trunk_list = trunks;

	return buf;
}

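/*
 * Report the DMA address of every hw page in a kernel buffer.
 *
 * @bufs: output array, filled with at most @buf_cnt addresses
 * @buf_cnt: capacity of @bufs
 * @buf: trunk-based buffer from hns_roce_buf_alloc()
 * @page_shift: shift of the reported page size; must not exceed the trunk
 *              shift of @buf
 *
 * Returns the number of addresses written, or -EINVAL on a bad @page_shift.
 */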
int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct hns_roce_buf *buf,
			   unsigned int page_shift)
{
	unsigned int offset, max_size;
	int total = 0;
	int i;

	if (page_shift > buf->trunk_shift) {
		dev_err(hr_dev->dev, "failed to check kmem buf shift %u > %u\n",
			page_shift, buf->trunk_shift);
		return -EINVAL;
	}

	offset = 0;
	max_size = buf->ntrunks << buf->trunk_shift;
	for (i = 0; i < buf_cnt && offset < max_size; i++) {
		bufs[total++] = hns_roce_buf_dma_addr(buf, offset);
		offset += (1 << page_shift);
	}

	return total;
}

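/*
 * Report the DMA address of every hw page in a pinned user memory region.
 *
 * @bufs: output array, filled with at most @buf_cnt addresses
 * @buf_cnt: capacity of @bufs
 * @umem: the user memory region to walk
 * @page_shift: shift of the DMA block size used for the walk
 *
 * Returns the number of addresses written.
 */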
int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
			   int buf_cnt, struct ib_umem *umem,
			   unsigned int page_shift)
{
	struct ib_block_iter biter;
	int total = 0;

	/* convert system page cnt to hw page cnt */
	rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
		bufs[total++] = rdma_block_iter_dma_address(&biter);
		if (total >= buf_cnt)
			goto done;
	}

done:
	return total;
}

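/*
 * Tear down the ID allocators and tables set up during device init; the
 * XRCD and SRQ allocators are only destroyed when the matching capability
 * flag is set.
 */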
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_XRC)
		ida_destroy(&hr_dev->xrcd_ida.ida);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		ida_destroy(&hr_dev->srq_table.srq_ida.ida);
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
	ida_destroy(&hr_dev->pd_ida.ida);
	ida_destroy(&hr_dev->uar_ida.ida);
}