1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34#include <linux/platform_device.h>
35#include <linux/vmalloc.h>
36#include "hns_roce_device.h"
37
38int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
39{
40 int ret = 0;
41
42 spin_lock(&bitmap->lock);
43 *obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
44 if (*obj >= bitmap->max) {
45 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
46 & bitmap->mask;
47 *obj = find_first_zero_bit(bitmap->table, bitmap->max);
48 }
49
50 if (*obj < bitmap->max) {
51 set_bit(*obj, bitmap->table);
52 bitmap->last = (*obj + 1);
53 if (bitmap->last == bitmap->max)
54 bitmap->last = 0;
55 *obj |= bitmap->top;
56 } else {
57 ret = -1;
58 }
59
60 spin_unlock(&bitmap->lock);
61
62 return ret;
63}
64
/*
 * hns_roce_bitmap_free - release a single index back to the bitmap
 * @bitmap: bitmap allocator the index came from
 * @obj: index previously returned by hns_roce_bitmap_alloc()
 * @rr: round-robin flag, passed straight through; see
 *      hns_roce_bitmap_free_range() for its effect on "last"
 *
 * Thin wrapper around hns_roce_bitmap_free_range() for one bit.
 */
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			  int rr)
{
	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}
70
71int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
72 int align, unsigned long *obj)
73{
74 int ret = 0;
75 int i;
76
77 if (likely(cnt == 1 && align == 1))
78 return hns_roce_bitmap_alloc(bitmap, obj);
79
80 spin_lock(&bitmap->lock);
81
82 *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
83 bitmap->last, cnt, align - 1);
84 if (*obj >= bitmap->max) {
85 bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
86 & bitmap->mask;
87 *obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
88 cnt, align - 1);
89 }
90
91 if (*obj < bitmap->max) {
92 for (i = 0; i < cnt; i++)
93 set_bit(*obj + i, bitmap->table);
94
95 if (*obj == bitmap->last) {
96 bitmap->last = (*obj + cnt);
97 if (bitmap->last >= bitmap->max)
98 bitmap->last = 0;
99 }
100 *obj |= bitmap->top;
101 } else {
102 ret = -1;
103 }
104
105 spin_unlock(&bitmap->lock);
106
107 return ret;
108}
109
/*
 * hns_roce_bitmap_free_range - release a run of consecutive indexes
 * @bitmap: bitmap allocator the range came from
 * @obj: first index of the range (may still carry "top" generation bits)
 * @cnt: number of consecutive indexes to free
 * @rr: non-zero for round-robin mode; "last" is then left alone so the
 *      next search does not immediately reuse the freed slots
 */
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr)
{
	int i;

	/* Mask off the generation ("top") bits to recover the raw index. */
	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	for (i = 0; i < cnt; i++)
		clear_bit(obj + i, bitmap->table);

	if (!rr)
		bitmap->last = min(bitmap->last, obj);
	/* Advance the generation counter, mirroring the alloc paths. */
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
		       & bitmap->mask;
	spin_unlock(&bitmap->lock);
}
128
129int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
130 u32 reserved_bot, u32 reserved_top)
131{
132 u32 i;
133
134 if (num != roundup_pow_of_two(num))
135 return -EINVAL;
136
137 bitmap->last = 0;
138 bitmap->top = 0;
139 bitmap->max = num - reserved_top;
140 bitmap->mask = mask;
141 bitmap->reserved_top = reserved_top;
142 spin_lock_init(&bitmap->lock);
143 bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
144 GFP_KERNEL);
145 if (!bitmap->table)
146 return -ENOMEM;
147
148 for (i = 0; i < reserved_bot; ++i)
149 set_bit(i, bitmap->table);
150
151 return 0;
152}
153
/*
 * hns_roce_bitmap_cleanup - free the memory backing a bitmap allocator
 * @bitmap: allocator previously set up by hns_roce_bitmap_init()
 */
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
	kfree(bitmap->table);
}
158
159void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
160 struct hns_roce_buf *buf)
161{
162 int i;
163 struct device *dev = &hr_dev->pdev->dev;
164 u32 bits_per_long = BITS_PER_LONG;
165
166 if (buf->nbufs == 1) {
167 dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
168 } else {
169 if (bits_per_long == 64)
170 vunmap(buf->direct.buf);
171
172 for (i = 0; i < buf->nbufs; ++i)
173 if (buf->page_list[i].buf)
174 dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
175 buf->page_list[i].buf,
176 buf->page_list[i].map);
177 kfree(buf->page_list);
178 }
179}
180
181int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
182 struct hns_roce_buf *buf)
183{
184 int i = 0;
185 dma_addr_t t;
186 struct page **pages;
187 struct device *dev = &hr_dev->pdev->dev;
188 u32 bits_per_long = BITS_PER_LONG;
189
190
191 if (size <= max_direct) {
192 buf->nbufs = 1;
193
194 buf->npages = 1 << get_order(size);
195 buf->page_shift = PAGE_SHIFT;
196
197 buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
198 if (!buf->direct.buf)
199 return -ENOMEM;
200
201 buf->direct.map = t;
202
203 while (t & ((1 << buf->page_shift) - 1)) {
204 --buf->page_shift;
205 buf->npages *= 2;
206 }
207
208 memset(buf->direct.buf, 0, size);
209 } else {
210 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
211 buf->npages = buf->nbufs;
212 buf->page_shift = PAGE_SHIFT;
213 buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
214 GFP_KERNEL);
215
216 if (!buf->page_list)
217 return -ENOMEM;
218
219 for (i = 0; i < buf->nbufs; ++i) {
220 buf->page_list[i].buf = dma_alloc_coherent(dev,
221 PAGE_SIZE, &t,
222 GFP_KERNEL);
223
224 if (!buf->page_list[i].buf)
225 goto err_free;
226
227 buf->page_list[i].map = t;
228 memset(buf->page_list[i].buf, 0, PAGE_SIZE);
229 }
230 if (bits_per_long == 64) {
231 pages = kmalloc_array(buf->nbufs, sizeof(*pages),
232 GFP_KERNEL);
233 if (!pages)
234 goto err_free;
235
236 for (i = 0; i < buf->nbufs; ++i)
237 pages[i] = virt_to_page(buf->page_list[i].buf);
238
239 buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
240 PAGE_KERNEL);
241 kfree(pages);
242 if (!buf->direct.buf)
243 goto err_free;
244 }
245 }
246
247 return 0;
248
249err_free:
250 hns_roce_buf_free(hr_dev, size, buf);
251 return -ENOMEM;
252}
253
/*
 * hns_roce_cleanup_bitmap - tear down the per-resource index tables
 * @hr_dev: RoCE device being shut down
 *
 * Releases the QP, CQ, MR, PD and UAR tables; presumably the reverse
 * of their setup order — confirm against the corresponding init path.
 */
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	hns_roce_cleanup_mr_table(hr_dev);
	hns_roce_cleanup_pd_table(hr_dev);
	hns_roce_cleanup_uar_table(hr_dev);
}
262