#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include "hns_roce_device.h"

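/**
 * hns_roce_bitmap_alloc - allocate one index from a wrapping bitmap
 * @bitmap: the allocator state
 * @obj: on success, the allocated index ORed with the current @top prefix
 *
 * The search starts at @last so indexes are handed out round-robin; when
 * the tail of the table is full, @top is stepped by the table size (and
 * masked with @mask) and the search restarts from bit zero, so a recycled
 * bit usually yields a different handle value than the previous wrap.
 *
 * Returns 0 on success or -EINVAL when no free bit is left.
 */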
int hns_roce_bitmap_alloc(struct hns_roce_bitmap *bitmap, unsigned long *obj)
{
	int ret = 0;

	spin_lock(&bitmap->lock);
	*obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = find_first_zero_bit(bitmap->table, bitmap->max);
	}

	if (*obj < bitmap->max) {
		set_bit(*obj, bitmap->table);
		bitmap->last = (*obj + 1);
		if (bitmap->last == bitmap->max)
			bitmap->last = 0;
		*obj |= bitmap->top;
	} else {
		ret = -EINVAL;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

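/**
 * hns_roce_bitmap_free - release a single index
 * @bitmap: the allocator state
 * @obj: the index to release
 * @rr: round-robin flag; when zero, @last is rewound so the freed
 *      index becomes the next allocation candidate
 */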
void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
			  int rr)
{
	hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
}

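/**
 * hns_roce_bitmap_alloc_range - allocate @cnt contiguous indexes
 * @bitmap: the allocator state
 * @cnt: number of consecutive bits wanted
 * @align: required alignment of the first index (a power of two)
 * @obj: on success, the first allocated index ORed with @top
 *
 * Falls back to hns_roce_bitmap_alloc() for the common single-bit,
 * unaligned case; otherwise searches for a zero area of @cnt bits.
 *
 * Returns 0 on success or -EINVAL when no suitable area exists.
 */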
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
				int align, unsigned long *obj)
{
	int ret = 0;
	int i;

	if (likely(cnt == 1 && align == 1))
		return hns_roce_bitmap_alloc(bitmap, obj);

	spin_lock(&bitmap->lock);

	*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max,
					  bitmap->last, cnt, align - 1);
	if (*obj >= bitmap->max) {
		bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
			       & bitmap->mask;
		*obj = bitmap_find_next_zero_area(bitmap->table, bitmap->max, 0,
						  cnt, align - 1);
	}

	if (*obj < bitmap->max) {
		for (i = 0; i < cnt; i++)
			set_bit(*obj + i, bitmap->table);

		if (*obj == bitmap->last) {
			bitmap->last = (*obj + cnt);
			if (bitmap->last >= bitmap->max)
				bitmap->last = 0;
		}
		*obj |= bitmap->top;
	} else {
		ret = -EINVAL;
	}

	spin_unlock(&bitmap->lock);

	return ret;
}

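/**
 * hns_roce_bitmap_free_range - release @cnt contiguous indexes
 * @bitmap: the allocator state
 * @obj: first index to release; the @top prefix is masked off first
 * @cnt: number of consecutive bits to clear
 * @rr: round-robin flag; when zero, @last is rewound so the freed
 *      range becomes eligible for immediate reuse
 */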
void hns_roce_bitmap_free_range(struct hns_roce_bitmap *bitmap,
				unsigned long obj, int cnt,
				int rr)
{
	int i;

	obj &= bitmap->max + bitmap->reserved_top - 1;

	spin_lock(&bitmap->lock);
	for (i = 0; i < cnt; i++)
		clear_bit(obj + i, bitmap->table);

	if (!rr)
		bitmap->last = min(bitmap->last, obj);
	bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
		       & bitmap->mask;
	spin_unlock(&bitmap->lock);
}

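/**
 * hns_roce_bitmap_init - set up a wrapping bitmap allocator
 * @bitmap: the allocator state to initialize
 * @num: total number of indexes; must be a power of two
 * @mask: mask applied when @top wraps (typically @num - 1)
 * @reserved_bot: indexes at the bottom to mark busy permanently
 * @reserved_top: indexes at the top excluded from the table
 *
 * Returns 0 on success, -EINVAL if @num is not a power of two, or
 * -ENOMEM if the table cannot be allocated.
 */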
int hns_roce_bitmap_init(struct hns_roce_bitmap *bitmap, u32 num, u32 mask,
			 u32 reserved_bot, u32 reserved_top)
{
	u32 i;

	if (num != roundup_pow_of_two(num))
		return -EINVAL;

	bitmap->last = 0;
	bitmap->top = 0;
	bitmap->max = num - reserved_top;
	bitmap->mask = mask;
	bitmap->reserved_top = reserved_top;
	spin_lock_init(&bitmap->lock);
	bitmap->table = kcalloc(BITS_TO_LONGS(bitmap->max), sizeof(long),
				GFP_KERNEL);
	if (!bitmap->table)
		return -ENOMEM;

	for (i = 0; i < reserved_bot; ++i)
		set_bit(i, bitmap->table);

	return 0;
}

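/**
 * hns_roce_bitmap_cleanup - free the table behind a bitmap allocator
 * @bitmap: the allocator to tear down
 */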
void hns_roce_bitmap_cleanup(struct hns_roce_bitmap *bitmap)
{
	kfree(bitmap->table);
}

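/**
 * hns_roce_buf_free - release a buffer from hns_roce_buf_alloc()
 * @hr_dev: the owning device
 * @size: the size passed at allocation time
 * @buf: the buffer descriptor
 *
 * Undoes both layouts: a single coherent allocation, or a page list
 * plus the kernel mapping that vmap() created on 64-bit builds.
 */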
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
		       struct hns_roce_buf *buf)
{
	struct device *dev = &hr_dev->pdev->dev;
	int i;

	if (buf->nbufs == 1) {
		dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
	} else {
		if (BITS_PER_LONG == 64)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; ++i)
			if (buf->page_list[i].buf)
				dma_free_coherent(dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}

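/**
 * hns_roce_buf_alloc - allocate a DMA-coherent buffer for a queue
 * @hr_dev: the owning device
 * @size: requested size in bytes
 * @max_direct: threshold below which one contiguous allocation is used
 * @buf: the buffer descriptor to fill in
 *
 * Small buffers come from a single dma_alloc_coherent() call; if the
 * DMA address is not page aligned, @page_shift is lowered (and @npages
 * raised) until it matches the actual alignment.  Larger buffers are
 * built from PAGE_SIZE chunks, and on 64-bit kernels the chunks are
 * also stitched into one virtually contiguous mapping with vmap() so
 * @direct.buf stays usable.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure.
 */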
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
		       struct hns_roce_buf *buf)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct page **pages;
	dma_addr_t t;
	int i = 0;

	if (size <= max_direct) {
		buf->nbufs = 1;

		buf->npages = 1 << get_order(size);
		buf->page_shift = PAGE_SHIFT;

		buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		/* Shrink page_shift to the actual alignment of the DMA
		 * address, scaling npages up so the size stays covered.
		 */
		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}

		memset(buf->direct.buf, 0, size);
	} else {
		buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					 GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; ++i) {
			buf->page_list[i].buf = dma_alloc_coherent(dev,
								   PAGE_SIZE, &t,
								   GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
		}

		/* On 64-bit kernels, map the chunks into one contiguous
		 * kernel virtual range so callers can still use direct.buf.
		 */
		if (BITS_PER_LONG == 64) {
			pages = kmalloc_array(buf->nbufs, sizeof(*pages),
					      GFP_KERNEL);
			if (!pages)
				goto err_free;

			for (i = 0; i < buf->nbufs; ++i)
				pages[i] = virt_to_page(buf->page_list[i].buf);

			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
					       PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	hns_roce_buf_free(hr_dev, size, buf);
	return -ENOMEM;
}

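/**
 * hns_roce_cleanup_bitmap - tear down all per-resource allocators
 * @hr_dev: the device being shut down
 *
 * Releases the QP, CQ, MR, PD and UAR index tables in that order.
 */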
void hns_roce_cleanup_bitmap(struct hns_roce_dev *hr_dev)
{
	hns_roce_cleanup_qp_table(hr_dev);
	hns_roce_cleanup_cq_table(hr_dev);
	hns_roce_cleanup_mr_table(hr_dev);
	hns_roce_cleanup_pd_table(hr_dev);
	hns_roce_cleanup_uar_table(hr_dev);
}