#include <linux/kernel.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

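/* Ask firmware for a new UAR (User Access Region) index via the
 * ALLOC_UAR command. On success the index is returned in @uarn.
 */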
static int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
	if (err)
		return err;

	*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return 0;
}

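/* Return a UAR index to firmware via the DEALLOC_UAR command. */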
static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}

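/* Number of UARs carried by a single system page: taken from the
 * num_of_uars_per_page capability when the device supports 4K UARs,
 * otherwise one UAR per system page.
 */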
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, uar_4k))
		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

	return 1;
}

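/* Translate a UAR index into the PFN of the system page that backs it,
 * relative to the device BAR address.
 */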
static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
	u32 system_page_index;

	if (MLX5_CAP_GEN(mdev, uar_4k))
		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
	else
		system_page_index = index;

	return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
}

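/* kref release callback for a UARs page: unlink it from its list, unmap
 * it, free the UAR index in firmware and release the tracking structures.
 * Both call sites hold the owning list lock.
 */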
static void up_rel_func(struct kref *kref)
{
	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

	list_del(&up->list);
	iounmap(up->map);
	if (mlx5_cmd_free_uar(up->mdev, up->index))
		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
	bitmap_free(up->reg_bitmap);
	bitmap_free(up->fp_bitmap);
	kfree(up);
}

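/* Allocate and map one UARs page: reserve a UAR index from firmware,
 * build the regular and fast-path bfreg bitmaps and ioremap the page
 * (write-combining when @map_wc is set).
 */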
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
					      bool map_wc)
{
	struct mlx5_uars_page *up;
	int err = -ENOMEM;
	phys_addr_t pfn;
	int bfregs;
	int node;
	int i;

	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
	node = mdev->priv.numa_node;
	up = kzalloc_node(sizeof(*up), GFP_KERNEL, node);
	if (!up)
		return ERR_PTR(err);

	up->mdev = mdev;
	up->reg_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
	if (!up->reg_bitmap)
		goto error1;

	up->fp_bitmap = bitmap_zalloc_node(bfregs, GFP_KERNEL, node);
	if (!up->fp_bitmap)
		goto error1;

	for (i = 0; i < bfregs; i++)
		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
			set_bit(i, up->reg_bitmap);
		else
			set_bit(i, up->fp_bitmap);

	up->bfregs = bfregs;
	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

	err = mlx5_cmd_alloc_uar(mdev, &up->index);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
		goto error1;
	}

	pfn = uar2pfn(mdev, up->index);
	if (map_wc) {
		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -EAGAIN;
			goto error2;
		}
	} else {
		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -ENOMEM;
			goto error2;
		}
	}
	kref_init(&up->ref_count);
	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
		      up->index, up->bfregs);
	return up;

error2:
	if (mlx5_cmd_free_uar(mdev, up->index))
		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
	bitmap_free(up->fp_bitmap);
	bitmap_free(up->reg_bitmap);
	kfree(up);
	return ERR_PTR(err);
}

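/* Return a reference-counted UARs page for kernel use. Reuses the first
 * page on the non-write-combining list when one exists, otherwise
 * allocates a new page and adds it to that list.
 */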
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_uars_page *ret;

	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
				       struct mlx5_uars_page, list);
		kref_get(&ret->ref_count);
		goto out;
	}
	ret = alloc_uars_page(mdev, false);
	if (IS_ERR(ret))
		goto out;
	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

	return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

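/* Drop a reference obtained by mlx5_get_uars_page(); the page is released
 * when the last reference goes away.
 */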
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);

static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
	/* return the offset in bytes from the start of the system page to
	 * the blue flame area of bfreg 'dbi'
	 */
	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
	       (dbi % MLX5_BFREGS_PER_UAR) *
	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

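/* Reserve one blue flame register (bfreg). A page with free slots is taken
 * from the matching list (write-combining or regular) or freshly allocated;
 * a fully used page is removed from the list so it is not considered for
 * further allocations.
 */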
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		       bool map_wc, bool fast_path)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct list_head *head;
	unsigned long *bitmap;
	unsigned int *avail;
	struct mutex *lock;
	int dbi;

	bfregs = &mdev->priv.bfregs;
	if (map_wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	mutex_lock(lock);
	if (list_empty(head)) {
		up = alloc_uars_page(mdev, map_wc);
		if (IS_ERR(up)) {
			mutex_unlock(lock);
			return PTR_ERR(up);
		}
		list_add(&up->list, head);
	} else {
		up = list_entry(head->next, struct mlx5_uars_page, list);
		kref_get(&up->ref_count);
	}
	if (fast_path) {
		bitmap = up->fp_bitmap;
		avail = &up->fp_avail;
	} else {
		bitmap = up->reg_bitmap;
		avail = &up->reg_avail;
	}
	dbi = find_first_bit(bitmap, up->bfregs);
	clear_bit(dbi, bitmap);
	(*avail)--;
	if (!(*avail))
		list_del(&up->list);

	bfreg->map = up->map + map_offset(mdev, dbi);
	bfreg->up = up;
	bfreg->wc = map_wc;
	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
	mutex_unlock(lock);

	return 0;
}

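/* Allocate a bfreg, falling back to a non-write-combining mapping if a
 * write-combining mapping was requested but could not be created.
 */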
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path)
{
	int err;

	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
	if (!err)
		return 0;

	if (err == -EAGAIN && map_wc)
		return alloc_bfreg(mdev, bfreg, false, fast_path);

	return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);

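/* Recover the bfreg index within its system page from the mapped address
 * stored in @bfreg.
 */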
static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
					   struct mlx5_uars_page *up,
					   struct mlx5_sq_bfreg *bfreg)
{
	unsigned int uar_idx;
	unsigned int bfreg_idx;
	unsigned int bf_reg_size;

	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

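/* Release a bfreg: mark its slot free again, put the page back on the
 * proper list once it has a free slot, and drop the page reference.
 */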
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct mutex *lock;
	unsigned int dbi;
	bool fp;
	unsigned int *avail;
	unsigned long *bitmap;
	struct list_head *head;

	bfregs = &mdev->priv.bfregs;
	if (bfreg->wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	up = bfreg->up;
	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
	if (fp) {
		avail = &up->fp_avail;
		bitmap = up->fp_bitmap;
	} else {
		avail = &up->reg_avail;
		bitmap = up->reg_bitmap;
	}
	mutex_lock(lock);
	(*avail)++;
	set_bit(dbi, bitmap);
	if (*avail == 1)
		list_add_tail(&up->list, head);

	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);