#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/init.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40

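/* Dispatch an asynchronous event to a QP's event handler.  A reference is
 * taken under qp_table->lock so the QP cannot be freed while the handler
 * runs; dropping the last reference completes qp->free for mlx4_qp_free().
 */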
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

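/* Returns true when @qp is the master's QP0 on either port: either the real
 * QP0 pair (base_sqpn, base_sqpn + 1) or the PF's own proxy QP0 pair.
 */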
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp,
			 int *real_qp0, int *proxy_qp0)
{
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn +
			      8 * mlx4_master_func_num(dev);

	*proxy_qp0 = qp->qpn >= pf_proxy_offset &&
		     qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		    qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}

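/* Post the firmware command that moves a QP between states.  The op[][]
 * table maps (current state, next state) pairs to command opcodes; any
 * transition to RESET takes a shortcut that needs no mailbox.  On the
 * master, per-port QP0 activity is tracked as a side effect.
 */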
static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
			    enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
			    struct mlx4_qp_context *context,
			    enum mlx4_qp_optpar optpar,
			    int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			       MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	if ((cur_state == MLX4_QP_STATE_RTR) &&
	    (new_state == MLX4_QP_STATE_RTS) &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		context->roce_entropy =
			cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof(*context));

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);

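/* Reserve a range of QP numbers from the zone allocator.  A0 (low-latency)
 * requests go to the raw-Ethernet or RSS zones; BlueFlame requests are
 * limited to MLX4_MAX_BF_QP_RANGE entries and use the BF skip mask.
 */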
int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags, u8 usage)
{
	u32 in_modifier = RES_QP | (((u32)usage & 3) << 30);
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   in_modifier, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else
		__mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);

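/* Map the ICM backing a single QP: the QP context itself plus its AUXC,
 * ALTC, RDMARC and cMPT table entries.  On failure, everything mapped so
 * far is released in reverse order.
 */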
int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else
		__mlx4_qp_free_icm(dev, qpn);
}

struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	/* qp_table->lock is also taken from the async event path
	 * (mlx4_qp_event()), which runs in interrupt context, so interrupts
	 * must be disabled here to avoid a deadlock.
	 */
	spin_lock_irq(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);

	spin_unlock_irq(&qp_table->lock);
	return qp;
}

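/* Attach a caller-provided struct mlx4_qp to an already reserved QPN: map
 * its ICM, insert it in the radix tree used for event dispatch, and set up
 * its reference count and free completion.
 */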
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

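/* Build and post an UPDATE_QP command.  Each supported attribute sets the
 * matching bit in the primary-path or QP mask and fills the corresponding
 * field of the update context; unsupported attributes are rejected up front.
 */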
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2
		      & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -EOPNOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params =
			cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) {
			mlx4_warn(dev, "Granular QoS per VF is not enabled\n");
			err = -EOPNOTSUPP;
			goto out;
		}

		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

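/* Drop the initial reference and wait for any event handlers that still hold
 * the QP to finish before unmapping its ICM.
 */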
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY	2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY	1
#define MLX4_QP_TABLE_RAW_ETH_SIZE	256

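/* Carve the QP number space into zones for the zone allocator: a general
 * zone covering all QPs, an RSS zone over the reserved bottom range, and
 * raw-Ethernet (A0) zone(s) placed so that their QP numbers keep the
 * BlueFlame skip-mask bits clear.
 */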
static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	/* Carve the raw-Ethernet (A0) zone(s) out of the range covered by the
	 * RSS bitmap.  The placement logic below looks for a sub-range whose
	 * QP numbers keep the BlueFlame skip-mask bits clear, so that QPs
	 * allocated from it can be used with BlueFlame.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
	     k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* bf_mask spans the offsets below the lowest bit of
		 * MLX4_BF_QP_SKIP_MASK; clamping requested_size to it keeps a
		 * range from straddling a skip-mask boundary.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* from here on, last_offset has the skip-mask bits clear */

			if (last_offset > max_table_offset) {
				/* no room left below max_table_offset */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/* prefer the aligned candidate offset
					 * when it yields a larger range
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* reserve the chosen range inside the RSS bitmap; the
			 * skip mask excludes QPNs with bits 6 or 7 set
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* no suitable range: add a one-entry bitmap and
			 * immediately exhaust it, so this zone is never used
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0;
		     i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
		     i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

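/* Lay out the QP number space: reserved regions at the bottom and top, the
 * RSS/raw-Ethernet area, and the special QP block; then create the
 * allocation zones and tell the firmware where the special QPs start.
 */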
int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* Sum the QP counts reserved for firmware and the other bottom
	 * regions; the RSS/raw-Ethernet area and the special QP block are
	 * placed above this point, which must also clear the DMFS high-rate
	 * QPN range.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* the RSS bitmap covers a power-of-two range; the special QP block
	 * starts just above it, aligned to 8
	 */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	/* Sort the top regions by size (largest first) and carve them out of
	 * the top of the QP number space.
	 */
	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve the block of special QPs: 8 real SQPs in both native and
	 * SRIOV mode, and under SRIOV an additional 8 proxy SQPs plus 8
	 * tunnel QPs per function, laid out as all real SQPs, then all
	 * proxies, then all tunnels.
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* the proxy and tunnel SQP ranges follow the 8 real SQPs */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* per-port table of the PF's own proxy and tunnel QP numbers */
		dev->caps.spec_qps = kcalloc(dev->caps.num_ports,
					     sizeof(*dev->caps.spec_qps),
					     GFP_KERNEL);
		if (!dev->caps.spec_qps) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.spec_qps[k].qp0_proxy = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.spec_qps[k].qp0_tunnel = dev->caps.spec_qps[k].qp0_proxy + 8 * MLX4_MFUNC_MAX;
			dev->caps.spec_qps[k].qp1_proxy = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.spec_qps[k].qp1_tunnel = dev->caps.spec_qps[k].qp1_proxy + 8 * MLX4_MFUNC_MAX;
		}
	}

	/* tell the firmware where the special QP block starts */
	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.spec_qps);
	dev->caps.spec_qps = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof(*context));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

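/* Walk a QP through RST -> INIT -> RTR -> RTS using the caller's context,
 * updating *qp_state after each successful transition.
 */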
int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);

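/* Compute the RoCE source-port entropy for a QP by folding its own and its
 * remote QP number; __mlx4_qp_modify() programs this value into the QP
 * context on the RTR-to-RTS transition when RoCE v1/v2 is supported.
 */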
u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_context context;
	struct mlx4_qp qp;
	int err;

	qp.qpn = qpn;
	err = mlx4_qp_query(dev, &qp, &context);
	if (!err) {
		u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
		u16 folded_dst = folded_qp(dest_qpn);
		u16 folded_src = folded_qp(qpn);

		return (dest_qpn != qpn) ?
			((folded_dst ^ folded_src) | 0xC000) :
			folded_src | 0xC000;
	}
	return 0xdead;
}