#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>

#include "mlx4_en.h"

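/*
 * Fill @context for QP @qpn: an RX queue (!is_tx && !rss), a TX queue
 * (is_tx), or the RSS indirection QP (rss).  @size and @stride describe
 * the queue geometry, @cqn is used for both send and receive completions,
 * and a negative @user_prio means no user priority is encoded.
 */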
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn,
			     int user_prio, struct mlx4_qp_context *context)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

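	/* Start from a zeroed context; flags carries the hardware service
	 * type in bits 16..23 (0x7 here) and, for the indirection QP, the
	 * RSS enable bit at MLX4_RSS_QPC_FLAG_OFFSET.
	 */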
	memset(context, 0, sizeof(*context));
	context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
	context->pd = cpu_to_be32(mdev->priv_pdn);
	context->mtu_msgmax = 0xff;
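	/* Queue geometry is log2-encoded: ilog2(entries) in bits 3..7,
	 * ilog2(stride in bytes) - 4 in bits 0..2.
	 */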
	if (!is_tx && !rss)
		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	if (is_tx) {
		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
			context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
	} else {
		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
	}
	context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
							     mdev->priv_uar.index));
	context->local_qpn = cpu_to_be32(qpn);
	context->pri_path.ackto = 1 & 0x07;
	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;

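	/* With QoS active (MLX4_EN_NUM_UP_HIGH user priorities), encode the
	 * user priority in sched_queue bits 3..5 and force the Ethernet
	 * user priority (FEUP).
	 */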
	if (user_prio >= 0 && priv->prof->num_up == MLX4_EN_NUM_UP_HIGH) {
		context->pri_path.sched_queue |= user_prio << 3;
		context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
	}
	context->pri_path.counter_index = priv->counter_index;
	context->cqn_send = cpu_to_be32(cqn);
	context->cqn_recv = cpu_to_be32(cqn);
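	/* If HW can check source MACs and a real flow counter is attached,
	 * drop looped-back multicast frames sent by this function itself,
	 * unless loopback was explicitly requested; the check is scoped to
	 * the QP's counter index.
	 */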
	if (!rss &&
	    (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
	    context->pri_path.counter_index !=
			MLX4_SINK_COUNTER_INDEX(mdev->dev)) {
		if (!(dev->features & NETIF_F_LOOPBACK))
			context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
		context->pri_path.control |= MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
	}
	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
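	/* param3 bit 30 is presumably the VLAN strip disable (VSD) bit:
	 * leave VLAN tags in place when CTAG RX offload is off.
	 */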
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
		context->param3 |= cpu_to_be32(1 << 30);

	if (!is_tx && !rss &&
	    (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
		en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
		context->srqn = cpu_to_be32(7 << 28);
	}
}
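
/*
 * Illustrative call (a sketch, not code from this file; "ring" stands for
 * a TX ring with the usual size/stride/qpn/cqn fields):
 *
 *	struct mlx4_qp_context ctx;
 *
 *	mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0,
 *				ring->qpn, ring->cqn, user_prio, &ctx);
 *
 * The in-tree callers live in en_tx.c and en_rx.c.
 */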
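/*
 * Enable or disable blocking of multicast loopback on an existing QP.
 * With @loopback false, the update asks HW to drop self-originated
 * multicast frames looped back to this QP.
 */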
int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
			    int loopback)
{
	struct mlx4_update_qp_params qp_params;

	memset(&qp_params, 0, sizeof(qp_params));
	if (!loopback)
		qp_params.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB;

	return mlx4_update_qp(priv->mdev->dev, qp->qpn,
			      MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB,
			      &qp_params);
}
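
/*
 * Sketch of a caller (hypothetical names): toggle loopback blocking when
 * the NETIF_F_LOOPBACK feature bit changes, e.g.
 *
 *	err = mlx4_en_change_mcast_lb(priv, qp,
 *				      !!(features & NETIF_F_LOOPBACK));
 */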
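/* QP async-event handler: events on these QPs require no action. */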
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
}