#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mlx4/qp.h>

#include "mlx4_en.h"
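
/*
 * mlx4_en_fill_qp_context - fill a hardware QP context for an mlx4_en queue.
 *
 * One helper covers TX rings, RX rings and the RSS parent QP: @is_tx
 * selects send vs. receive defaults, @rss is set for the RSS parent QP,
 * and @user_prio >= 0 pins the egress scheduling priority.
 */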
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn,
			     int user_prio, struct mlx4_qp_context *context)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	memset(context, 0, sizeof(*context));
	context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
	context->pd = cpu_to_be32(mdev->priv_pdn);
	context->mtu_msgmax = 0xff;
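	/* Ring sizes are programmed as log2 values; the stride is encoded
	 * relative to the 16-byte (2^4) basic unit.
	 */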
	if (!is_tx && !rss)
		context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
	if (is_tx) {
		context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
		/* params2 is big-endian, so the FPP bit needs a byte swap */
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
			context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
	} else {
		context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
	}
	context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
					mdev->priv_uar.index));
	context->local_qpn = cpu_to_be32(qpn);
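	/* sched_queue selects the physical port in bit 6; bits 5:3 carry the
	 * egress priority when the caller forces one (user_prio >= 0).
	 */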
	context->pri_path.ackto = 1 & 0x07;
	context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
	if (user_prio >= 0) {
		context->pri_path.sched_queue |= user_prio << 3;
		context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
	}
	context->pri_path.counter_index = priv->counter_index;
	context->cqn_send = cpu_to_be32(cqn);
	context->cqn_recv = cpu_to_be32(cqn);
	if (!rss &&
	    (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
	    context->pri_path.counter_index !=
			    MLX4_SINK_COUNTER_INDEX(mdev->dev)) {
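		/* disable multicast loopback to QPs attached to the same counter */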
		if (!(dev->features & NETIF_F_LOOPBACK))
			context->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
		context->pri_path.control |= MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
	}
	context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
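	/* When the stack has not asked for HW CTAG RX offload, flag the QP so
	 * the VLAN tag is left in the packet (param3 bit 30 is taken here to
	 * be the "no VLAN strip" flag, inferred from the feature check below).
	 */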
	if (!(dev->features & NETIF_F_HW_VLAN_CTAG_RX))
		context->param3 |= cpu_to_be32(1 << 30);

	if (!is_tx && !rss &&
	    (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)) {
		en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn);
		context->srqn = cpu_to_be32(7 << 28); /* this fills bits 30:28 */
	}
}
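
/*
 * mlx4_en_change_mcast_lb - toggle the multicast-loopback source check on
 * an existing QP.  Enabling loopback clears the check; disabling it sets
 * MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB.
 */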
int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
			    int loopback)
{
	int ret;
	struct mlx4_update_qp_params qp_params;

	memset(&qp_params, 0, sizeof(qp_params));
	if (!loopback)
		qp_params.flags = MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB;

	ret = mlx4_update_qp(priv->mdev->dev, qp->qpn,
			     MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB,
			     &qp_params);

	return ret;
}
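
/*
 * mlx4_en_map_buffer - make a multi-chunk buffer virtually contiguous.
 * On 64-bit kernels, or when the buffer was allocated as a single chunk,
 * buf->direct.buf is already valid; otherwise stitch the chunks together
 * with vmap().
 */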
int mlx4_en_map_buffer(struct mlx4_buf *buf)
{
	struct page **pages;
	int i;

	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return 0;

	pages = kmalloc_array(buf->nbufs, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < buf->nbufs; ++i)
		pages[i] = virt_to_page(buf->page_list[i].buf);

	buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!buf->direct.buf)
		return -ENOMEM;

	return 0;
}
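
/*
 * mlx4_en_unmap_buffer - undo mlx4_en_map_buffer(); only 32-bit multi-chunk
 * buffers actually hold a vmap()ed address.
 */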
void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
{
	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
		return;

	vunmap(buf->direct.buf);
}
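
/* QP async-event handler stub: mlx4_en queues do not act on these events. */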
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
}