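/*
 * en_cq.c - completion queue (CQ) handling for the mlx4 Ethernet driver:
 * creation and activation of RX/TX CQs, NAPI hookup, interrupt moderation
 * and doorbell arming.
 */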
#include <linux/mlx4/cq.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cmd.h>
#include <linux/interrupt.h>

#include "mlx4_en.h"

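/* mlx4_en does not act on asynchronous CQ events (e.g. CQ error), so the
 * event handler installed on every CQ is an empty stub.
 */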
static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
{
	return;
}

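/* Allocate the driver-side CQ context plus its hardware queue resources
 * (CQE buffer, doorbell record and MTT), preferably on the given NUMA
 * node. The CQ is not made known to the HW until mlx4_en_activate_cq().
 */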
int mlx4_en_create_cq(struct mlx4_en_priv *priv,
		      struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode,
		      int node)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	int err;

	/* Prefer the CQ's NUMA node; fall back to any node on failure */
	cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node);
	if (!cq) {
		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq) {
			en_err(priv, "Failed to allocate CQ structure\n");
			return -ENOMEM;
		}
	}

	cq->size = entries;
	cq->buf_size = cq->size * mdev->dev->caps.cqe_size;

	cq->ring = ring;
	cq->type = mode;
	/* No EQ vector assigned yet; num_comp_vectors marks "invalid" */
	cq->vector = mdev->dev->caps.num_comp_vectors;

	/* Allocate HW buffers on provided NUMA node.
	 * dev->numa_node is used in mtt range allocation flow.
	 */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
				 cq->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_cq;

	cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
	*pcq = cq;

	return 0;

err_cq:
	kfree(cq);
	*pcq = NULL;
	return err;
}

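/* Make a created CQ operational: initialize the doorbell records, pick a
 * completion vector (RX CQs get a dedicated EQ, TX CQs reuse the vector of
 * the RX CQ with the same index), allocate the HW CQ and hook up the
 * completion handler and NAPI context.
 */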
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int timestamp_en = 0;
	bool assigned_eq = false;

	cq->dev = mdev->pndev[priv->port];
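	/* The doorbell record holds two dwords: the consumer index
	 * (set_ci) followed by the arm sequence number.
	 */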
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

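	/* RX CQs get their own EQ vector, picked from the ring's CPU
	 * affinity mask; the vector is released in mlx4_en_destroy_cq().
	 */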
	if (cq->type == RX) {
		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
					     cq->vector)) {
			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

			err = mlx4_assign_eq(mdev->dev, priv->port,
					     &cq->vector);
			if (err) {
				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
					 cq->vector);
				goto free_eq;
			}

			assigned_eq = true;
		}

		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
	} else {
		/* For TX we use the same irq per ring that we
		 * assigned for the RX CQ with the same index.
		 */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (cq->type == RX)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	/* Enable HW timestamping on this CQ if the corresponding direction
	 * has timestamping configured.
	 */
	if ((cq->type != RX && priv->hwtstamp_config.tx_type) ||
	    (cq->type == RX && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	cq->mcq.usage = MLX4_RES_USAGE_DRIVER;
	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		goto free_eq;

	cq->mcq.event = mlx4_en_cq_event;

	switch (cq->type) {
	case TX:
		cq->mcq.comp = mlx4_en_tx_irq;
		netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
				  NAPI_POLL_WEIGHT);
		napi_enable(&cq->napi);
		break;
	case RX:
		cq->mcq.comp = mlx4_en_rx_irq;
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
		napi_enable(&cq->napi);
		break;
	case TX_XDP:
		/* Nothing regarding napi, it's shared with the RX ring */
		cq->xdp_busy = false;
		break;
	}

	return 0;

free_eq:
	if (assigned_eq)
		mlx4_release_eq(mdev->dev, cq->vector);
	cq->vector = mdev->dev->caps.num_comp_vectors;
	return err;
}

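/* Undo mlx4_en_create_cq(): free the HW queue resources, release the EQ
 * vector if this RX CQ owns one, and free the CQ structure itself.
 */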
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq = *pcq;

	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
	if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
	    cq->type == RX)
		mlx4_release_eq(priv->mdev->dev, cq->vector);
	cq->vector = 0;
	cq->buf_size = 0;
	cq->buf = NULL;
	kfree(cq);
	*pcq = NULL;
}

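/* Undo mlx4_en_activate_cq(): quiesce and remove the NAPI context (XDP TX
 * CQs share the RX ring's NAPI and have none of their own) and free the
 * HW CQ.
 */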
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	if (cq->type != TX_XDP) {
		napi_disable(&cq->napi);
		netif_napi_del(&cq->napi);
	}

	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}

/* Push the CQ's interrupt moderation parameters (max coalesced CQEs and
 * timeout) to the HW.
 */
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
			      cq->moder_cnt, cq->moder_time);
}

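/* Request an interrupt on the next completion written to this CQ */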
void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
		    &priv->mdev->uar_lock);
}