#include <linux/irq.h>
#include "en.h"
#include "en/txrx.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
#include "en_accel/ktls_txrx.h"

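/* Check whether this channel's NAPI is still running on a CPU in the
 * channel's IRQ affinity mask; if not, the IRQ has been migrated.
 */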
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
	int current_cpu = smp_processor_id();

	return cpumask_test_cpu(current_cpu, c->aff_mask);
}

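/* Feed a TX packet/byte sample into DIM (dynamic interrupt moderation),
 * which adapts the CQ moderation parameters to the observed load.
 */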
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
		return;

	dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&sq->dim, dim_sample);
}

static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
	struct mlx5e_rq_stats *stats = rq->stats;
	struct dim_sample dim_sample = {};

	if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
		return;

	dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
	net_dim(&rq->dim, dim_sample);
}

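/* Force an interrupt on the channel by posting a NOP WQE to the ICOSQ and
 * ringing its doorbell: the resulting completion event schedules NAPI.
 */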
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5e_tx_wqe *nopwqe;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_NOP,
		.num_wqebbs = 1,
	};

	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

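/* Run the XSK (AF_XDP) TX and RX datapath for one NAPI iteration and
 * report whether either direction still has outstanding work.
 */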
static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
{
	bool busy_xsk = false, xsk_rx_alloc_err;

	/* Handle the race between the application querying need_wakeup and the
	 * driver setting it:
	 * 1. Update need_wakeup both before and after the TX. If it goes to
	 * "yes", it can only happen with the first update.
	 * 2. If the application queried need_wakeup before we set it, the
	 * packets will be transmitted anyway, even w/o a wakeup.
	 * 3. If the application queried need_wakeup after we set it, the
	 * socket poll will be triggered, which will transmit the packets.
	 */
	mlx5e_xsk_update_tx_wakeup(xsksq);
	busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
	mlx5e_xsk_update_tx_wakeup(xsksq);

	xsk_rx_alloc_err = INDIRECT_CALL_2(xskrq->post_wqes,
					   mlx5e_post_rx_mpwqes,
					   mlx5e_post_rx_wqes,
					   xskrq);
	busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);

	return busy_xsk;
}

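/* Main NAPI handler for a channel: polls every TX, XDP, XSK and RX CQ,
 * refills the RX rings, and either re-arms the CQs or reports that the
 * budget was consumed so NAPI polls again.
 */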
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
					       napi);
	struct mlx5e_ch_stats *ch_stats = c->stats;
	struct mlx5e_xdpsq *xsksq = &c->xsksq;
	struct mlx5e_txqsq __rcu **qos_sqs;
	struct mlx5e_rq *xskrq = &c->xskrq;
	struct mlx5e_rq *rq = &c->rq;
	bool aff_change = false;
	bool busy_xsk = false;
	bool busy = false;
	int work_done = 0;
	u16 qos_sqs_size;
	bool xsk_open;
	int i;

	rcu_read_lock();

	qos_sqs = rcu_dereference(c->qos_sqs);

	xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);

	ch_stats->poll++;

	for (i = 0; i < c->num_tc; i++)
		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);

	if (unlikely(qos_sqs)) {
		smp_rmb(); /* Order the read of qos_sqs_size after the qos_sqs pointer. */
		qos_sqs_size = READ_ONCE(c->qos_sqs_size);

		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq)
				busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
		}
	}

	busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);

	if (c->xdp)
		busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);

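	/* budget == 0 (e.g. netpoll) means: don't poll the RX rings. */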
	if (likely(budget)) {
		if (xsk_open)
			work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);

		if (likely(budget - work_done))
			work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);

		busy |= work_done == budget;
	}

	mlx5e_poll_ico_cq(&c->icosq.cq);
	if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
		/* Don't clear the flag if nothing was polled to prevent
		 * queueing more WQEs and overflowing the async ICOSQ.
		 */
		clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);

	/* Keep after async ICOSQ CQ poll */
	if (unlikely(mlx5e_ktls_rx_pending_resync_list(c, budget)))
		busy |= mlx5e_ktls_rx_handle_resync_list(c, budget);

	busy |= INDIRECT_CALL_2(rq->post_wqes,
				mlx5e_post_rx_mpwqes,
				mlx5e_post_rx_wqes,
				rq);
	if (xsk_open) {
		busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
		busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
	}

	busy |= busy_xsk;

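	/* If any ring still has work and the IRQ affinity hasn't changed,
	 * keep polling; otherwise fall through and complete NAPI so it can
	 * be rescheduled on the new CPU.
	 */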
	if (busy) {
		if (likely(mlx5e_channel_no_affinity_change(c))) {
			work_done = budget;
			goto out;
		}
		ch_stats->aff_change++;
		aff_change = true;
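		/* Report less than the full budget so that
		 * napi_complete_done() below can finish NAPI.
		 */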
		if (budget && work_done == budget)
			work_done--;
	}

	if (unlikely(!napi_complete_done(napi, work_done)))
		goto out;

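	/* NAPI is complete: update DIM and re-arm every CQ so that the next
	 * completion raises an interrupt.
	 */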
	ch_stats->arm++;

	for (i = 0; i < c->num_tc; i++) {
		mlx5e_handle_tx_dim(&c->sq[i]);
		mlx5e_cq_arm(&c->sq[i].cq);
	}
	if (unlikely(qos_sqs)) {
		for (i = 0; i < qos_sqs_size; i++) {
			struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);

			if (sq) {
				mlx5e_handle_tx_dim(sq);
				mlx5e_cq_arm(&sq->cq);
			}
		}
	}

	mlx5e_handle_rx_dim(rq);

	mlx5e_cq_arm(&rq->cq);
	mlx5e_cq_arm(&c->icosq.cq);
	mlx5e_cq_arm(&c->async_icosq.cq);
	mlx5e_cq_arm(&c->xdpsq.cq);

	if (xsk_open) {
		mlx5e_handle_rx_dim(xskrq);
		mlx5e_cq_arm(&xsksq->cq);
		mlx5e_cq_arm(&xskrq->cq);
	}

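	/* XSK queues may still have work but no completion pending to raise
	 * the next interrupt: force one so NAPI resumes on the new CPU.
	 */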
	if (unlikely(aff_change && busy_xsk)) {
		mlx5e_trigger_irq(&c->icosq);
		ch_stats->force_irq++;
	}

out:
	rcu_read_unlock();

	return work_done;
}

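/* CQ completion event handler: count the event and schedule NAPI. */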
void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	napi_schedule(cq->napi);
	cq->event_ctr++;
	cq->ch_stats->events++;
}

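/* CQ error event handler: log the CQ number and the event type. */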
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
	struct net_device *netdev = cq->netdev;

	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
		   __func__, mcq->cqn, event);
}