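/* aq_vec: a "vector" groups one Tx and one Rx ring per traffic class and
 * services them from a single NAPI context driven by one interrupt.
 */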
#include "aq_vec.h"
#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_hw.h"

#include <linux/netdevice.h>

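/* Per-vector state: hardware ops and handles, the owning NIC, ring parameters
 * (vector index, CPU affinity), the NAPI context and one Tx/Rx ring pair per
 * traffic class.
 */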
struct aq_vec_s {
	struct aq_obj_s header;
	struct aq_hw_ops *aq_hw_ops;
	struct aq_hw_s *aq_hw;
	struct aq_nic_s *aq_nic;
	unsigned int tx_rings;
	unsigned int rx_rings;
	struct aq_ring_param_s aq_ring_param;
	struct napi_struct napi;
	struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
};

#define AQ_VEC_TX_ID 0
#define AQ_VEC_RX_ID 1

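/* NAPI poll: for each ring pair, clean completed Tx descriptors, pass received
 * packets up the stack and refill the Rx ring; the vector's interrupt is
 * re-enabled only when less than the full budget was consumed.
 */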
static int aq_vec_poll(struct napi_struct *napi, int budget)
{
	struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
	struct aq_ring_s *ring = NULL;
	int work_done = 0;
	int err = 0;
	unsigned int i = 0U;
	unsigned int sw_tail_old = 0U;
	bool was_tx_cleaned = false;

	if (!self) {
		err = -EINVAL;
	} else {
		for (i = 0U, ring = self->ring[0];
		     self->tx_rings > i; ++i, ring = self->ring[i]) {
			if (self->aq_hw_ops->hw_ring_tx_head_update) {
				err = self->aq_hw_ops->hw_ring_tx_head_update(
					self->aq_hw,
					&ring[AQ_VEC_TX_ID]);
				if (err < 0)
					goto err_exit;
			}

			if (ring[AQ_VEC_TX_ID].sw_head !=
			    ring[AQ_VEC_TX_ID].hw_head) {
				aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
				aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
				was_tx_cleaned = true;
			}

			err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
					&ring[AQ_VEC_RX_ID]);
			if (err < 0)
				goto err_exit;

			if (ring[AQ_VEC_RX_ID].sw_head !=
			    ring[AQ_VEC_RX_ID].hw_head) {
				err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
						       napi,
						       &work_done,
						       budget - work_done);
				if (err < 0)
					goto err_exit;

				sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;

				err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
				if (err < 0)
					goto err_exit;

				err = self->aq_hw_ops->hw_ring_rx_fill(
					self->aq_hw,
					&ring[AQ_VEC_RX_ID], sw_tail_old);
				if (err < 0)
					goto err_exit;
			}
		}

		if (was_tx_cleaned)
			work_done = budget;

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			self->aq_hw_ops->hw_irq_enable(self->aq_hw,
					1U << self->aq_ring_param.vec_idx);
		}
	}
err_exit:
	return work_done;
}

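/* Allocate a vector and a Tx/Rx ring pair for every configured traffic class,
 * record a CPU affinity mask derived from the vector index and register the
 * NAPI poll handler.
 */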
struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
			      struct aq_nic_cfg_s *aq_nic_cfg)
{
	struct aq_vec_s *self = NULL;
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self) {
		err = -ENOMEM;
		goto err_exit;
	}

	self->aq_nic = aq_nic;
	self->aq_ring_param.vec_idx = idx;
	self->aq_ring_param.cpu =
		idx + aq_nic_cfg->aq_rss.base_cpu_number;

	cpumask_set_cpu(self->aq_ring_param.cpu,
			&self->aq_ring_param.affinity_mask);

	self->tx_rings = 0;
	self->rx_rings = 0;

	netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
		       aq_vec_poll, AQ_CFG_NAPI_WEIGHT);

	for (i = 0; i < aq_nic_cfg->tcs; ++i) {
		unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
						self->tx_rings,
						self->aq_ring_param.vec_idx);

		ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->tx_rings;

		aq_nic_set_tx_ring(aq_nic, idx_ring, ring);

		ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
					idx_ring, aq_nic_cfg);
		if (!ring) {
			err = -ENOMEM;
			goto err_exit;
		}

		++self->rx_rings;
	}

err_exit:
	if (err < 0) {
		aq_vec_free(self);
		self = NULL;
	}
	return self;
}

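/* Bind the vector to the hardware ops, initialize every Tx/Rx ring in software
 * and hardware, and pre-fill the Rx rings with buffers.
 */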
int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
		struct aq_hw_s *aq_hw)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->aq_hw_ops = aq_hw_ops;
	self->aq_hw = aq_hw;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
						       &ring[AQ_VEC_TX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
						       &ring[AQ_VEC_RX_ID],
						       &self->aq_ring_param);
		if (err < 0)
			goto err_exit;

		err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
						       &ring[AQ_VEC_RX_ID], 0U);
		if (err < 0)
			goto err_exit;
	}

err_exit:
	return err;
}

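/* Start all hardware Tx/Rx rings of the vector and enable its NAPI context. */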
int aq_vec_start(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;
	int err = 0;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
							&ring[AQ_VEC_TX_ID]);
		if (err < 0)
			goto err_exit;

		err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
							&ring[AQ_VEC_RX_ID]);
		if (err < 0)
			goto err_exit;
	}

	napi_enable(&self->napi);

err_exit:
	return err;
}

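/* Stop all hardware Tx/Rx rings of the vector and disable its NAPI context. */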
void aq_vec_stop(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
						 &ring[AQ_VEC_TX_ID]);

		self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
						 &ring[AQ_VEC_RX_ID]);
	}

	napi_disable(&self->napi);
}

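/* Release pending buffers: clean outstanding Tx descriptors and free the
 * buffers still held by the Rx rings.
 */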
void aq_vec_deinit(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
		aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
	}
err_exit:;
}

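/* Free all ring memory, unregister the NAPI handler and free the vector
 * itself; safe to call with a NULL pointer.
 */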
void aq_vec_free(struct aq_vec_s *self)
{
	struct aq_ring_s *ring = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, ring = self->ring[0];
	     self->tx_rings > i; ++i, ring = self->ring[i]) {
		aq_ring_free(&ring[AQ_VEC_TX_ID]);
		aq_ring_free(&ring[AQ_VEC_RX_ID]);
	}

	netif_napi_del(&self->napi);

	kfree(self);

err_exit:;
}

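/* Per-vector (MSI-X) interrupt handler: schedule NAPI for this vector. */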
irqreturn_t aq_vec_isr(int irq, void *private)
{
	struct aq_vec_s *self = private;
	int err = 0;

	if (!self) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&self->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

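/* Legacy (shared line) interrupt handler: read the interrupt status; if this
 * vector raised it, mask the vector's interrupt and schedule NAPI, otherwise
 * report that the interrupt was not ours.
 */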
irqreturn_t aq_vec_isr_legacy(int irq, void *private)
{
	struct aq_vec_s *self = private;
	u64 irq_mask = 0U;
	irqreturn_t ret = IRQ_NONE;
	int err;

	if (!self)
		goto err_exit;

	err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
	if (err < 0)
		goto err_exit;

	if (irq_mask) {
		self->aq_hw_ops->hw_irq_disable(self->aq_hw,
				1U << self->aq_ring_param.vec_idx);
		napi_schedule(&self->napi);
		ret = IRQ_HANDLED;
	} else {
		self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
	}

err_exit:
	return ret;
}

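/* Return the CPU affinity mask computed for this vector at allocation time. */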
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
{
	return &self->aq_ring_param.affinity_mask;
}

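/* Accumulate this vector's per-ring Rx/Tx statistics into the caller-provided
 * counters.
 */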
void aq_vec_add_stats(struct aq_vec_s *self,
		      struct aq_ring_stats_rx_s *stats_rx,
		      struct aq_ring_stats_tx_s *stats_tx)
{
	struct aq_ring_s *ring = NULL;
	unsigned int r = 0U;

	for (r = 0U, ring = self->ring[0];
	     self->tx_rings > r; ++r, ring = self->ring[r]) {
		struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
		struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;

		stats_rx->packets += rx->packets;
		stats_rx->bytes += rx->bytes;
		stats_rx->errors += rx->errors;
		stats_rx->jumbo_packets += rx->jumbo_packets;
		stats_rx->lro_packets += rx->lro_packets;

		stats_tx->packets += tx->packets;
		stats_tx->bytes += tx->bytes;
		stats_tx->errors += tx->errors;
		stats_tx->queue_restarts += tx->queue_restarts;
	}
}

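/* Add this vector's software counters into the data array in a fixed order
 * and report how many entries were touched through p_count.
 */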
int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
{
	unsigned int count = 0U;
	struct aq_ring_stats_rx_s stats_rx;
	struct aq_ring_stats_tx_s stats_tx;

	memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
	memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
	aq_vec_add_stats(self, &stats_rx, &stats_tx);

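	/* Entry order is fixed: rx packets, tx packets, tx queue restarts,
	 * rx jumbo packets, rx LRO packets, rx errors.
	 */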
	data[count] += stats_rx.packets;
	data[++count] += stats_tx.packets;
	data[++count] += stats_tx.queue_restarts;
	data[++count] += stats_rx.jumbo_packets;
	data[++count] += stats_rx.lro_packets;
	data[++count] += stats_rx.errors;

	if (p_count)
		*p_count = ++count;

	return 0;
}