#include <linux/pci.h>

#include "ena_netdev.h"

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset;
};

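/* Each ENA_STAT_*_ENTRY macro records a stat's name together with its offset,
 * expressed in units of u64 words, inside the structure that holds it.
 */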
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \
}

#define ENA_STAT_HW_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_HW_ENTRY(stat, eni_stats)

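/* Name tables for the device-global, ENI (hardware), per-queue Tx/Rx and
 * admin-queue statistics reported through ethtool -S.
 */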
static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(suspend),
	ENA_STAT_GLOBAL_ENTRY(resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(napi_comp),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(llq_buffer_copy),
	ENA_STAT_TX_ENTRY(missed_tx),
	ENA_STAT_TX_ENTRY(unmask_interrupt),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
	ENA_STAT_RX_ENTRY(csum_good),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
	ENA_STAT_RX_ENTRY(empty_rx_ring),
	ENA_STAT_RX_ENTRY(csum_unchecked),
	ENA_STAT_RX_ENTRY(xdp_aborted),
	ENA_STAT_RX_ENTRY(xdp_drop),
	ENA_STAT_RX_ENTRY(xdp_pass),
	ENA_STAT_RX_ENTRY(xdp_tx),
	ENA_STAT_RX_ENTRY(xdp_invalid),
	ENA_STAT_RX_ENTRY(xdp_redirect),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)
#define ENA_STATS_ARRAY_ENI(adapter) \
	(ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)

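/* Copy a single u64 statistic under the u64_stats_sync sequence counter so
 * the value read is consistent, also on 32-bit architectures.
 */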
static void ena_safe_update_stat(u64 *src, u64 *dst,
				 struct u64_stats_sync *syncp)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*(dst) = *src;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}

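/* Gather per-queue Tx statistics and, for non-XDP queues, Rx statistics into
 * the ethtool data buffer, advancing *data as values are written.
 */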
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	struct ena_ring *ring;

	u64 *ptr;
	int i, j;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		/* Tx stats */
		ring = &adapter->tx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset;

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}

		/* XDP Tx queues do not have an Rx queue counterpart */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* Rx stats */
			ring = &adapter->rx_ring[i];

			for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
				ena_stats = &ena_stats_rx_strings[j];

				ptr = (u64 *)&ring->rx_stats +
					ena_stats->stat_offset;

				ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
			}
		}
	}
}

static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	u64 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		ptr = (u64 *)&adapter->ena_dev->admin_queue.stats +
			ena_stats->stat_offset;

		*(*data)++ = *ptr;
	}
}

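/* Fill the ethtool stats buffer in the same order as the strings reported by
 * ena_get_strings(): global driver stats, optional ENI (hardware) stats,
 * per-queue stats and admin-queue stats.
 */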
static void ena_get_stats(struct ena_adapter *adapter,
			  u64 *data,
			  bool eni_stats_needed)
{
	const struct ena_stats *ena_stats;
	u64 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];

		ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset;

		ena_safe_update_stat(ptr, data++, &adapter->syncp);
	}

	if (eni_stats_needed) {
		ena_update_hw_stats(adapter);
		for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
			ena_stats = &ena_stats_eni_strings[i];

			ptr = (u64 *)&adapter->eni_stats +
				ena_stats->stat_offset;

			ena_safe_update_stat(ptr, data++, &adapter->syncp);
		}
	}

	ena_queue_stats(adapter, &data);
	ena_dev_admin_queue_stats(adapter, &data);
}

static void ena_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	ena_get_stats(adapter, data, adapter->eni_stats_supported);
}

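/* Number of SW (driver) stats: Tx and Rx entries for each IO queue, Tx
 * entries for each XDP queue, plus the global and admin-queue entries.
 */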
static int ena_get_sw_stats_count(struct ena_adapter *adapter)
{
	return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
		+ adapter->xdp_num_queues * ENA_STATS_ARRAY_TX
		+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
	return ENA_STATS_ARRAY_ENI(adapter);
}

int ena_get_sset_count(struct net_device *netdev, int sset)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter);
}

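/* Emit the per-queue stat names in the same order that ena_queue_stats()
 * emits the values: "queue_<i>_tx_*" (or "queue_<i>_xdp_tx_*"), followed by
 * "queue_<i>_rx_*" for non-XDP queues.
 */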
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
	const struct ena_stats *ena_stats;
	bool is_xdp;
	int i, j;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		is_xdp = ENA_IS_XDP_INDEX(adapter, i);

		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			ethtool_sprintf(data,
					"queue_%u_%s_%s", i,
					is_xdp ? "xdp_tx" : "tx",
					ena_stats->name);
		}

		if (!is_xdp) {
			/* Rx queue strings. XDP queues do not have an Rx
			 * counterpart, so no Rx strings are emitted for them.
			 */
			for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
				ena_stats = &ena_stats_rx_strings[j];

				ethtool_sprintf(data,
						"queue_%u_rx_%s", i,
						ena_stats->name);
			}
		}
	}
}

static void ena_com_dev_strings(u8 **data)
{
	const struct ena_stats *ena_stats;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		ethtool_sprintf(data,
				"ena_admin_q_%s", ena_stats->name);
	}
}

static void ena_get_strings(struct ena_adapter *adapter,
			    u8 *data,
			    bool eni_stats_needed)
{
	const struct ena_stats *ena_stats;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];
		ethtool_sprintf(&data, ena_stats->name);
	}

	if (eni_stats_needed) {
		for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
			ena_stats = &ena_stats_eni_strings[i];
			ethtool_sprintf(&data, ena_stats->name);
		}
	}

	ena_queue_strings(adapter, &data);
	ena_com_dev_strings(&data);
}

static void ena_get_ethtool_strings(struct net_device *netdev,
				    u32 sset,
				    u8 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return;

	ena_get_strings(adapter, data, adapter->eni_stats_supported);
}

static int ena_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_admin_get_feature_link_desc *link;
	struct ena_admin_get_feat_resp feat_resp;
	int rc;

	rc = ena_com_get_link_params(ena_dev, &feat_resp);
	if (rc)
		return rc;

	link = &feat_resp.u.link;
	link_ksettings->base.speed = link->speed;

	if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);
	}

	link_ksettings->base.autoneg =
		(link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	link_ksettings->base.duplex = DUPLEX_FULL;

	return 0;
}

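/* Report the non-adaptive moderation intervals in microseconds; the device
 * keeps them in units of intr_delay_resolution, hence the multiplication.
 */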
static int ena_get_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;

	if (!ena_com_interrupt_moderation_supported(ena_dev))
		return -EOPNOTSUPP;

	coalesce->tx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
			ena_dev->intr_delay_resolution;

	coalesce->rx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) *
			ena_dev->intr_delay_resolution;

	coalesce->use_adaptive_rx_coalesce =
		ena_com_get_adaptive_moderation_enabled(ena_dev);

	return 0;
}

static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->tx_ring[i].smoothed_interval = val;
}

static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].smoothed_interval = val;
}

static int ena_set_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	if (!ena_com_interrupt_moderation_supported(ena_dev))
		return -EOPNOTSUPP;

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
								coalesce->tx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_tx_rings_nonadaptive_intr_moderation(adapter);

	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
								coalesce->rx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_rx_rings_nonadaptive_intr_moderation(adapter);

	if (coalesce->use_adaptive_rx_coalesce &&
	    !ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_enable_adaptive_moderation(ena_dev);

	if (!coalesce->use_adaptive_rx_coalesce &&
	    ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

static u32 ena_get_msglevel(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ena_set_msglevel(struct net_device *netdev, u32 value)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = value;
}

static void ena_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static void ena_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	ring->tx_max_pending = adapter->max_tx_ring_size;
	ring->rx_max_pending = adapter->max_rx_ring_size;
	ring->tx_pending = adapter->tx_ring[0].ring_size;
	ring->rx_pending = adapter->rx_ring[0].ring_size;
}

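/* Clamp the requested ring sizes to at least ENA_MIN_RING_SIZE and round them
 * down to a power of two before applying them.
 */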
static int ena_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_size, new_rx_size;

	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
		ENA_MIN_RING_SIZE : ring->tx_pending;
	new_tx_size = rounddown_pow_of_two(new_tx_size);

	new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
		ENA_MIN_RING_SIZE : ring->rx_pending;
	new_rx_size = rounddown_pow_of_two(new_rx_size);

	if (new_tx_size == adapter->requested_tx_ring_size &&
	    new_rx_size == adapter->requested_rx_ring_size)
		return 0;

	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
}

static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
{
	u32 data = 0;

	if (hash_fields & ENA_ADMIN_RSS_L2_DA)
		data |= RXH_L2DA;

	if (hash_fields & ENA_ADMIN_RSS_L3_DA)
		data |= RXH_IP_DST;

	if (hash_fields & ENA_ADMIN_RSS_L3_SA)
		data |= RXH_IP_SRC;

	if (hash_fields & ENA_ADMIN_RSS_L4_DP)
		data |= RXH_L4_B_2_3;

	if (hash_fields & ENA_ADMIN_RSS_L4_SP)
		data |= RXH_L4_B_0_1;

	return data;
}

static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
{
	u16 data = 0;

	if (hash_fields & RXH_L2DA)
		data |= ENA_ADMIN_RSS_L2_DA;

	if (hash_fields & RXH_IP_DST)
		data |= ENA_ADMIN_RSS_L3_DA;

	if (hash_fields & RXH_IP_SRC)
		data |= ENA_ADMIN_RSS_L3_SA;

	if (hash_fields & RXH_L4_B_2_3)
		data |= ENA_ADMIN_RSS_L4_DP;

	if (hash_fields & RXH_L4_B_0_1)
		data |= ENA_ADMIN_RSS_L4_SP;

	return data;
}

static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;
	int rc;

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
	if (rc)
		return rc;

	cmd->data = ena_flow_hash_to_flow_type(hash_fields);

	return 0;
}

static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	hash_fields = ena_flow_data_to_flow_hash(cmd->data);

	return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}

static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		rc = ena_set_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
			 u32 *rules)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_io_queues;
		rc = 0;
		break;
	case ETHTOOL_GRXFH:
		rc = ena_get_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
{
	return ENA_RX_RSS_TABLE_SIZE;
}

static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
	return ENA_HASH_KEY_SIZE;
}

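/* Translate the combined-channel indices from ethtool into device Rx queue
 * indices and program the RSS indirection table.
 */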
static int ena_indirection_table_set(struct ena_adapter *adapter,
				     const u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev,
						       i,
						       ENA_IO_RXQ_IDX(indir[i]));
		if (unlikely(rc)) {
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot fill indirect table (index is too large)\n");
			return rc;
		}
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (rc) {
		netif_err(adapter, drv, adapter->netdev,
			  "Cannot set indirect table\n");
		return rc == -EPERM ? -EOPNOTSUPP : rc;
	}
	return rc;
}

static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	if (!indir)
		return 0;

	rc = ena_com_indirect_table_get(ena_dev, indir);
	if (rc)
		return rc;

	/* The device stores Rx queue indices in the indirection table;
	 * convert them back to the combined-channel indices that ethtool
	 * expects.
	 */
	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
		indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);

	return rc;
}

static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	enum ena_admin_hash_functions ena_func;
	u8 func;
	int rc;

	rc = ena_indirection_table_get(adapter, indir);
	if (rc)
		return rc;

	/* Query the hash function first to check whether the device supports
	 * getting/setting it at all.
	 */
	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			rc = 0;

		return rc;
	}

	rc = ena_com_get_hash_key(adapter->ena_dev, key);
	if (rc)
		return rc;

	switch (ena_func) {
	case ENA_ADMIN_TOEPLITZ:
		func = ETH_RSS_HASH_TOP;
		break;
	case ENA_ADMIN_CRC32:
		func = ETH_RSS_HASH_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter is not supported\n");
		return -EOPNOTSUPP;
	}

	if (hfunc)
		*hfunc = func;

	return 0;
}

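/* Apply a new RSS configuration: optional indirection table, hash function
 * and hash key.
 */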
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	enum ena_admin_hash_functions func = 0;
	int rc;

	if (indir) {
		rc = ena_indirection_table_set(adapter, indir);
		if (rc)
			return rc;
	}

	switch (hfunc) {
	case ETH_RSS_HASH_NO_CHANGE:
		func = ena_com_get_current_hash_function(ena_dev);
		break;
	case ETH_RSS_HASH_TOP:
		func = ENA_ADMIN_TOEPLITZ;
		break;
	case ETH_RSS_HASH_CRC32:
		func = ENA_ADMIN_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
			  hfunc);
		return -EOPNOTSUPP;
	}

	if (key || func) {
		rc = ena_com_fill_hash_function(ena_dev, func, key,
						ENA_HASH_KEY_SIZE,
						0xFFFFFFFF);
		if (unlikely(rc)) {
			netif_err(adapter, drv, netdev, "Cannot fill key\n");
			return rc == -EPERM ? -EOPNOTSUPP : rc;
		}
	}

	return 0;
}

static void ena_get_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	channels->max_combined = adapter->max_num_io_queues;
	channels->combined_count = adapter->num_io_queues;
}

static int ena_set_channels(struct net_device *netdev,
			    struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 count = channels->combined_count;

	if (count < ENA_MIN_NUM_IO_QUEUES ||
	    (ena_xdp_present(adapter) &&
	     !ena_xdp_legal_queue_count(adapter, count)))
		return -EINVAL;

	return ena_update_queue_count(adapter, count);
}

static int ena_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna, void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = adapter->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ena_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna,
			   const void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;
	u32 len;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		len = *(u32 *)data;
		if (len > adapter->netdev->mtu) {
			ret = -EINVAL;
			break;
		}
		adapter->rx_copybreak = len;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops ena_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_link_ksettings = ena_get_link_ksettings,
	.get_drvinfo = ena_get_drvinfo,
	.get_msglevel = ena_get_msglevel,
	.set_msglevel = ena_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_coalesce = ena_get_coalesce,
	.set_coalesce = ena_set_coalesce,
	.get_ringparam = ena_get_ringparam,
	.set_ringparam = ena_set_ringparam,
	.get_sset_count = ena_get_sset_count,
	.get_strings = ena_get_ethtool_strings,
	.get_ethtool_stats = ena_get_ethtool_stats,
	.get_rxnfc = ena_get_rxnfc,
	.set_rxnfc = ena_set_rxnfc,
	.get_rxfh_indir_size = ena_get_rxfh_indir_size,
	.get_rxfh_key_size = ena_get_rxfh_key_size,
	.get_rxfh = ena_get_rxfh,
	.set_rxfh = ena_set_rxfh,
	.get_channels = ena_get_channels,
	.set_channels = ena_set_channels,
	.get_tunable = ena_get_tunable,
	.set_tunable = ena_set_tunable,
	.get_ts_info = ethtool_op_get_ts_info,
};

void ena_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ena_ethtool_ops;
}

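/* Format the SW statistics either into the caller's buffer, one "name value"
 * line per stat, or, when no buffer is supplied, into the kernel log.
 */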
static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
{
	struct net_device *netdev = adapter->netdev;
	u8 *strings_buf;
	u64 *data_buf;
	int strings_num;
	int i, rc;

	strings_num = ena_get_sw_stats_count(adapter);
	if (strings_num <= 0) {
		netif_err(adapter, drv, netdev, "Can't get stats num\n");
		return;
	}

	strings_buf = devm_kcalloc(&adapter->pdev->dev,
				   ETH_GSTRING_LEN, strings_num,
				   GFP_ATOMIC);
	if (!strings_buf) {
		netif_err(adapter, drv, netdev,
			  "Failed to allocate strings_buf\n");
		return;
	}

	data_buf = devm_kcalloc(&adapter->pdev->dev,
				strings_num, sizeof(u64),
				GFP_ATOMIC);
	if (!data_buf) {
		netif_err(adapter, drv, netdev,
			  "Failed to allocate data buf\n");
		devm_kfree(&adapter->pdev->dev, strings_buf);
		return;
	}

	ena_get_strings(adapter, strings_buf, false);
	ena_get_stats(adapter, data_buf, false);

	/* If a buffer was supplied, append the stats to it; otherwise log them */
	if (buf)
		for (i = 0; i < strings_num; i++) {
			rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
				      "%s %llu\n",
				      strings_buf + i * ETH_GSTRING_LEN,
				      data_buf[i]);
			buf += rc;
		}
	else
		for (i = 0; i < strings_num; i++)
			netif_err(adapter, drv, netdev, "%s: %llu\n",
				  strings_buf + i * ETH_GSTRING_LEN,
				  data_buf[i]);

	devm_kfree(&adapter->pdev->dev, strings_buf);
	devm_kfree(&adapter->pdev->dev, data_buf);
}

void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
{
	if (!buf)
		return;

	ena_dump_stats_ex(adapter, buf);
}

void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
{
	ena_dump_stats_ex(adapter, NULL);
}