1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include "i40evf.h"
29
30#include <linux/uaccess.h>
31
/* Describes one adapter-wide statistic exported through ethtool:
 * the name shown to userspace and the byte offset of the u64 counter
 * inside struct i40evf_adapter.
 */
struct i40evf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_offset;
};

/* Build an i40evf_stats entry for the given i40evf_adapter member */
#define I40EVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.stat_offset = offsetof(struct i40evf_adapter, _stat) \
}
41
42
/* Adapter-wide statistics; all offsets point into the current_stats
 * snapshot cached from the PF.
 */
static const struct i40evf_stats i40evf_gstrings_stats[] = {
	I40EVF_STAT("rx_bytes", current_stats.rx_bytes),
	I40EVF_STAT("rx_unicast", current_stats.rx_unicast),
	I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
	I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
	I40EVF_STAT("rx_discards", current_stats.rx_discards),
	I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
	I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
	I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
	I40EVF_STAT("tx_multicast", current_stats.tx_multicast),
	I40EVF_STAT("tx_broadcast", current_stats.tx_broadcast),
	I40EVF_STAT("tx_discards", current_stats.tx_discards),
	I40EVF_STAT("tx_errors", current_stats.tx_errors),
};

/* number of adapter-wide stats above */
#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
/* per-queue stats: packets and bytes for every Tx and every Rx ring */
#define I40EVF_QUEUE_STATS_LEN(_dev) \
	(((struct i40evf_adapter *)\
		netdev_priv(_dev))->num_active_queues \
		  * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40EVF_STATS_LEN(_dev) \
	(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
65
66
67
68
69
70
/* Describes one ethtool private flag: its user-visible name, the
 * I40EVF_FLAG_* bit it maps to in adapter->flags, and whether the
 * flag may be changed from userspace.
 */
struct i40evf_priv_flags {
	char flag_string[ETH_GSTRING_LEN];
	u32 flag;
	bool read_only;
};

/* Build an i40evf_priv_flags entry */
#define I40EVF_PRIV_FLAG(_name, _flag, _read_only) { \
	.flag_string = _name, \
	.flag = _flag, \
	.read_only = _read_only, \
}

static const struct i40evf_priv_flags i40evf_gstrings_priv_flags[] = {
	I40EVF_PRIV_FLAG("legacy-rx", I40EVF_FLAG_LEGACY_RX, 0),
};

/* number of private flags above */
#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_gstrings_priv_flags)
88
89
90
91
92
93
94
95
96
97static int i40evf_get_link_ksettings(struct net_device *netdev,
98 struct ethtool_link_ksettings *cmd)
99{
100 struct i40evf_adapter *adapter = netdev_priv(netdev);
101
102 ethtool_link_ksettings_zero_link_mode(cmd, supported);
103 cmd->base.autoneg = AUTONEG_DISABLE;
104 cmd->base.port = PORT_NONE;
105
106 switch (adapter->link_speed) {
107 case I40E_LINK_SPEED_40GB:
108 cmd->base.speed = SPEED_40000;
109 break;
110 case I40E_LINK_SPEED_25GB:
111#ifdef SPEED_25000
112 cmd->base.speed = SPEED_25000;
113#else
114 netdev_info(netdev,
115 "Speed is 25G, display not supported by this version of ethtool.\n");
116#endif
117 break;
118 case I40E_LINK_SPEED_20GB:
119 cmd->base.speed = SPEED_20000;
120 break;
121 case I40E_LINK_SPEED_10GB:
122 cmd->base.speed = SPEED_10000;
123 break;
124 case I40E_LINK_SPEED_1GB:
125 cmd->base.speed = SPEED_1000;
126 break;
127 case I40E_LINK_SPEED_100MB:
128 cmd->base.speed = SPEED_100;
129 break;
130 default:
131 break;
132 }
133 cmd->base.duplex = DUPLEX_FULL;
134
135 return 0;
136}
137
138
139
140
141
142
143
144
145
146static int i40evf_get_sset_count(struct net_device *netdev, int sset)
147{
148 if (sset == ETH_SS_STATS)
149 return I40EVF_STATS_LEN(netdev);
150 else if (sset == ETH_SS_PRIV_FLAGS)
151 return I40EVF_PRIV_FLAGS_STR_LEN;
152 else
153 return -EINVAL;
154}
155
156
157
158
159
160
161
162
163
164static void i40evf_get_ethtool_stats(struct net_device *netdev,
165 struct ethtool_stats *stats, u64 *data)
166{
167 struct i40evf_adapter *adapter = netdev_priv(netdev);
168 int i, j;
169 char *p;
170
171 for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
172 p = (char *)adapter + i40evf_gstrings_stats[i].stat_offset;
173 data[i] = *(u64 *)p;
174 }
175 for (j = 0; j < adapter->num_active_queues; j++) {
176 data[i++] = adapter->tx_rings[j].stats.packets;
177 data[i++] = adapter->tx_rings[j].stats.bytes;
178 }
179 for (j = 0; j < adapter->num_active_queues; j++) {
180 data[i++] = adapter->rx_rings[j].stats.packets;
181 data[i++] = adapter->rx_rings[j].stats.bytes;
182 }
183}
184
185
186
187
188
189
190
191
192
193static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
194{
195 struct i40evf_adapter *adapter = netdev_priv(netdev);
196 u8 *p = data;
197 int i;
198
199 if (sset == ETH_SS_STATS) {
200 for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) {
201 memcpy(p, i40evf_gstrings_stats[i].stat_string,
202 ETH_GSTRING_LEN);
203 p += ETH_GSTRING_LEN;
204 }
205 for (i = 0; i < adapter->num_active_queues; i++) {
206 snprintf(p, ETH_GSTRING_LEN, "tx-%u.packets", i);
207 p += ETH_GSTRING_LEN;
208 snprintf(p, ETH_GSTRING_LEN, "tx-%u.bytes", i);
209 p += ETH_GSTRING_LEN;
210 }
211 for (i = 0; i < adapter->num_active_queues; i++) {
212 snprintf(p, ETH_GSTRING_LEN, "rx-%u.packets", i);
213 p += ETH_GSTRING_LEN;
214 snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
215 p += ETH_GSTRING_LEN;
216 }
217 } else if (sset == ETH_SS_PRIV_FLAGS) {
218 for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
219 snprintf(p, ETH_GSTRING_LEN, "%s",
220 i40evf_gstrings_priv_flags[i].flag_string);
221 p += ETH_GSTRING_LEN;
222 }
223 }
224}
225
226
227
228
229
230
231
232
233
234
235
236static u32 i40evf_get_priv_flags(struct net_device *netdev)
237{
238 struct i40evf_adapter *adapter = netdev_priv(netdev);
239 u32 i, ret_flags = 0;
240
241 for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
242 const struct i40evf_priv_flags *priv_flags;
243
244 priv_flags = &i40evf_gstrings_priv_flags[i];
245
246 if (priv_flags->flag & adapter->flags)
247 ret_flags |= BIT(i);
248 }
249
250 return ret_flags;
251}
252
253
254
255
256
257
258static int i40evf_set_priv_flags(struct net_device *netdev, u32 flags)
259{
260 struct i40evf_adapter *adapter = netdev_priv(netdev);
261 u64 changed_flags;
262 u32 i;
263
264 changed_flags = adapter->flags;
265
266 for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
267 const struct i40evf_priv_flags *priv_flags;
268
269 priv_flags = &i40evf_gstrings_priv_flags[i];
270
271 if (priv_flags->read_only)
272 continue;
273
274 if (flags & BIT(i))
275 adapter->flags |= priv_flags->flag;
276 else
277 adapter->flags &= ~(priv_flags->flag);
278 }
279
280
281 changed_flags ^= adapter->flags;
282
283
284
285
286 if (changed_flags & I40EVF_FLAG_LEGACY_RX) {
287 if (netif_running(netdev)) {
288 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
289 schedule_work(&adapter->reset_task);
290 }
291 }
292
293 return 0;
294}
295
296
297
298
299
300
301
302static u32 i40evf_get_msglevel(struct net_device *netdev)
303{
304 struct i40evf_adapter *adapter = netdev_priv(netdev);
305
306 return adapter->msg_enable;
307}
308
309
310
311
312
313
314
315
316
317static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
318{
319 struct i40evf_adapter *adapter = netdev_priv(netdev);
320
321 if (I40E_DEBUG_USER & data)
322 adapter->hw.debug_mask = data;
323 adapter->msg_enable = data;
324}
325
326
327
328
329
330
331
332
333static void i40evf_get_drvinfo(struct net_device *netdev,
334 struct ethtool_drvinfo *drvinfo)
335{
336 struct i40evf_adapter *adapter = netdev_priv(netdev);
337
338 strlcpy(drvinfo->driver, i40evf_driver_name, 32);
339 strlcpy(drvinfo->version, i40evf_driver_version, 32);
340 strlcpy(drvinfo->fw_version, "N/A", 4);
341 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
342 drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
343}
344
345
346
347
348
349
350
351
352
353static void i40evf_get_ringparam(struct net_device *netdev,
354 struct ethtool_ringparam *ring)
355{
356 struct i40evf_adapter *adapter = netdev_priv(netdev);
357
358 ring->rx_max_pending = I40EVF_MAX_RXD;
359 ring->tx_max_pending = I40EVF_MAX_TXD;
360 ring->rx_pending = adapter->rx_desc_count;
361 ring->tx_pending = adapter->tx_desc_count;
362}
363
364
365
366
367
368
369
370
371
372static int i40evf_set_ringparam(struct net_device *netdev,
373 struct ethtool_ringparam *ring)
374{
375 struct i40evf_adapter *adapter = netdev_priv(netdev);
376 u32 new_rx_count, new_tx_count;
377
378 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
379 return -EINVAL;
380
381 new_tx_count = clamp_t(u32, ring->tx_pending,
382 I40EVF_MIN_TXD,
383 I40EVF_MAX_TXD);
384 new_tx_count = ALIGN(new_tx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
385
386 new_rx_count = clamp_t(u32, ring->rx_pending,
387 I40EVF_MIN_RXD,
388 I40EVF_MAX_RXD);
389 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
390
391
392 if ((new_tx_count == adapter->tx_desc_count) &&
393 (new_rx_count == adapter->rx_desc_count))
394 return 0;
395
396 adapter->tx_desc_count = new_tx_count;
397 adapter->rx_desc_count = new_rx_count;
398
399 if (netif_running(netdev)) {
400 adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
401 schedule_work(&adapter->reset_task);
402 }
403
404 return 0;
405}
406
407
408
409
410
411
412
413
414
415
416
417static int __i40evf_get_coalesce(struct net_device *netdev,
418 struct ethtool_coalesce *ec,
419 int queue)
420{
421 struct i40evf_adapter *adapter = netdev_priv(netdev);
422 struct i40e_vsi *vsi = &adapter->vsi;
423 struct i40e_ring *rx_ring, *tx_ring;
424
425 ec->tx_max_coalesced_frames = vsi->work_limit;
426 ec->rx_max_coalesced_frames = vsi->work_limit;
427
428
429
430
431 if (queue < 0)
432 queue = 0;
433 else if (queue >= adapter->num_active_queues)
434 return -EINVAL;
435
436 rx_ring = &adapter->rx_rings[queue];
437 tx_ring = &adapter->tx_rings[queue];
438
439 if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
440 ec->use_adaptive_rx_coalesce = 1;
441
442 if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
443 ec->use_adaptive_tx_coalesce = 1;
444
445 ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
446 ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
447
448 return 0;
449}
450
451
452
453
454
455
456
457
458
459
460
/**
 * i40evf_get_coalesce - get interrupt coalescing settings (ethtool -c)
 * @netdev: network interface device structure
 * @ec: output coalesce structure
 *
 * Device-wide variant: passes -1 so the helper reports queue 0's values.
 **/
static int i40evf_get_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec)
{
	return __i40evf_get_coalesce(netdev, ec, -1);
}
466
467
468
469
470
471
472
473
474
/**
 * i40evf_get_per_queue_coalesce - get coalesce values for a single queue
 * @netdev: network interface device structure
 * @queue: queue to query
 * @ec: output coalesce structure
 **/
static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
					 u32 queue,
					 struct ethtool_coalesce *ec)
{
	return __i40evf_get_coalesce(netdev, ec, queue);
}
481
482
483
484
485
486
487
488
489
/**
 * i40evf_set_itr_per_queue - apply new ITR values to one queue pair
 * @adapter: the VF adapter struct
 * @ec: coalesce settings requested via ethtool
 * @queue: queue index to modify
 *
 * Stores the new usec settings on the rings (with the DYNAMIC bit
 * encoding adaptive mode) and writes the resulting ITR values to the
 * queue pair's interrupt throttle registers.
 **/
static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
				     struct ethtool_coalesce *ec,
				     int queue)
{
	struct i40e_vsi *vsi = &adapter->vsi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_q_vector *q_vector;
	u16 vector;

	adapter->rx_rings[queue].rx_itr_setting = ec->rx_coalesce_usecs;
	adapter->tx_rings[queue].tx_itr_setting = ec->tx_coalesce_usecs;

	/* fold the adaptive-mode request into the stored setting */
	if (ec->use_adaptive_rx_coalesce)
		adapter->rx_rings[queue].rx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		adapter->rx_rings[queue].rx_itr_setting &= ~I40E_ITR_DYNAMIC;

	if (ec->use_adaptive_tx_coalesce)
		adapter->tx_rings[queue].tx_itr_setting |= I40E_ITR_DYNAMIC;
	else
		adapter->tx_rings[queue].tx_itr_setting &= ~I40E_ITR_DYNAMIC;

	/* program the Rx ITR register for this queue's vector */
	q_vector = adapter->rx_rings[queue].q_vector;
	q_vector->rx.itr = ITR_TO_REG(adapter->rx_rings[queue].rx_itr_setting);
	vector = vsi->base_vector + q_vector->v_idx;
	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);

	/* program the Tx ITR register for this queue's vector */
	q_vector = adapter->tx_rings[queue].q_vector;
	q_vector->tx.itr = ITR_TO_REG(adapter->tx_rings[queue].tx_itr_setting);
	vector = vsi->base_vector + q_vector->v_idx;
	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);

	i40e_flush(hw);
}
524
525
526
527
528
529
530
531
532
533static int __i40evf_set_coalesce(struct net_device *netdev,
534 struct ethtool_coalesce *ec,
535 int queue)
536{
537 struct i40evf_adapter *adapter = netdev_priv(netdev);
538 struct i40e_vsi *vsi = &adapter->vsi;
539 int i;
540
541 if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
542 vsi->work_limit = ec->tx_max_coalesced_frames_irq;
543
544 if (ec->rx_coalesce_usecs == 0) {
545 if (ec->use_adaptive_rx_coalesce)
546 netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
547 } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
548 (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
549 netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
550 return -EINVAL;
551 }
552
553 else
554 if (ec->tx_coalesce_usecs == 0) {
555 if (ec->use_adaptive_tx_coalesce)
556 netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
557 } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
558 (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
559 netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
560 return -EINVAL;
561 }
562
563
564
565
566 if (queue < 0) {
567 for (i = 0; i < adapter->num_active_queues; i++)
568 i40evf_set_itr_per_queue(adapter, ec, i);
569 } else if (queue < adapter->num_active_queues) {
570 i40evf_set_itr_per_queue(adapter, ec, queue);
571 } else {
572 netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
573 adapter->num_active_queues - 1);
574 return -EINVAL;
575 }
576
577 return 0;
578}
579
580
581
582
583
584
585
586
/**
 * i40evf_set_coalesce - set interrupt coalescing settings (ethtool -C)
 * @netdev: network interface device structure
 * @ec: requested coalesce structure
 *
 * Device-wide variant: passes -1 so the helper applies the settings to
 * every active queue.
 **/
static int i40evf_set_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec)
{
	return __i40evf_set_coalesce(netdev, ec, -1);
}
592
593
594
595
596
597
598
599
600
/**
 * i40evf_set_per_queue_coalesce - set coalesce values for a single queue
 * @netdev: network interface device structure
 * @queue: queue to change
 * @ec: requested coalesce structure
 **/
static int i40evf_set_per_queue_coalesce(struct net_device *netdev,
					 u32 queue,
					 struct ethtool_coalesce *ec)
{
	return __i40evf_set_coalesce(netdev, ec, queue);
}
607
608
609
610
611
612
613
614
615static int i40evf_get_rxnfc(struct net_device *netdev,
616 struct ethtool_rxnfc *cmd,
617 u32 *rule_locs)
618{
619 struct i40evf_adapter *adapter = netdev_priv(netdev);
620 int ret = -EOPNOTSUPP;
621
622 switch (cmd->cmd) {
623 case ETHTOOL_GRXRINGS:
624 cmd->data = adapter->num_active_queues;
625 ret = 0;
626 break;
627 case ETHTOOL_GRXFH:
628 netdev_info(netdev,
629 "RSS hash info is not available to vf, use pf.\n");
630 break;
631 default:
632 break;
633 }
634
635 return ret;
636}
637
638
639
640
641
642
643
644
645static void i40evf_get_channels(struct net_device *netdev,
646 struct ethtool_channels *ch)
647{
648 struct i40evf_adapter *adapter = netdev_priv(netdev);
649
650
651 ch->max_combined = adapter->num_active_queues;
652
653 ch->max_other = NONQ_VECS;
654 ch->other_count = NONQ_VECS;
655
656 ch->combined_count = adapter->num_active_queues;
657}
658
659
660
661
662
663
664
665static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
666{
667 struct i40evf_adapter *adapter = netdev_priv(netdev);
668
669 return adapter->rss_key_size;
670}
671
672
673
674
675
676
677
678static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
679{
680 struct i40evf_adapter *adapter = netdev_priv(netdev);
681
682 return adapter->rss_lut_size;
683}
684
685
686
687
688
689
690
691
692
693static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
694 u8 *hfunc)
695{
696 struct i40evf_adapter *adapter = netdev_priv(netdev);
697 u16 i;
698
699 if (hfunc)
700 *hfunc = ETH_RSS_HASH_TOP;
701 if (!indir)
702 return 0;
703
704 memcpy(key, adapter->rss_key, adapter->rss_key_size);
705
706
707 for (i = 0; i < adapter->rss_lut_size; i++)
708 indir[i] = (u32)adapter->rss_lut[i];
709
710 return 0;
711}
712
713
714
715
716
717
718
719
720
721
722static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
723 const u8 *key, const u8 hfunc)
724{
725 struct i40evf_adapter *adapter = netdev_priv(netdev);
726 u16 i;
727
728
729 if (key ||
730 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
731 return -EOPNOTSUPP;
732 if (!indir)
733 return 0;
734
735 if (key) {
736 memcpy(adapter->rss_key, key, adapter->rss_key_size);
737 }
738
739
740 for (i = 0; i < adapter->rss_lut_size; i++)
741 adapter->rss_lut[i] = (u8)(indir[i]);
742
743 return i40evf_config_rss(adapter);
744}
745
/* ethtool callback table wired into the netdev by i40evf_set_ethtool_ops() */
static const struct ethtool_ops i40evf_ethtool_ops = {
	.get_drvinfo		= i40evf_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= i40evf_get_ringparam,
	.set_ringparam		= i40evf_set_ringparam,
	.get_strings		= i40evf_get_strings,
	.get_ethtool_stats	= i40evf_get_ethtool_stats,
	.get_sset_count		= i40evf_get_sset_count,
	.get_priv_flags		= i40evf_get_priv_flags,
	.set_priv_flags		= i40evf_set_priv_flags,
	.get_msglevel		= i40evf_get_msglevel,
	.set_msglevel		= i40evf_set_msglevel,
	.get_coalesce		= i40evf_get_coalesce,
	.set_coalesce		= i40evf_set_coalesce,
	.get_per_queue_coalesce = i40evf_get_per_queue_coalesce,
	.set_per_queue_coalesce = i40evf_set_per_queue_coalesce,
	.get_rxnfc		= i40evf_get_rxnfc,
	.get_rxfh_indir_size	= i40evf_get_rxfh_indir_size,
	.get_rxfh		= i40evf_get_rxfh,
	.set_rxfh		= i40evf_set_rxfh,
	.get_channels		= i40evf_get_channels,
	.get_rxfh_key_size	= i40evf_get_rxfh_key_size,
	.get_link_ksettings	= i40evf_get_link_ksettings,
};
770
771
772
773
774
775
776
777
/**
 * i40evf_set_ethtool_ops - attach the driver's ethtool callbacks
 * @netdev: network interface device structure
 *
 * Points the netdev's ethtool_ops at this file's callback table so the
 * ethtool core can reach the driver.
 **/
void i40evf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &i40evf_ethtool_ops;
}
782