/* ethtool support for iavf */
#include "iavf.h"

#include <linux/uaccess.h>
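/**
 * struct iavf_stats - definition for an ethtool statistic
 * @stat_string: statistic name to display in ethtool -S output
 * @sizeof_stat: the sizeof() of the stat, must be no greater than sizeof(u64)
 * @stat_offset: offsetof() the stat from a common base pointer
 *
 * Each statistic is defined as an offset from a base pointer. Arrays of
 * these structures are built with the IAVF_STAT macro below; the stat_string
 * may contain printf-style format specifiers that are expanded by
 * __iavf_add_stat_strings().
 */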
struct iavf_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

/* Helper macro to define an iavf_stats entry; captures the size and offset
 * of @_stat within @_type so the value can be read generically later.
 */
#define IAVF_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* Helper macro for defining per-queue statistics on struct iavf_ring */
#define IAVF_QUEUE_STAT(_name, _stat) \
	IAVF_STAT(struct iavf_ring, _name, _stat)
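/* Statistics tracked for every Tx or Rx ring */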
static const struct iavf_stats iavf_gstrings_queue_stats[] = {
	IAVF_QUEUE_STAT("%s-%u.packets", stats.packets),
	IAVF_QUEUE_STAT("%s-%u.bytes", stats.bytes),
};
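/**
 * iavf_add_one_ethtool_stat - copy the stat into the supplied buffer
 * @data: location to store the stat value
 * @pointer: basis for where to copy from
 * @stat: the stat definition
 *
 * Copies the stat data defined by the pointer and stat structure pair into
 * the memory supplied as data. If the pointer is NULL, the data is zeroed.
 */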
static void
iavf_add_one_ethtool_stat(u64 *data, void *pointer,
			  const struct iavf_stats *stat)
{
	char *p;

	if (!pointer) {
		/* Ensure that the ethtool data buffer is zeroed for any stats
		 * which don't have a valid pointer.
		 */
		*data = 0;
		return;
	}

	p = (char *)pointer + stat->stat_offset;
	switch (stat->sizeof_stat) {
	case sizeof(u64):
		*data = *((u64 *)p);
		break;
	case sizeof(u32):
		*data = *((u32 *)p);
		break;
	case sizeof(u16):
		*data = *((u16 *)p);
		break;
	case sizeof(u8):
		*data = *((u8 *)p);
		break;
	default:
		WARN_ONCE(1, "unexpected stat size for %s",
			  stat->stat_string);
		*data = 0;
	}
}
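/**
 * __iavf_add_ethtool_stats - copy stats into the ethtool supplied buffer
 * @data: ethtool stats buffer
 * @pointer: location to copy stats from
 * @stats: array of stats to copy
 * @size: the size of the stats definition
 *
 * Copy the stats defined by the stats array using the pointer as a base into
 * the data buffer supplied by ethtool. Updates the data pointer to point to
 * the next empty location for successive calls.
 */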
static void
__iavf_add_ethtool_stats(u64 **data, void *pointer,
			 const struct iavf_stats stats[],
			 const unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++)
		iavf_add_one_ethtool_stat((*data)++, pointer, &stats[i]);
}
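/* Helper macro which passes ARRAY_SIZE(stats) to __iavf_add_ethtool_stats,
 * avoiding size mismatches between a stats array and its length. Note that
 * @stats is evaluated twice, so arguments with side effects should be
 * avoided.
 */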
#define iavf_add_ethtool_stats(data, pointer, stats) \
	__iavf_add_ethtool_stats(data, pointer, stats, ARRAY_SIZE(stats))
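/**
 * iavf_add_queue_stats - copy queue statistics into the supplied buffer
 * @data: ethtool stats buffer
 * @ring: the ring to copy, may be NULL
 *
 * Queue statistics are copied under the ring's u64_stats syncp sequence
 * counter so that a consistent snapshot is read. If the ring pointer is NULL
 * the stat values are zeroed. The data pointer is advanced past the copied
 * stats when finished.
 */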
static void
iavf_add_queue_stats(u64 **data, struct iavf_ring *ring)
{
	const unsigned int size = ARRAY_SIZE(iavf_gstrings_queue_stats);
	const struct iavf_stats *stats = iavf_gstrings_queue_stats;
	unsigned int start;
	unsigned int i;

	/* To avoid invalid statistics values, retry the copy until we get a
	 * consistent snapshot according to u64_stats_fetch_retry_irq. Check
	 * that the ring is non-NULL before touching its syncp.
	 */
	do {
		start = !ring ? 0 : u64_stats_fetch_begin_irq(&ring->syncp);
		for (i = 0; i < size; i++)
			iavf_add_one_ethtool_stat(&(*data)[i], ring, &stats[i]);
	} while (ring && u64_stats_fetch_retry_irq(&ring->syncp, start));

	/* Once we successfully copy the stats in, update the data pointer */
	*data += size;
}
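/**
 * __iavf_add_stat_strings - copy stat strings into the ethtool buffer
 * @p: ethtool supplied buffer
 * @stats: stat definitions array
 * @size: size of the stats array
 *
 * Format and copy the strings described by @stats into the buffer pointed at
 * by @p, expanding any format specifiers with the trailing variadic
 * arguments.
 */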
static void __iavf_add_stat_strings(u8 **p, const struct iavf_stats stats[],
				    const unsigned int size, ...)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		va_list args;

		va_start(args, size);
		vsnprintf(*p, ETH_GSTRING_LEN, stats[i].stat_string, args);
		*p += ETH_GSTRING_LEN;
		va_end(args);
	}
}
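/* Helper macro which passes ARRAY_SIZE(stats) to __iavf_add_stat_strings.
 * As with iavf_add_ethtool_stats, @stats is evaluated twice and must be an
 * array that ARRAY_SIZE can be applied to.
 */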
#define iavf_add_stat_strings(p, stats, ...) \
	__iavf_add_stat_strings(p, stats, ARRAY_SIZE(stats), ## __VA_ARGS__)

#define VF_STAT(_name, _stat) \
	IAVF_STAT(struct iavf_adapter, _name, _stat)

static const struct iavf_stats iavf_gstrings_stats[] = {
	VF_STAT("rx_bytes", current_stats.rx_bytes),
	VF_STAT("rx_unicast", current_stats.rx_unicast),
	VF_STAT("rx_multicast", current_stats.rx_multicast),
	VF_STAT("rx_broadcast", current_stats.rx_broadcast),
	VF_STAT("rx_discards", current_stats.rx_discards),
	VF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
	VF_STAT("tx_bytes", current_stats.tx_bytes),
	VF_STAT("tx_unicast", current_stats.tx_unicast),
	VF_STAT("tx_multicast", current_stats.tx_multicast),
	VF_STAT("tx_broadcast", current_stats.tx_broadcast),
	VF_STAT("tx_discards", current_stats.tx_discards),
	VF_STAT("tx_errors", current_stats.tx_errors),
};

#define IAVF_STATS_LEN ARRAY_SIZE(iavf_gstrings_stats)

#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
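/**
 * struct iavf_priv_flags - definition of an ethtool private flag
 * @flag_string: name shown in ethtool --show-priv-flags output
 * @flag: the IAVF_FLAG_* bit this entry controls in adapter->flags
 * @read_only: true if the flag cannot be changed from user space
 */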
struct iavf_priv_flags {
	char flag_string[ETH_GSTRING_LEN];
	u32 flag;
	bool read_only;
};

#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
	.flag_string = _name, \
	.flag = _flag, \
	.read_only = _read_only, \
}

static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
	IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
};

#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
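/**
 * iavf_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @cmd: ethtool command
 *
 * Reports speed/duplex settings. As a VF we do not know the real link type,
 * so no supported link modes are advertised and duplex is always reported as
 * full.
 */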
static int iavf_get_link_ksettings(struct net_device *netdev,
				   struct ethtool_link_ksettings *cmd)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	cmd->base.autoneg = AUTONEG_DISABLE;
	cmd->base.port = PORT_NONE;

	switch (adapter->link_speed) {
	case I40E_LINK_SPEED_40GB:
		cmd->base.speed = SPEED_40000;
		break;
	case I40E_LINK_SPEED_25GB:
#ifdef SPEED_25000
		cmd->base.speed = SPEED_25000;
#else
		netdev_info(netdev,
			    "Speed is 25G, display not supported by this version of ethtool.\n");
#endif
		break;
	case I40E_LINK_SPEED_20GB:
		cmd->base.speed = SPEED_20000;
		break;
	case I40E_LINK_SPEED_10GB:
		cmd->base.speed = SPEED_10000;
		break;
	case I40E_LINK_SPEED_1GB:
		cmd->base.speed = SPEED_1000;
		break;
	case I40E_LINK_SPEED_100MB:
		cmd->base.speed = SPEED_100;
		break;
	default:
		break;
	}
	cmd->base.duplex = DUPLEX_FULL;

	return 0;
}
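/**
 * iavf_get_sset_count - Get length of string set
 * @netdev: network interface device structure
 * @sset: id of string set
 *
 * Reports the size of the various string tables. The stats count always
 * includes space for the maximum number of request queues, so the buffer
 * layout stays fixed even when the number of active queues changes.
 */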
static int iavf_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return IAVF_STATS_LEN +
			(IAVF_QUEUE_STATS_LEN * 2 * IAVF_MAX_REQ_QUEUES);
	else if (sset == ETH_SS_PRIV_FLAGS)
		return IAVF_PRIV_FLAGS_STR_LEN;
	else
		return -EINVAL;
}
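/**
 * iavf_get_ethtool_stats - report device statistics to ethtool
 * @netdev: network interface device structure
 * @stats: ethtool statistics structure
 * @data: pointer to data buffer
 *
 * All statistics are added to the data buffer as an array of u64. Queues
 * beyond the currently active set are reported as zero so the layout matches
 * the count returned by iavf_get_sset_count().
 */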
static void iavf_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	unsigned int i;

	iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);

	rcu_read_lock();
	for (i = 0; i < IAVF_MAX_REQ_QUEUES; i++) {
		struct iavf_ring *ring;

		/* Tx rings */
		ring = (i < adapter->num_active_queues ?
			&adapter->tx_rings[i] : NULL);
		iavf_add_queue_stats(&data, ring);

		/* Rx rings */
		ring = (i < adapter->num_active_queues ?
			&adapter->rx_rings[i] : NULL);
		iavf_add_queue_stats(&data, ring);
	}
	rcu_read_unlock();
}
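/**
 * iavf_get_priv_flag_strings - Get flag strings for private flags
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the private flags string table.
 */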
static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
{
	unsigned int i;

	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
		snprintf(data, ETH_GSTRING_LEN, "%s",
			 iavf_gstrings_priv_flags[i].flag_string);
		data += ETH_GSTRING_LEN;
	}
}
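/**
 * iavf_get_stat_strings - Get stat strings
 * @netdev: network interface device structure
 * @data: buffer for string data
 *
 * Builds the statistics string table.
 */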
static void iavf_get_stat_strings(struct net_device *netdev, u8 *data)
{
	unsigned int i;

	iavf_add_stat_strings(&data, iavf_gstrings_stats);

	/* Emit the per-queue stat strings for each Tx and Rx queue pair */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
				      "tx", i);
		iavf_add_stat_strings(&data, iavf_gstrings_queue_stats,
				      "rx", i);
	}
}
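/**
 * iavf_get_strings - Get string set
 * @netdev: network interface device structure
 * @sset: id of string set
 * @data: buffer for string data
 *
 * Builds string tables for the requested string set.
 */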
static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		iavf_get_stat_strings(netdev, data);
		break;
	case ETH_SS_PRIV_FLAGS:
		iavf_get_priv_flag_strings(netdev, data);
		break;
	default:
		break;
	}
}
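/**
 * iavf_get_priv_flags - report device private flags
 * @netdev: network interface device structure
 *
 * Returns a u32 bitmap with one bit per entry in iavf_gstrings_priv_flags,
 * set when the corresponding adapter flag is enabled.
 */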
static u32 iavf_get_priv_flags(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 i, ret_flags = 0;

	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
		const struct iavf_priv_flags *priv_flags;

		priv_flags = &iavf_gstrings_priv_flags[i];

		if (priv_flags->flag & adapter->flags)
			ret_flags |= BIT(i);
	}

	return ret_flags;
}
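/**
 * iavf_set_priv_flags - set private flags
 * @netdev: network interface device structure
 * @flags: bitmap of flags to set
 *
 * Applies the requested private flags to adapter->flags and schedules a
 * reset when a flag that affects the data path (currently legacy-rx) changes
 * while the interface is running.
 */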
static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 orig_flags, new_flags, changed_flags;
	u32 i;

	orig_flags = READ_ONCE(adapter->flags);
	new_flags = orig_flags;

	for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
		const struct iavf_priv_flags *priv_flags;

		priv_flags = &iavf_gstrings_priv_flags[i];

		if (flags & BIT(i))
			new_flags |= priv_flags->flag;
		else
			new_flags &= ~(priv_flags->flag);

		if (priv_flags->read_only &&
		    ((orig_flags ^ new_flags) & ~BIT(i)))
			return -EOPNOTSUPP;
	}

	/* Compare and exchange the new flags into place. If cmpxchg returns
	 * anything other than the old value, something else modified the
	 * flags since we copied them, so punt with an error.
	 */
	if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
		dev_warn(&adapter->pdev->dev,
			 "Unable to update adapter->flags as it was modified by another thread...\n");
		return -EAGAIN;
	}

	changed_flags = orig_flags ^ new_flags;

	/* Process any additional changes needed as a result of flag changes.
	 * Issue a reset so a legacy-rx change takes effect.
	 */
	if (changed_flags & IAVF_FLAG_LEGACY_RX) {
		if (netif_running(netdev)) {
			adapter->flags |= IAVF_FLAG_RESET_NEEDED;
			schedule_work(&adapter->reset_task);
		}
	}

	return 0;
}

static u32 iavf_get_msglevel(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void iavf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (IAVF_DEBUG_USER & data)
		adapter->hw.debug_mask = data;
	adapter->msg_enable = data;
}

static void iavf_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, iavf_driver_name, 32);
	strlcpy(drvinfo->version, iavf_driver_version, 32);
	strlcpy(drvinfo->fw_version, "N/A", 4);
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
	drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}

static void iavf_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IAVF_MAX_RXD;
	ring->tx_max_pending = IAVF_MAX_TXD;
	ring->rx_pending = adapter->rx_desc_count;
	ring->tx_pending = adapter->tx_desc_count;
}
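/**
 * iavf_set_ringparam - Set ring parameters
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure
 *
 * Sets ring parameters. Tx and Rx descriptor counts are clamped to the
 * supported range, rounded to the required multiple, and applied via a reset
 * if the interface is running.
 */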
static int iavf_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IAVF_MIN_TXD,
			       IAVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IAVF_MIN_RXD,
			       IAVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IAVF_REQ_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_desc_count) &&
	    (new_rx_count == adapter->rx_desc_count))
		return 0;

	adapter->tx_desc_count = new_tx_count;
	adapter->rx_desc_count = new_rx_count;

	if (netif_running(netdev)) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}

	return 0;
}
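/**
 * __iavf_get_coalesce - get per-queue coalesce settings
 * @netdev: the netdev to check
 * @ec: ethtool coalesce data structure
 * @queue: which queue to read, or -1 for the device-wide values
 *
 * Gets the per-queue coalescing settings: Rx and Tx usecs and the adaptive
 * (dynamic) ITR state are read from the selected queue.
 */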
static int __iavf_get_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec, int queue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vsi *vsi = &adapter->vsi;
	struct iavf_ring *rx_ring, *tx_ring;

	ec->tx_max_coalesced_frames = vsi->work_limit;
	ec->rx_max_coalesced_frames = vsi->work_limit;

	/* Rx and Tx usecs are reported per queue. If the caller doesn't
	 * specify a queue, report queue 0's values as representative.
	 */
	if (queue < 0)
		queue = 0;
	else if (queue >= adapter->num_active_queues)
		return -EINVAL;

	rx_ring = &adapter->rx_rings[queue];
	tx_ring = &adapter->tx_rings[queue];

	if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
		ec->use_adaptive_rx_coalesce = 1;

	if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
		ec->use_adaptive_tx_coalesce = 1;

	ec->rx_coalesce_usecs = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
	ec->tx_coalesce_usecs = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;

	return 0;
}

static int iavf_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	return __iavf_get_coalesce(netdev, ec, -1);
}

static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
				       struct ethtool_coalesce *ec)
{
	return __iavf_get_coalesce(netdev, ec, queue);
}
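/**
 * iavf_set_itr_per_queue - set ITR values for a specific queue
 * @adapter: the VF adapter struct
 * @ec: coalesce settings from ethtool
 * @queue: the queue being configured
 *
 * Applies the requested Rx and Tx interrupt throttling values, and the
 * adaptive (dynamic) ITR enables, to the given queue pair.
 */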
static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
				   struct ethtool_coalesce *ec, int queue)
{
	struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
	struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
	struct iavf_q_vector *q_vector;

	rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
	tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);

	rx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
	if (!ec->use_adaptive_rx_coalesce)
		rx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;

	tx_ring->itr_setting |= IAVF_ITR_DYNAMIC;
	if (!ec->use_adaptive_tx_coalesce)
		tx_ring->itr_setting ^= IAVF_ITR_DYNAMIC;

	q_vector = rx_ring->q_vector;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);

	q_vector = tx_ring->q_vector;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);

	/* The interrupt handler programs the Tx and Rx ITR values based on
	 * the target_itr values written into the q_vector above, so there is
	 * no need to write hardware registers here.
	 */
}
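/**
 * __iavf_set_coalesce - set coalesce settings for a particular queue
 * @netdev: the netdev to change
 * @ec: ethtool coalesce settings
 * @queue: the queue to change, or -1 to apply to all active queues
 *
 * Validates the requested ITR values and applies them either to a single
 * queue or to every active queue.
 */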
static int __iavf_set_coalesce(struct net_device *netdev,
			       struct ethtool_coalesce *ec, int queue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vsi *vsi = &adapter->vsi;
	int i;

	if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
		vsi->work_limit = ec->tx_max_coalesced_frames_irq;

	if (ec->rx_coalesce_usecs == 0) {
		if (ec->use_adaptive_rx_coalesce)
			netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
	} else if ((ec->rx_coalesce_usecs < IAVF_MIN_ITR) ||
		   (ec->rx_coalesce_usecs > IAVF_MAX_ITR)) {
		netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
		return -EINVAL;
	} else if (ec->tx_coalesce_usecs == 0) {
		if (ec->use_adaptive_tx_coalesce)
			netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
	} else if ((ec->tx_coalesce_usecs < IAVF_MIN_ITR) ||
		   (ec->tx_coalesce_usecs > IAVF_MAX_ITR)) {
		netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
		return -EINVAL;
	}

	/* Rx and Tx usecs are set per queue. If the caller doesn't specify a
	 * queue, apply the values to every active queue.
	 */
	if (queue < 0) {
		for (i = 0; i < adapter->num_active_queues; i++)
			iavf_set_itr_per_queue(adapter, ec, i);
	} else if (queue < adapter->num_active_queues) {
		iavf_set_itr_per_queue(adapter, ec, queue);
	} else {
		netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
			   adapter->num_active_queues - 1);
		return -EINVAL;
	}

	return 0;
}

static int iavf_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ec)
{
	return __iavf_set_coalesce(netdev, ec, -1);
}

static int iavf_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
				       struct ethtool_coalesce *ec)
{
	return __iavf_set_coalesce(netdev, ec, queue);
}
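/**
 * iavf_get_rxnfc - command to get Rx flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: pointer to store rule locations
 *
 * Returns 0 if the command is supported.
 */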
static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_active_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		netdev_info(netdev,
			    "RSS hash info is not available to vf, use pf.\n");
		break;
	default:
		break;
	}

	return ret;
}
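/**
 * iavf_get_channels - get the number of channels supported by the device
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * For the purposes of this device, only combined channels (a Tx/Rx ring
 * pair) are used.
 */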
static void iavf_get_channels(struct net_device *netdev,
			      struct ethtool_channels *ch)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Report maximum channels */
	ch->max_combined = IAVF_MAX_REQ_QUEUES;

	ch->max_other = NONQ_VECS;
	ch->other_count = NONQ_VECS;

	ch->combined_count = adapter->num_active_queues;
}
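/**
 * iavf_set_channels - set the new channel count
 * @netdev: network interface device structure
 * @ch: channel information structure
 *
 * Requests a new number of queue pairs from the PF. Returns 0 on success,
 * a negative error code otherwise.
 */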
static int iavf_set_channels(struct net_device *netdev,
			     struct ethtool_channels *ch)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int num_req = ch->combined_count;

	if (num_req != adapter->num_active_queues &&
	    !(adapter->vf_res->vf_cap_flags &
	      VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)) {
		dev_info(&adapter->pdev->dev, "PF is not capable of queue negotiation.\n");
		return -EINVAL;
	}

	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
		return -EINVAL;
	}

	/* Only combined channels in the supported range may be requested;
	 * separate Rx/Tx counts and changes to the "other" vector count are
	 * rejected.
	 */
	if (num_req <= 0 || num_req > IAVF_MAX_REQ_QUEUES)
		return -EINVAL;

	if (ch->rx_count || ch->tx_count || ch->other_count != NONQ_VECS)
		return -EINVAL;

	adapter->num_req_queues = num_req;
	return iavf_request_queues(adapter, num_req);
}

static u32 iavf_get_rxfh_key_size(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->rss_key_size;
}

static u32 iavf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	return adapter->rss_lut_size;
}
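/**
 * iavf_get_rxfh - get the Rx flow hash indirection table and hash key
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function in use
 *
 * Reads the indirection table and key from the driver's software copy and
 * always reports Toeplitz as the hash function.
 */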
static int iavf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u16 i;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;
	if (!indir)
		return 0;

	memcpy(key, adapter->rss_key, adapter->rss_key_size);

	/* Each 32-bit entry pointed to by 'indir' holds one LUT entry */
	for (i = 0; i < adapter->rss_lut_size; i++)
		indir[i] = (u32)adapter->rss_lut[i];

	return 0;
}
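/**
 * iavf_set_rxfh - set the Rx flow hash indirection table and hash key
 * @netdev: network interface device structure
 * @indir: indirection table
 * @key: hash key
 * @hfunc: hash function to use
 *
 * Changing the hash key or hash function is not supported; only the
 * indirection table may be updated, after which iavf_config_rss() applies
 * the new configuration.
 */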
static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *key, const u8 hfunc)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u16 i;

	/* We do not allow change in unsupported parameters */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;
	if (!indir)
		return 0;

	if (key)
		memcpy(adapter->rss_key, key, adapter->rss_key_size);

	/* Each 32-bit entry in 'indir' is stored into one LUT byte */
	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = (u8)(indir[i]);

	return iavf_config_rss(adapter);
}

static const struct ethtool_ops iavf_ethtool_ops = {
	.get_drvinfo = iavf_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = iavf_get_ringparam,
	.set_ringparam = iavf_set_ringparam,
	.get_strings = iavf_get_strings,
	.get_ethtool_stats = iavf_get_ethtool_stats,
	.get_sset_count = iavf_get_sset_count,
	.get_priv_flags = iavf_get_priv_flags,
	.set_priv_flags = iavf_set_priv_flags,
	.get_msglevel = iavf_get_msglevel,
	.set_msglevel = iavf_set_msglevel,
	.get_coalesce = iavf_get_coalesce,
	.set_coalesce = iavf_set_coalesce,
	.get_per_queue_coalesce = iavf_get_per_queue_coalesce,
	.set_per_queue_coalesce = iavf_set_per_queue_coalesce,
	.get_rxnfc = iavf_get_rxnfc,
	.get_rxfh_indir_size = iavf_get_rxfh_indir_size,
	.get_rxfh = iavf_get_rxfh,
	.set_rxfh = iavf_set_rxfh,
	.get_channels = iavf_get_channels,
	.set_channels = iavf_set_channels,
	.get_rxfh_key_size = iavf_get_rxfh_key_size,
	.get_link_ksettings = iavf_get_link_ksettings,
};
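/**
 * iavf_set_ethtool_ops - Initialize ethtool ops struct
 * @netdev: network interface device structure
 *
 * Sets the ethtool ops struct in our netdev so that ethtool can call
 * our functions.
 */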
void iavf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &iavf_ethtool_ops;
}