1
2
3
4
5
6
7
8
9
10
11#include <linux/netdevice.h>
12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h>
14#include <linux/in.h>
15#include "net_driver.h"
16#include "workarounds.h"
17#include "selftest.h"
18#include "efx.h"
19#include "filter.h"
20#include "nic.h"
21
/* Description of one software-maintained statistic exported via ethtool -S */
struct ef4_sw_stat_desc {
	const char *name;		/* name reported to userspace */
	enum {
		EF4_ETHTOOL_STAT_SOURCE_nic,
		EF4_ETHTOOL_STAT_SOURCE_channel,
		EF4_ETHTOOL_STAT_SOURCE_tx_queue
	} source;			/* object type holding the counter */
	unsigned offset;		/* byte offset of counter in that object */
	u64(*get_stat) (void *field);	/* reader matching the field's C type */
};
32
33
34#define EF4_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
35 get_stat_function) { \
36 .name = #stat_name, \
37 .source = EF4_ETHTOOL_STAT_SOURCE_##source_name, \
38 .offset = ((((field_type *) 0) == \
39 &((struct ef4_##source_name *)0)->field) ? \
40 offsetof(struct ef4_##source_name, field) : \
41 offsetof(struct ef4_##source_name, field)), \
42 .get_stat = get_stat_function, \
43}
44
45static u64 ef4_get_uint_stat(void *field)
46{
47 return *(unsigned int *)field;
48}
49
50static u64 ef4_get_atomic_stat(void *field)
51{
52 return atomic_read((atomic_t *) field);
53}
54
/* NIC-level error counter stored as an atomic_t */
#define EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
	EF4_ETHTOOL_STAT(field, nic, field, \
			 atomic_t, ef4_get_atomic_stat)

/* Per-channel counter; the struct field is named n_<field> */
#define EF4_ETHTOOL_UINT_CHANNEL_STAT(field) \
	EF4_ETHTOOL_STAT(field, channel, n_##field, \
			 unsigned int, ef4_get_uint_stat)

/* Per-TX-queue counter; reported as tx_<field> */
#define EF4_ETHTOOL_UINT_TXQ_STAT(field) \
	EF4_ETHTOOL_STAT(tx_##field, tx_queue, field, \
			 unsigned int, ef4_get_uint_stat)
66
/* Table of all software statistics reported through ethtool -S.
 * Order here determines the order of strings and values presented
 * to userspace.
 */
static const struct ef4_sw_stat_desc ef4_sw_stat_desc[] = {
	EF4_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EF4_ETHTOOL_UINT_TXQ_STAT(pushes),
	EF4_ETHTOOL_UINT_TXQ_STAT(cb_packets),
	EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};
80
/* Number of software statistics in the table above */
#define EF4_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(ef4_sw_stat_desc)

/* Magic number expected in the EEPROM access interface */
#define EF4_ETHTOOL_EEPROM_MAGIC 0xEFAB
84
85
86
87
88
89
90
91
92
93static int ef4_ethtool_phys_id(struct net_device *net_dev,
94 enum ethtool_phys_id_state state)
95{
96 struct ef4_nic *efx = netdev_priv(net_dev);
97 enum ef4_led_mode mode = EF4_LED_DEFAULT;
98
99 switch (state) {
100 case ETHTOOL_ID_ON:
101 mode = EF4_LED_ON;
102 break;
103 case ETHTOOL_ID_OFF:
104 mode = EF4_LED_OFF;
105 break;
106 case ETHTOOL_ID_INACTIVE:
107 mode = EF4_LED_DEFAULT;
108 break;
109 case ETHTOOL_ID_ACTIVE:
110 return 1;
111 }
112
113 efx->type->set_id_led(efx, mode);
114 return 0;
115}
116
117
118static int ef4_ethtool_get_settings(struct net_device *net_dev,
119 struct ethtool_cmd *ecmd)
120{
121 struct ef4_nic *efx = netdev_priv(net_dev);
122 struct ef4_link_state *link_state = &efx->link_state;
123
124 mutex_lock(&efx->mac_lock);
125 efx->phy_op->get_settings(efx, ecmd);
126 mutex_unlock(&efx->mac_lock);
127
128
129 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
130
131 if (LOOPBACK_INTERNAL(efx)) {
132 ethtool_cmd_speed_set(ecmd, link_state->speed);
133 ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
134 }
135
136 return 0;
137}
138
139
140static int ef4_ethtool_set_settings(struct net_device *net_dev,
141 struct ethtool_cmd *ecmd)
142{
143 struct ef4_nic *efx = netdev_priv(net_dev);
144 int rc;
145
146
147 if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
148 (ecmd->duplex != DUPLEX_FULL)) {
149 netif_dbg(efx, drv, efx->net_dev,
150 "rejecting unsupported 1000Mbps HD setting\n");
151 return -EINVAL;
152 }
153
154 mutex_lock(&efx->mac_lock);
155 rc = efx->phy_op->set_settings(efx, ecmd);
156 mutex_unlock(&efx->mac_lock);
157 return rc;
158}
159
160static void ef4_ethtool_get_drvinfo(struct net_device *net_dev,
161 struct ethtool_drvinfo *info)
162{
163 struct ef4_nic *efx = netdev_priv(net_dev);
164
165 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
166 strlcpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
167 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
168}
169
/* Return the size in bytes of the register dump for "ethtool -d". */
static int ef4_ethtool_get_regs_len(struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	return ef4_nic_get_regs_len(efx);
}
174
175static void ef4_ethtool_get_regs(struct net_device *net_dev,
176 struct ethtool_regs *regs, void *buf)
177{
178 struct ef4_nic *efx = netdev_priv(net_dev);
179
180 regs->version = efx->type->revision;
181 ef4_nic_get_regs(efx, buf);
182}
183
184static u32 ef4_ethtool_get_msglevel(struct net_device *net_dev)
185{
186 struct ef4_nic *efx = netdev_priv(net_dev);
187 return efx->msg_enable;
188}
189
190static void ef4_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
191{
192 struct ef4_nic *efx = netdev_priv(net_dev);
193 efx->msg_enable = msg_enable;
194}
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209static void ef4_fill_test(unsigned int test_index, u8 *strings, u64 *data,
210 int *test, const char *unit_format, int unit_id,
211 const char *test_format, const char *test_id)
212{
213 char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
214
215
216 if (data)
217 data[test_index] = *test;
218
219
220 if (strings) {
221 if (strchr(unit_format, '%'))
222 snprintf(unit_str, sizeof(unit_str),
223 unit_format, unit_id);
224 else
225 strcpy(unit_str, unit_format);
226 snprintf(test_str, sizeof(test_str), test_format, test_id);
227 snprintf(strings + test_index * ETH_GSTRING_LEN,
228 ETH_GSTRING_LEN,
229 "%-6s %-24s", unit_str, test_str);
230 }
231}
232
/* Helper macros expanding to a (format, argument) pair for ef4_fill_test().
 * Each supplies the unit_format/unit_id (or test_format/test_id) arguments
 * in one go.
 */
#define EF4_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EF4_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EF4_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EF4_LOOPBACK_NAME(_mode, _counter) \
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, ef4_loopback_mode)
238
239
240
241
242
243
244
245
246
247
248
249
250
/**
 * ef4_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx:		Efx NIC
 * @lb_tests:		Efx loopback self-test results structure
 * @mode:		Loopback test mode
 * @test_index:		Starting index of the test
 * @strings:		Ethtool strings, or %NULL
 * @data:		Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries.  Returns the new test
 * index, so successive calls can be chained.
 */
static int ef4_fill_loopback_test(struct ef4_nic *efx,
				  struct ef4_loopback_self_tests *lb_tests,
				  enum ef4_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	struct ef4_channel *channel =
		ef4_get_channel(efx, efx->tx_channel_offset);
	struct ef4_tx_queue *tx_queue;

	/* Two entries (sent/done) per TX queue on the first TX channel */
	ef4_for_each_channel_tx_queue(tx_queue, channel) {
		ef4_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EF4_TX_QUEUE_NAME(tx_queue),
			      EF4_LOOPBACK_NAME(mode, "tx_sent"));
		ef4_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EF4_TX_QUEUE_NAME(tx_queue),
			      EF4_LOOPBACK_NAME(mode, "tx_done"));
	}
	/* One good and one bad RX count for the whole loopback run */
	ef4_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EF4_LOOPBACK_NAME(mode, "rx_good"));
	ef4_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EF4_LOOPBACK_NAME(mode, "rx_bad"));

	return test_index;
}
282
283
284
285
286
287
288
289
290
291
292
293
294
295
/**
 * ef4_ethtool_fill_self_tests - get self-test entries
 * @efx:	Efx NIC
 * @tests:	Efx self-test results structure, or %NULL
 * @strings:	Ethtool strings, or %NULL
 * @data:	Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Returns the number of strings (== number of test results).
 *
 * Counting, naming and result-filling all share this one walk so the
 * three views can never get out of step with each other.
 */
static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx,
				       struct ef4_self_tests *tests,
				       u8 *strings, u64 *data)
{
	struct ef4_channel *channel;
	unsigned int n = 0, i;
	enum ef4_loopback_mode mode;

	ef4_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	ef4_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	ef4_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	ef4_for_each_channel(channel, efx) {
		ef4_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EF4_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		ef4_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EF4_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	ef4_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	ef4_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	/* PHY-specific tests, enumerated by name until the PHY op
	 * returns NULL
	 */
	if (efx->phy_op->run_tests != NULL) {
		EF4_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EF4_BUG_ON_PARANOID(i >= EF4_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			ef4_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests, one block per supported loopback mode */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = ef4_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}
355
356static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
357{
358 size_t n_stats = 0;
359 struct ef4_channel *channel;
360
361 ef4_for_each_channel(channel, efx) {
362 if (ef4_channel_has_tx_queues(channel)) {
363 n_stats++;
364 if (strings != NULL) {
365 snprintf(strings, ETH_GSTRING_LEN,
366 "tx-%u.tx_packets",
367 channel->tx_queue[0].queue /
368 EF4_TXQ_TYPES);
369
370 strings += ETH_GSTRING_LEN;
371 }
372 }
373 }
374 ef4_for_each_channel(channel, efx) {
375 if (ef4_channel_has_rx_queue(channel)) {
376 n_stats++;
377 if (strings != NULL) {
378 snprintf(strings, ETH_GSTRING_LEN,
379 "rx-%d.rx_packets", channel->channel);
380 strings += ETH_GSTRING_LEN;
381 }
382 }
383 }
384 return n_stats;
385}
386
387static int ef4_ethtool_get_sset_count(struct net_device *net_dev,
388 int string_set)
389{
390 struct ef4_nic *efx = netdev_priv(net_dev);
391
392 switch (string_set) {
393 case ETH_SS_STATS:
394 return efx->type->describe_stats(efx, NULL) +
395 EF4_ETHTOOL_SW_STAT_COUNT +
396 ef4_describe_per_queue_stats(efx, NULL);
397 case ETH_SS_TEST:
398 return ef4_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
399 default:
400 return -EINVAL;
401 }
402}
403
/* Fill in the string names for a string set.  The ETH_SS_STATS layout
 * must match ef4_ethtool_get_stats(): hardware stats first, then the
 * software stats table, then the per-queue packet counters.
 */
static void ef4_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		/* Each describe call returns the number of entries it
		 * wrote, so advance past that many string slots.
		 */
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
			strlcpy(strings + i * ETH_GSTRING_LEN,
				ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (ef4_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		break;
	case ETH_SS_TEST:
		ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	default:
		/* No other string sets are supported */
		break;
	}
}
429
/* Fill in the statistic values for "ethtool -S".  The layout must match
 * ef4_ethtool_get_strings(): hardware stats, then the software stat
 * table, then per-queue TX and RX packet totals.
 */
static void ef4_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	const struct ef4_sw_stat_desc *stat;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int i;

	spin_lock_bh(&efx->stats_lock);

	/* Hardware stats; update_stats() returns how many values it wrote */
	data += efx->type->update_stats(efx, data, NULL);

	/* Software stats from the descriptor table */
	for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &ef4_sw_stat_desc[i];
		switch (stat->source) {
		case EF4_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EF4_ETHTOOL_STAT_SOURCE_channel:
			/* Sum the counter over all channels */
			data[i] = 0;
			ef4_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EF4_ETHTOOL_STAT_SOURCE_tx_queue:
			/* Sum the counter over all TX queues */
			data[i] = 0;
			ef4_for_each_channel(channel, efx) {
				ef4_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EF4_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

	/* Per-queue packet totals: TX channels first, then RX, in the
	 * same order as ef4_describe_per_queue_stats()
	 */
	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_tx_queues(channel)) {
			*data = 0;
			ef4_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	ef4_for_each_channel(channel, efx) {
		if (ef4_channel_has_rx_queue(channel)) {
			*data = 0;
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}
}
493
494static void ef4_ethtool_self_test(struct net_device *net_dev,
495 struct ethtool_test *test, u64 *data)
496{
497 struct ef4_nic *efx = netdev_priv(net_dev);
498 struct ef4_self_tests *ef4_tests;
499 bool already_up;
500 int rc = -ENOMEM;
501
502 ef4_tests = kzalloc(sizeof(*ef4_tests), GFP_KERNEL);
503 if (!ef4_tests)
504 goto fail;
505
506 if (efx->state != STATE_READY) {
507 rc = -EBUSY;
508 goto out;
509 }
510
511 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
512 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
513
514
515 already_up = (efx->net_dev->flags & IFF_UP);
516 if (!already_up) {
517 rc = dev_open(efx->net_dev);
518 if (rc) {
519 netif_err(efx, drv, efx->net_dev,
520 "failed opening device.\n");
521 goto out;
522 }
523 }
524
525 rc = ef4_selftest(efx, ef4_tests, test->flags);
526
527 if (!already_up)
528 dev_close(efx->net_dev);
529
530 netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
531 rc == 0 ? "passed" : "failed",
532 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
533
534out:
535 ef4_ethtool_fill_self_tests(efx, ef4_tests, NULL, data);
536 kfree(ef4_tests);
537fail:
538 if (rc)
539 test->flags |= ETH_TEST_FL_FAILED;
540}
541
542
543static int ef4_ethtool_nway_reset(struct net_device *net_dev)
544{
545 struct ef4_nic *efx = netdev_priv(net_dev);
546
547 return mdio45_nway_restart(&efx->mdio);
548}
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579static int ef4_ethtool_get_coalesce(struct net_device *net_dev,
580 struct ethtool_coalesce *coalesce)
581{
582 struct ef4_nic *efx = netdev_priv(net_dev);
583 unsigned int tx_usecs, rx_usecs;
584 bool rx_adaptive;
585
586 ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
587
588 coalesce->tx_coalesce_usecs = tx_usecs;
589 coalesce->tx_coalesce_usecs_irq = tx_usecs;
590 coalesce->rx_coalesce_usecs = rx_usecs;
591 coalesce->rx_coalesce_usecs_irq = rx_usecs;
592 coalesce->use_adaptive_rx_coalesce = rx_adaptive;
593
594 return 0;
595}
596
/* Apply interrupt moderation settings from "ethtool -C".  Adaptive TX
 * coalescing is not supported.  For both RX and TX, the plain _usecs
 * field takes precedence when it was changed; otherwise the _irq
 * variant is used.
 */
static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_channel *channel;
	unsigned int tx_usecs, rx_usecs;
	bool adaptive, rx_may_override_tx;
	int rc;

	if (coalesce->use_adaptive_tx_coalesce)
		return -EINVAL;

	/* Start from the current settings so unchanged fields keep
	 * their values
	 */
	ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);

	if (coalesce->rx_coalesce_usecs != rx_usecs)
		rx_usecs = coalesce->rx_coalesce_usecs;
	else
		rx_usecs = coalesce->rx_coalesce_usecs_irq;

	adaptive = coalesce->use_adaptive_rx_coalesce;

	/* If the user did not ask for a TX change, the RX setting may
	 * be applied to channels that handle both RX and TX
	 */
	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
	if (coalesce->tx_coalesce_usecs != tx_usecs)
		tx_usecs = coalesce->tx_coalesce_usecs;
	else
		tx_usecs = coalesce->tx_coalesce_usecs_irq;

	rc = ef4_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
				     rx_may_override_tx);
	if (rc != 0)
		return rc;

	/* Push the new moderation values to the hardware */
	ef4_for_each_channel(channel, efx)
		efx->type->push_irq_moderation(channel);

	return 0;
}
638
639static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
640 struct ethtool_ringparam *ring)
641{
642 struct ef4_nic *efx = netdev_priv(net_dev);
643
644 ring->rx_max_pending = EF4_MAX_DMAQ_SIZE;
645 ring->tx_max_pending = EF4_MAX_DMAQ_SIZE;
646 ring->rx_pending = efx->rxq_entries;
647 ring->tx_pending = efx->txq_entries;
648}
649
650static int ef4_ethtool_set_ringparam(struct net_device *net_dev,
651 struct ethtool_ringparam *ring)
652{
653 struct ef4_nic *efx = netdev_priv(net_dev);
654 u32 txq_entries;
655
656 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
657 ring->rx_pending > EF4_MAX_DMAQ_SIZE ||
658 ring->tx_pending > EF4_MAX_DMAQ_SIZE)
659 return -EINVAL;
660
661 if (ring->rx_pending < EF4_RXQ_MIN_ENT) {
662 netif_err(efx, drv, efx->net_dev,
663 "RX queues cannot be smaller than %u\n",
664 EF4_RXQ_MIN_ENT);
665 return -EINVAL;
666 }
667
668 txq_entries = max(ring->tx_pending, EF4_TXQ_MIN_ENT(efx));
669 if (txq_entries != ring->tx_pending)
670 netif_warn(efx, drv, efx->net_dev,
671 "increasing TX queue size to minimum of %u\n",
672 txq_entries);
673
674 return ef4_realloc_channels(efx, ring->rx_pending, txq_entries);
675}
676
/* Apply flow-control settings from "ethtool -A".  Validates the
 * requested combination, reconfigures the PHY if the advertised
 * capabilities change, and always reconfigures the MAC on success.
 */
static int ef4_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EF4_FC_RX : 0) |
		     (pause->tx_pause ? EF4_FC_TX : 0) |
		     (pause->autoneg ? EF4_FC_AUTO : 0));

	/* TX pause without RX pause is not a supported combination */
	if ((wanted_fc & EF4_FC_TX) && !(wanted_fc & EF4_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	/* Autonegotiated flow control requires link autonegotiation */
	if ((wanted_fc & EF4_FC_AUTO) && !efx->link_advertising) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* Give the NIC a chance to prepare before TX flow control is
	 * newly enabled (optional per-NIC-type hook)
	 */
	if (efx->type->prepare_enable_fc_tx &&
	    (wanted_fc & EF4_FC_TX) && !(efx->wanted_fc & EF4_FC_TX))
		efx->type->prepare_enable_fc_tx(efx);

	old_adv = efx->link_advertising;
	old_fc = efx->wanted_fc;
	ef4_link_set_wanted_fc(efx, wanted_fc);
	if (efx->link_advertising != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EF4_FC_AUTO) {
		/* Advertised capabilities changed; re-run the PHY */
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC so the new flow control settings take
	 * effect immediately
	 */
	ef4_mac_reconfigure(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}
734
735static void ef4_ethtool_get_pauseparam(struct net_device *net_dev,
736 struct ethtool_pauseparam *pause)
737{
738 struct ef4_nic *efx = netdev_priv(net_dev);
739
740 pause->rx_pause = !!(efx->wanted_fc & EF4_FC_RX);
741 pause->tx_pause = !!(efx->wanted_fc & EF4_FC_TX);
742 pause->autoneg = !!(efx->wanted_fc & EF4_FC_AUTO);
743}
744
745static void ef4_ethtool_get_wol(struct net_device *net_dev,
746 struct ethtool_wolinfo *wol)
747{
748 struct ef4_nic *efx = netdev_priv(net_dev);
749 return efx->type->get_wol(efx, wol);
750}
751
752
753static int ef4_ethtool_set_wol(struct net_device *net_dev,
754 struct ethtool_wolinfo *wol)
755{
756 struct ef4_nic *efx = netdev_priv(net_dev);
757 return efx->type->set_wol(efx, wol->wolopts);
758}
759
760static int ef4_ethtool_reset(struct net_device *net_dev, u32 *flags)
761{
762 struct ef4_nic *efx = netdev_priv(net_dev);
763 int rc;
764
765 rc = efx->type->map_reset_flags(flags);
766 if (rc < 0)
767 return rc;
768
769 return ef4_reset(efx, rc);
770}
771
772
/* MAC address mask covering only the Individual/Group bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};

/* Full-match masks for the ethtool flow-spec conversions below */
#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
#define IP_PROTO_FULL_MASK	0xFF
#define PORT_FULL_MASK		((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)
779
780static inline void ip6_fill_mask(__be32 *mask)
781{
782 mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
783}
784
/* Convert a stored hardware filter back into an ethtool flow spec for
 * ETHTOOL_GRXCLSRULE.  Each branch below checks that the filter's
 * match flags are a subset of what the corresponding ethtool flow
 * type can express; a filter that fits none of them triggers WARN_ON.
 */
static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct ef4_filter_spec spec;
	int rc;

	rc = ef4_filter_get_filter_safe(efx, EF4_FILTER_PRI_MANUAL,
					rule->location, &spec);
	if (rc)
		return rc;

	if (spec.dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
		rule->ring_cookie = spec.dmaq_id;

	/* TCP/UDP over IPv4 with optional host/port matches */
	if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IP) &&
	    (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V4_FLOW : UDP_V4_FLOW);
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			ip_entry->ip4dst = spec.loc_host[0];
			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			ip_entry->ip4src = spec.rem_host[0];
			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
			ip_entry->pdst = spec.loc_port;
			ip_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
			ip_entry->psrc = spec.rem_port;
			ip_mask->psrc = PORT_FULL_MASK;
		}
	/* TCP/UDP over IPv6 with optional host/port matches */
	} else if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IPV6) &&
	    (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		EF4_FILTER_MATCH_IP_PROTO |
		EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V6_FLOW : UDP_V6_FLOW);
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			memcpy(ip6_entry->ip6dst, spec.loc_host,
			       sizeof(ip6_entry->ip6dst));
			ip6_fill_mask(ip6_mask->ip6dst);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			memcpy(ip6_entry->ip6src, spec.rem_host,
			       sizeof(ip6_entry->ip6src));
			ip6_fill_mask(ip6_mask->ip6src);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
			ip6_entry->pdst = spec.loc_port;
			ip6_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
			ip6_entry->psrc = spec.rem_port;
			ip6_mask->psrc = PORT_FULL_MASK;
		}
	/* Ethernet-layer match (MAC addresses / EtherType) */
	} else if (!(spec.match_flags &
		     ~(EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG |
		       EF4_FILTER_MATCH_REM_MAC | EF4_FILTER_MATCH_ETHER_TYPE |
		       EF4_FILTER_MATCH_OUTER_VID))) {
		rule->flow_type = ETHER_FLOW;
		if (spec.match_flags &
		    (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG)) {
			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
			if (spec.match_flags & EF4_FILTER_MATCH_LOC_MAC)
				eth_broadcast_addr(mac_mask->h_dest);
			else
				/* I/G bit only */
				ether_addr_copy(mac_mask->h_dest,
						mac_addr_ig_mask);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_MAC) {
			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
			eth_broadcast_addr(mac_mask->h_source);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) {
			mac_entry->h_proto = spec.ether_type;
			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
		}
	/* Other IPv4 protocol match */
	} else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IP) &&
		   !(spec.match_flags &
		     ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		       EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		       EF4_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV4_USER_FLOW;
		uip_entry->ip_ver = ETH_RX_NFC_IP4;
		if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
			uip_mask->proto = IP_PROTO_FULL_MASK;
			uip_entry->proto = spec.ip_proto;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			uip_entry->ip4dst = spec.loc_host[0];
			uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			uip_entry->ip4src = spec.rem_host[0];
			uip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
	/* Other IPv6 protocol match */
	} else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
		   spec.ether_type == htons(ETH_P_IPV6) &&
		   !(spec.match_flags &
		     ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
		       EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
		       EF4_FILTER_MATCH_IP_PROTO))) {
		rule->flow_type = IPV6_USER_FLOW;
		if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
			uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
			uip6_entry->l4_proto = spec.ip_proto;
		}
		if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
			memcpy(uip6_entry->ip6dst, spec.loc_host,
			       sizeof(uip6_entry->ip6dst));
			ip6_fill_mask(uip6_mask->ip6dst);
		}
		if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
			memcpy(uip6_entry->ip6src, spec.rem_host,
			       sizeof(uip6_entry->ip6src));
			ip6_fill_mask(uip6_mask->ip6src);
		}
	} else {
		/* The above should handle all filters that we insert */
		WARN_ON(1);
		return -EINVAL;
	}

	/* VLAN tag match is common to all flow types */
	if (spec.match_flags & EF4_FILTER_MATCH_OUTER_VID) {
		rule->flow_type |= FLOW_EXT;
		rule->h_ext.vlan_tci = spec.outer_vid;
		rule->m_ext.vlan_tci = htons(0xfff);
	}

	return rc;
}
944
/* Dispatch the ETHTOOL_GRX* "get RX flow classification" commands. */
static int
ef4_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct ef4_nic *efx = netdev_priv(net_dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		/* Report which header fields feed RX hashing */
		unsigned min_revision = 0;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			min_revision = EF4_REV_FALCON_B0;
			break;
		default:
			break;
		}
		/* Older hardware does not hash these flow types */
		if (ef4_nic_rev(efx) < min_revision)
			info->data = 0;
		return 0;
	}

	case ETHTOOL_GRXCLSRLCNT:
		info->data = ef4_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		info->data |= RX_CLS_LOC_SPECIAL;
		info->rule_cnt =
			ef4_filter_count_rx_used(efx, EF4_FILTER_PRI_MANUAL);
		return 0;

	case ETHTOOL_GRXCLSRULE:
		if (ef4_filter_get_rx_id_limit(efx) == 0)
			return -EOPNOTSUPP;
		return ef4_ethtool_get_class_rule(efx, &info->fs);

	case ETHTOOL_GRXCLSRLALL: {
		s32 rc;
		info->data = ef4_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		rc = ef4_filter_get_rx_ids(efx, EF4_FILTER_PRI_MANUAL,
					   rule_locs, info->rule_cnt);
		if (rc < 0)
			return rc;
		info->rule_cnt = rc;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}
1009
1010static inline bool ip6_mask_is_full(__be32 mask[4])
1011{
1012 return !~(mask[0] & mask[1] & mask[2] & mask[3]);
1013}
1014
1015static inline bool ip6_mask_is_empty(__be32 mask[4])
1016{
1017 return !(mask[0] | mask[1] | mask[2] | mask[3]);
1018}
1019
1020static int ef4_ethtool_set_class_rule(struct ef4_nic *efx,
1021 struct ethtool_rx_flow_spec *rule)
1022{
1023 struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
1024 struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
1025 struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
1026 struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
1027 struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
1028 struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
1029 struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
1030 struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
1031 struct ethhdr *mac_entry = &rule->h_u.ether_spec;
1032 struct ethhdr *mac_mask = &rule->m_u.ether_spec;
1033 struct ef4_filter_spec spec;
1034 int rc;
1035
1036
1037 if (rule->location != RX_CLS_LOC_ANY)
1038 return -EINVAL;
1039
1040
1041 if (rule->ring_cookie >= efx->n_rx_channels &&
1042 rule->ring_cookie != RX_CLS_FLOW_DISC)
1043 return -EINVAL;
1044
1045
1046 if ((rule->flow_type & FLOW_EXT) &&
1047 (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
1048 rule->m_ext.data[1]))
1049 return -EINVAL;
1050
1051 ef4_filter_init_rx(&spec, EF4_FILTER_PRI_MANUAL,
1052 efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
1053 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
1054 EF4_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
1055
1056 switch (rule->flow_type & ~FLOW_EXT) {
1057 case TCP_V4_FLOW:
1058 case UDP_V4_FLOW:
1059 spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
1060 EF4_FILTER_MATCH_IP_PROTO);
1061 spec.ether_type = htons(ETH_P_IP);
1062 spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
1063 IPPROTO_TCP : IPPROTO_UDP);
1064 if (ip_mask->ip4dst) {
1065 if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
1066 return -EINVAL;
1067 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1068 spec.loc_host[0] = ip_entry->ip4dst;
1069 }
1070 if (ip_mask->ip4src) {
1071 if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
1072 return -EINVAL;
1073 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1074 spec.rem_host[0] = ip_entry->ip4src;
1075 }
1076 if (ip_mask->pdst) {
1077 if (ip_mask->pdst != PORT_FULL_MASK)
1078 return -EINVAL;
1079 spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
1080 spec.loc_port = ip_entry->pdst;
1081 }
1082 if (ip_mask->psrc) {
1083 if (ip_mask->psrc != PORT_FULL_MASK)
1084 return -EINVAL;
1085 spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
1086 spec.rem_port = ip_entry->psrc;
1087 }
1088 if (ip_mask->tos)
1089 return -EINVAL;
1090 break;
1091
1092 case TCP_V6_FLOW:
1093 case UDP_V6_FLOW:
1094 spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
1095 EF4_FILTER_MATCH_IP_PROTO);
1096 spec.ether_type = htons(ETH_P_IPV6);
1097 spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ?
1098 IPPROTO_TCP : IPPROTO_UDP);
1099 if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
1100 if (!ip6_mask_is_full(ip6_mask->ip6dst))
1101 return -EINVAL;
1102 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1103 memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
1104 }
1105 if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
1106 if (!ip6_mask_is_full(ip6_mask->ip6src))
1107 return -EINVAL;
1108 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1109 memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
1110 }
1111 if (ip6_mask->pdst) {
1112 if (ip6_mask->pdst != PORT_FULL_MASK)
1113 return -EINVAL;
1114 spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
1115 spec.loc_port = ip6_entry->pdst;
1116 }
1117 if (ip6_mask->psrc) {
1118 if (ip6_mask->psrc != PORT_FULL_MASK)
1119 return -EINVAL;
1120 spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
1121 spec.rem_port = ip6_entry->psrc;
1122 }
1123 if (ip6_mask->tclass)
1124 return -EINVAL;
1125 break;
1126
1127 case IPV4_USER_FLOW:
1128 if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
1129 uip_entry->ip_ver != ETH_RX_NFC_IP4)
1130 return -EINVAL;
1131 spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
1132 spec.ether_type = htons(ETH_P_IP);
1133 if (uip_mask->ip4dst) {
1134 if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
1135 return -EINVAL;
1136 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1137 spec.loc_host[0] = uip_entry->ip4dst;
1138 }
1139 if (uip_mask->ip4src) {
1140 if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
1141 return -EINVAL;
1142 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1143 spec.rem_host[0] = uip_entry->ip4src;
1144 }
1145 if (uip_mask->proto) {
1146 if (uip_mask->proto != IP_PROTO_FULL_MASK)
1147 return -EINVAL;
1148 spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
1149 spec.ip_proto = uip_entry->proto;
1150 }
1151 break;
1152
1153 case IPV6_USER_FLOW:
1154 if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
1155 return -EINVAL;
1156 spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
1157 spec.ether_type = htons(ETH_P_IPV6);
1158 if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
1159 if (!ip6_mask_is_full(uip6_mask->ip6dst))
1160 return -EINVAL;
1161 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1162 memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
1163 }
1164 if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
1165 if (!ip6_mask_is_full(uip6_mask->ip6src))
1166 return -EINVAL;
1167 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1168 memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
1169 }
1170 if (uip6_mask->l4_proto) {
1171 if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
1172 return -EINVAL;
1173 spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
1174 spec.ip_proto = uip6_entry->l4_proto;
1175 }
1176 break;
1177
1178 case ETHER_FLOW:
1179 if (!is_zero_ether_addr(mac_mask->h_dest)) {
1180 if (ether_addr_equal(mac_mask->h_dest,
1181 mac_addr_ig_mask))
1182 spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
1183 else if (is_broadcast_ether_addr(mac_mask->h_dest))
1184 spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC;
1185 else
1186 return -EINVAL;
1187 ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
1188 }
1189 if (!is_zero_ether_addr(mac_mask->h_source)) {
1190 if (!is_broadcast_ether_addr(mac_mask->h_source))
1191 return -EINVAL;
1192 spec.match_flags |= EF4_FILTER_MATCH_REM_MAC;
1193 ether_addr_copy(spec.rem_mac, mac_entry->h_source);
1194 }
1195 if (mac_mask->h_proto) {
1196 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
1197 return -EINVAL;
1198 spec.match_flags |= EF4_FILTER_MATCH_ETHER_TYPE;
1199 spec.ether_type = mac_entry->h_proto;
1200 }
1201 break;
1202
1203 default:
1204 return -EINVAL;
1205 }
1206
1207 if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
1208 if (rule->m_ext.vlan_tci != htons(0xfff))
1209 return -EINVAL;
1210 spec.match_flags |= EF4_FILTER_MATCH_OUTER_VID;
1211 spec.outer_vid = rule->h_ext.vlan_tci;
1212 }
1213
1214 rc = ef4_filter_insert_filter(efx, &spec, true);
1215 if (rc < 0)
1216 return rc;
1217
1218 rule->location = rc;
1219 return 0;
1220}
1221
1222static int ef4_ethtool_set_rxnfc(struct net_device *net_dev,
1223 struct ethtool_rxnfc *info)
1224{
1225 struct ef4_nic *efx = netdev_priv(net_dev);
1226
1227 if (ef4_filter_get_rx_id_limit(efx) == 0)
1228 return -EOPNOTSUPP;
1229
1230 switch (info->cmd) {
1231 case ETHTOOL_SRXCLSRLINS:
1232 return ef4_ethtool_set_class_rule(efx, &info->fs);
1233
1234 case ETHTOOL_SRXCLSRLDEL:
1235 return ef4_filter_remove_id_safe(efx, EF4_FILTER_PRI_MANUAL,
1236 info->fs.location);
1237
1238 default:
1239 return -EOPNOTSUPP;
1240 }
1241}
1242
1243static u32 ef4_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
1244{
1245 struct ef4_nic *efx = netdev_priv(net_dev);
1246
1247 return ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0 ||
1248 efx->n_rx_channels == 1) ?
1249 0 : ARRAY_SIZE(efx->rx_indir_table));
1250}
1251
1252static int ef4_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
1253 u8 *hfunc)
1254{
1255 struct ef4_nic *efx = netdev_priv(net_dev);
1256
1257 if (hfunc)
1258 *hfunc = ETH_RSS_HASH_TOP;
1259 if (indir)
1260 memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
1261 return 0;
1262}
1263
1264static int ef4_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
1265 const u8 *key, const u8 hfunc)
1266{
1267 struct ef4_nic *efx = netdev_priv(net_dev);
1268
1269
1270 if (key ||
1271 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1272 return -EOPNOTSUPP;
1273 if (!indir)
1274 return 0;
1275
1276 return efx->type->rx_push_rss_config(efx, true, indir);
1277}
1278
1279static int ef4_ethtool_get_module_eeprom(struct net_device *net_dev,
1280 struct ethtool_eeprom *ee,
1281 u8 *data)
1282{
1283 struct ef4_nic *efx = netdev_priv(net_dev);
1284 int ret;
1285
1286 if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
1287 return -EOPNOTSUPP;
1288
1289 mutex_lock(&efx->mac_lock);
1290 ret = efx->phy_op->get_module_eeprom(efx, ee, data);
1291 mutex_unlock(&efx->mac_lock);
1292
1293 return ret;
1294}
1295
1296static int ef4_ethtool_get_module_info(struct net_device *net_dev,
1297 struct ethtool_modinfo *modinfo)
1298{
1299 struct ef4_nic *efx = netdev_priv(net_dev);
1300 int ret;
1301
1302 if (!efx->phy_op || !efx->phy_op->get_module_info)
1303 return -EOPNOTSUPP;
1304
1305 mutex_lock(&efx->mac_lock);
1306 ret = efx->phy_op->get_module_info(efx, modinfo);
1307 mutex_unlock(&efx->mac_lock);
1308
1309 return ret;
1310}
1311
/* ethtool operations table for Falcon-architecture NICs; wired into
 * the net_device by the driver core.  Handlers not listed here fall
 * back to the ethtool core defaults.
 */
const struct ethtool_ops ef4_ethtool_ops = {
	.get_settings		= ef4_ethtool_get_settings,
	.set_settings		= ef4_ethtool_set_settings,
	.get_drvinfo		= ef4_ethtool_get_drvinfo,
	.get_regs_len		= ef4_ethtool_get_regs_len,
	.get_regs		= ef4_ethtool_get_regs,
	.get_msglevel		= ef4_ethtool_get_msglevel,
	.set_msglevel		= ef4_ethtool_set_msglevel,
	.nway_reset		= ef4_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= ef4_ethtool_get_coalesce,
	.set_coalesce		= ef4_ethtool_set_coalesce,
	.get_ringparam		= ef4_ethtool_get_ringparam,
	.set_ringparam		= ef4_ethtool_set_ringparam,
	.get_pauseparam         = ef4_ethtool_get_pauseparam,
	.set_pauseparam         = ef4_ethtool_set_pauseparam,
	.get_sset_count		= ef4_ethtool_get_sset_count,
	.self_test		= ef4_ethtool_self_test,
	.get_strings		= ef4_ethtool_get_strings,
	.set_phys_id		= ef4_ethtool_phys_id,
	.get_ethtool_stats	= ef4_ethtool_get_stats,
	.get_wol                = ef4_ethtool_get_wol,
	.set_wol                = ef4_ethtool_set_wol,
	.reset			= ef4_ethtool_reset,
	.get_rxnfc		= ef4_ethtool_get_rxnfc,
	.set_rxnfc		= ef4_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= ef4_ethtool_get_rxfh_indir_size,
	.get_rxfh		= ef4_ethtool_get_rxfh,
	.set_rxfh		= ef4_ethtool_set_rxfh,
	.get_module_info	= ef4_ethtool_get_module_info,
	.get_module_eeprom	= ef4_ethtool_get_module_eeprom,
};
1344