1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19#include <linux/netdevice.h>
20#include <linux/ethtool.h>
21#include <linux/net_tstamp.h>
22
23#include "enic_res.h"
24#include "enic.h"
25#include "enic_dev.h"
26#include "enic_clsf.h"
27#include "vnic_rss.h"
28#include "vnic_stats.h"
29
/* Name/index pair describing one counter exported through ethtool -S.
 * @index is the u64-sized slot of the counter inside its containing
 * stats structure (vnic_tx_stats, vnic_rx_stats or vnic_gen_stats).
 */
struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

/* Build an enic_stat entry for a field of struct vnic_tx_stats; the
 * stringified field name becomes the ethtool stat name and the field's
 * byte offset is converted to a u64 index.
 */
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

/* Same as ENIC_TX_STAT, but for struct vnic_rx_stats fields. */
#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

/* Same as ENIC_TX_STAT, but for driver-generated struct vnic_gen_stats
 * fields (kept in host memory, not fetched from the adapter).
 */
#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
49
/* TX counters reported by ethtool -S, in display order. */
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

/* RX counters reported by ethtool -S, in display order. */
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

/* Driver-side (software) counters reported by ethtool -S. */
static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};

/* Table sizes, used by get_sset_count/get_strings/get_ethtool_stats. */
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
95
96static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
97{
98 int i;
99 int intr;
100
101 for (i = 0; i < enic->rq_count; i++) {
102 intr = enic_msix_rq_intr(enic, i);
103 vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
104 }
105}
106
107static int enic_get_ksettings(struct net_device *netdev,
108 struct ethtool_link_ksettings *ecmd)
109{
110 struct enic *enic = netdev_priv(netdev);
111 struct ethtool_link_settings *base = &ecmd->base;
112
113 ethtool_link_ksettings_add_link_mode(ecmd, supported,
114 10000baseT_Full);
115 ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
116 ethtool_link_ksettings_add_link_mode(ecmd, advertising,
117 10000baseT_Full);
118 ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
119 base->port = PORT_FIBRE;
120
121 if (netif_carrier_ok(netdev)) {
122 base->speed = vnic_dev_port_speed(enic->vdev);
123 base->duplex = DUPLEX_FULL;
124 } else {
125 base->speed = SPEED_UNKNOWN;
126 base->duplex = DUPLEX_UNKNOWN;
127 }
128
129 base->autoneg = AUTONEG_DISABLE;
130
131 return 0;
132}
133
/* ethtool .get_drvinfo: report driver name, firmware version and PCI
 * bus id.
 */
static void enic_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* Bail out only on -ENOMEM, when there is no fw_info buffer to
	 * read from.  NOTE(review): other errors fall through and report
	 * whatever fw_info currently holds — presumably previously
	 * fetched data; confirm against enic_dev_fw_info.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
155
156static void enic_get_strings(struct net_device *netdev, u32 stringset,
157 u8 *data)
158{
159 unsigned int i;
160
161 switch (stringset) {
162 case ETH_SS_STATS:
163 for (i = 0; i < enic_n_tx_stats; i++) {
164 memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
165 data += ETH_GSTRING_LEN;
166 }
167 for (i = 0; i < enic_n_rx_stats; i++) {
168 memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
169 data += ETH_GSTRING_LEN;
170 }
171 for (i = 0; i < enic_n_gen_stats; i++) {
172 memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
173 data += ETH_GSTRING_LEN;
174 }
175 break;
176 }
177}
178
179static void enic_get_ringparam(struct net_device *netdev,
180 struct ethtool_ringparam *ring)
181{
182 struct enic *enic = netdev_priv(netdev);
183 struct vnic_enet_config *c = &enic->config;
184
185 ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
186 ring->rx_pending = c->rq_desc_count;
187 ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
188 ring->tx_pending = c->wq_desc_count;
189}
190
/* ethtool .set_ringparam: resize the RX/TX descriptor rings.
 *
 * Validates the requested sizes, closes the interface if it is
 * running, frees and re-allocates the vNIC queue resources with the
 * new counts, then re-opens the interface.  On failure the previous
 * descriptor counts are restored in the config; note that if the
 * re-open itself fails the device is left closed.
 */
static int enic_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;
	int running = netif_running(netdev);
	unsigned int rx_pending;
	unsigned int tx_pending;
	int err = 0;

	/* Mini and jumbo rings do not exist on this adapter */
	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
		netdev_info(netdev,
			    "modifying mini ring params is not supported");
		return -EINVAL;
	}
	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev,
			    "modifying jumbo ring params is not supported");
		return -EINVAL;
	}
	/* Remember current counts so they can be restored on failure */
	rx_pending = c->rq_desc_count;
	tx_pending = c->wq_desc_count;
	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
			    ENIC_MAX_RQ_DESCS);
		return -EINVAL;
	}
	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
			    ENIC_MAX_WQ_DESCS);
		return -EINVAL;
	}
	if (running)
		dev_close(netdev);
	/* Round down to a multiple of 32 (mask 0xffffffe0).
	 * NOTE(review): assumed hardware descriptor-count granularity;
	 * confirm against the vNIC resource code.
	 */
	c->rq_desc_count =
		ring->rx_pending & 0xffffffe0;
	c->wq_desc_count =
		ring->tx_pending & 0xffffffe0;
	enic_free_vnic_resources(enic);
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc vNIC resources, aborting\n");
		enic_free_vnic_resources(enic);
		goto err_out;
	}
	enic_init_vnic_resources(enic);
	if (running) {
		err = dev_open(netdev, NULL);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	/* Restore the previous counts in the config */
	c->rq_desc_count = rx_pending;
	c->wq_desc_count = tx_pending;
	return err;
}
253
254static int enic_get_sset_count(struct net_device *netdev, int sset)
255{
256 switch (sset) {
257 case ETH_SS_STATS:
258 return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
259 default:
260 return -EOPNOTSUPP;
261 }
262}
263
/* ethtool .get_ethtool_stats: fill @data with the counters, in the
 * same TX/RX/gen order as enic_get_strings().  Each enic_stat.index is
 * a u64 slot inside the corresponding stats structure.
 */
static void enic_get_ethtool_stats(struct net_device *netdev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* Bail out only on -ENOMEM, when there is no vstats buffer to
	 * read from.  NOTE(review): other errors fall through and report
	 * whatever vstats currently holds — presumably previously
	 * fetched counters; confirm against enic_dev_stats_dump.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}
287
/* ethtool .get_msglevel: report the driver's message-enable mask. */
static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}
293
/* ethtool .set_msglevel: update the driver's message-enable mask. */
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}
299
/* ethtool .get_coalesce: report current interrupt coalescing settings.
 * The TX timer is only reported in MSI-X mode, where per-WQ interrupt
 * timers exist (see enic_set_coalesce).  The low/high usecs expose the
 * adaptive RX coalescing range.
 */
static int enic_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}
318
/* Validate user-supplied coalescing parameters.
 *
 * Rejects a non-zero TX timer outside MSI-X mode (there is no per-WQ
 * interrupt timer to program), and an adaptive RX high bound that does
 * not leave at least ENIC_AIC_LARGE_PKT_DIFF usecs above the low bound
 * once both are clamped to the adapter maximum.  Values above the
 * adapter maximum are only warned about here; the caller clamps them.
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	/* A non-zero high bound enables the adaptive range; it must sit
	 * at least ENIC_AIC_LARGE_PKT_DIFF above the low bound.
	 */
	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}
346
/* ethtool .set_coalesce: apply interrupt coalescing settings.
 *
 * All requested values are clamped to the adapter's maximum timer.  In
 * MSI-X mode the TX timer is programmed on every WQ interrupt.  The RX
 * timer is programmed into the hardware only when adaptive RX
 * coalescing is off; when rx_coalesce_usecs_high is non-zero the
 * adaptive algorithm's range is updated instead.
 */
static int enic_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	/* Clamp all requested values to the adapter maximum */
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	/* TX timers exist per WQ interrupt only in MSI-X mode */
	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	/* Program the fixed RX timer only when adaptive mode is off */
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	/* A non-zero high bound updates the adaptive range */
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
398
/* ethtool ETHTOOL_GRXCLSRLALL helper: copy the IDs of all installed RX
 * flow filters into @rule_locs and report the count in cmd->rule_cnt.
 * Returns -EMSGSIZE when more filters exist than cmd->rule_cnt slots.
 * Caller must hold enic->rfs_h.lock (taken in enic_get_rxnfc).
 */
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	/* number of filters currently installed */
	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	/* walk every bucket of the filter hash table */
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}
422
/* ethtool ETHTOOL_GRXCLSRULE helper: reconstruct the flow spec of the
 * filter stored at fsp->location.  Only IPv4 TCP/UDP filters are
 * representable; the stored keys cover the full 4-tuple, so all masks
 * are reported as all-ones.  Caller must hold enic->rfs_h.lock (taken
 * in enic_get_rxnfc).  Returns -EINVAL if the filter does not exist or
 * has an unexpected protocol.
 */
static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
				(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct enic_rfs_fltr_node *n;

	n = htbl_fltr_search(enic, (u16)fsp->location);
	if (!n)
		return -EINVAL;
	switch (n->keys.basic.ip_proto) {
	case IPPROTO_TCP:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;

	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;

	/* the RQ this filter steers matching packets to */
	fsp->ring_cookie = n->rq_id;

	return 0;
}
459
/* ethtool ETHTOOL_GRXFH helper: report which header fields feed the RSS
 * hash for a given flow type.  TCP flows always hash on the full
 * 4-tuple; UDP flows hash on ports only when the adapter advertises
 * the corresponding UDP RSS capability; the remaining IP flow types
 * hash on source/destination address only.
 */
static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	u8 rss_hash_type = 0;
	cmd->data = 0;

	/* devcmd access is serialized by devcmd_lock */
	spin_lock_bh(&enic->devcmd_lock);
	(void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
	spin_unlock_bh(&enic->devcmd_lock);
	switch (cmd->flow_type) {
	case TCP_V6_FLOW:
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
			     RXH_IP_SRC | RXH_IP_DST;
		break;
	case UDP_V6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
502
/* ethtool .get_rxnfc: dispatch the RX network-flow-classification
 * queries.  Filter-table queries (rule count, rule list, single rule)
 * are serialized against filter add/remove by enic->rfs_h.lock.
 */
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		/* number of RX rings available for flow steering */
		cmd->data = enic->rq_count;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXFH:
		ret = enic_get_rx_flow_hash(enic, cmd);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
539
540static int enic_get_tunable(struct net_device *dev,
541 const struct ethtool_tunable *tuna, void *data)
542{
543 struct enic *enic = netdev_priv(dev);
544 int ret = 0;
545
546 switch (tuna->id) {
547 case ETHTOOL_RX_COPYBREAK:
548 *(u32 *)data = enic->rx_copybreak;
549 break;
550 default:
551 ret = -EINVAL;
552 break;
553 }
554
555 return ret;
556}
557
558static int enic_set_tunable(struct net_device *dev,
559 const struct ethtool_tunable *tuna,
560 const void *data)
561{
562 struct enic *enic = netdev_priv(dev);
563 int ret = 0;
564
565 switch (tuna->id) {
566 case ETHTOOL_RX_COPYBREAK:
567 enic->rx_copybreak = *(u32 *)data;
568 break;
569 default:
570 ret = -EINVAL;
571 break;
572 }
573
574 return ret;
575}
576
/* ethtool .get_rxfh_key_size: length of the RSS hash key, in bytes. */
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}
581
582static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
583 u8 *hfunc)
584{
585 struct enic *enic = netdev_priv(netdev);
586
587 if (hkey)
588 memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);
589
590 if (hfunc)
591 *hfunc = ETH_RSS_HASH_TOP;
592
593 return 0;
594}
595
596static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
597 const u8 *hkey, const u8 hfunc)
598{
599 struct enic *enic = netdev_priv(netdev);
600
601 if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
602 indir)
603 return -EINVAL;
604
605 if (hkey)
606 memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
607
608 return __enic_set_rsskey(enic);
609}
610
611static int enic_get_ts_info(struct net_device *netdev,
612 struct ethtool_ts_info *info)
613{
614 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
615 SOF_TIMESTAMPING_RX_SOFTWARE |
616 SOF_TIMESTAMPING_SOFTWARE;
617
618 return 0;
619}
620
/* ethtool operations table for enic netdevs.  supported_coalesce_params
 * restricts set_coalesce to the fields enic_set_coalesce understands
 * (basic usecs, adaptive RX, and the adaptive low/high RX range).
 */
static const struct ethtool_ops enic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RX_USECS_LOW |
				     ETHTOOL_COALESCE_RX_USECS_HIGH,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_ringparam = enic_get_ringparam,
	.set_ringparam = enic_set_ringparam,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
	.get_ts_info = enic_get_ts_info,
};
646
/* Attach the enic ethtool operations to a newly created netdev. */
void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}
651