#include <linux/netdevice.h>
#include <linux/ethtool.h>

#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_clsf.h"
#include "vnic_rss.h"
#include "vnic_stats.h"

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

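/* Each stat entry pairs the counter's name with its u64 word offset inside
 * the corresponding vnic_*_stats structure, so the firmware stats blocks
 * can be read as flat u64 arrays in enic_get_ethtool_stats().
 */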
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64) \
}

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);

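/* Program the RX interrupt coalescing timer, in usecs, on the interrupt
 * associated with every receive queue.
 */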
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	int i;
	int intr;

	for (i = 0; i < enic->rq_count; i++) {
		intr = enic_msix_rq_intr(enic, i);
		vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
	}
}

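/* The adapter is always reported as a 10G fibre link with autonegotiation
 * disabled; speed and full duplex are filled in only while the carrier
 * is up.
 */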
static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* Bail out only if the firmware info buffer could not be allocated
	 * (-ENOMEM); for any other failure report whatever info is available.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

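/* The order of the strings emitted here must match the order in which
 * enic_get_ethtool_stats() fills in the values.
 */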
static void enic_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_gen_stats; i++) {
			memcpy(data, enic_gen_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* Bail out only if the stats buffer could not be allocated (-ENOMEM);
	 * for any other failure report the previously dumped counters.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		*(data++) = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);

	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);

	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}

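/* Reject coalescing parameters the adapter cannot honor: only the usecs
 * timers and adaptive RX coalescing are supported, TX coalescing requires
 * MSI-X, and when rx_coalesce_usecs_high is set it must exceed the low
 * value by at least ENIC_AIC_LARGE_PKT_DIFF.  Values above the device
 * maximum only trigger a warning here and are clamped by the caller.
 */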
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if (ec->rx_max_coalesced_frames ||
	    ec->rx_coalesce_usecs_irq ||
	    ec->rx_max_coalesced_frames_irq ||
	    ec->tx_max_coalesced_frames ||
	    ec->tx_coalesce_usecs_irq ||
	    ec->tx_max_coalesced_frames_irq ||
	    ec->stats_block_coalesce_usecs ||
	    ec->use_adaptive_tx_coalesce ||
	    ec->pkt_rate_low ||
	    ec->rx_max_coalesced_frames_low ||
	    ec->tx_coalesce_usecs_low ||
	    ec->tx_max_coalesced_frames_low ||
	    ec->pkt_rate_high ||
	    ec->rx_max_coalesced_frames_high ||
	    ec->tx_coalesce_usecs_high ||
	    ec->tx_max_coalesced_frames_high ||
	    ec->rate_sample_interval)
		return -EINVAL;

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max) ||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

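/* Report every installed receive classification filter by walking the
 * aRFS filter hash table; the caller must hold enic->rfs_h.lock.
 */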
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}

static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
				(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct enic_rfs_fltr_node *n;

	n = htbl_fltr_search(enic, (u16)fsp->location);
	if (!n)
		return -EINVAL;
	switch (n->keys.basic.ip_proto) {
	case IPPROTO_TCP:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IPPROTO_UDP:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;

	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;

	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;

	fsp->ring_cookie = n->rq_id;

	return 0;
}

static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = enic->rq_count;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

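/* ETHTOOL_RX_COPYBREAK tunes enic->rx_copybreak, the receive copy-break
 * threshold: frames no larger than this are copied into a freshly
 * allocated skb by the RX path so the original ring buffer can be reused.
 */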
static int enic_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = enic->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int enic_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct enic *enic = netdev_priv(dev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		enic->rx_copybreak = *(u32 *)data;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}

static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
			 u8 *hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if (hkey)
		memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}

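/* Only the Toeplitz hash function is supported and the RSS indirection
 * table cannot be changed, so anything else is rejected; a new hash key
 * is copied in and pushed to the device.
 */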
static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
			 const u8 *hkey, const u8 hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
	    indir)
		return -EINVAL;

	if (hkey)
		memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);

	return __enic_set_rsskey(enic);
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
};

void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}