// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"

static void gve_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *info)
{
	struct gve_priv *priv = netdev_priv(netdev);

	strscpy(info->driver, "gve", sizeof(info->driver));
	strscpy(info->version, gve_version_str, sizeof(info->version));
	strscpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
}

static void gve_set_msglevel(struct net_device *netdev, u32 value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}

static u32 gve_get_msglevel(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);

	return priv->msg_enable;
}

static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
	"rx_dropped", "tx_dropped", "tx_timeouts",
	"rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
	"interface_up_cnt", "interface_down_cnt", "reset_cnt",
	"page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
};

static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
	"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
	"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
	"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
	"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
};

static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
	"tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
	"tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
	"tx_dma_mapping_error[%u]",
};

static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
	"adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
	"adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
	"adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
	"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
	"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
	"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
	"adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
};

static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
	"report-stats",
};

#define GVE_MAIN_STATS_LEN	ARRAY_SIZE(gve_gstrings_main_stats)
#define GVE_ADMINQ_STATS_LEN	ARRAY_SIZE(gve_gstrings_adminq_stats)
#define NUM_GVE_TX_CNTS		ARRAY_SIZE(gve_gstrings_tx_stats)
#define NUM_GVE_RX_CNTS		ARRAY_SIZE(gve_gstrings_rx_stats)
#define GVE_PRIV_FLAGS_STR_LEN	ARRAY_SIZE(gve_gstrings_priv_flags)

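/* The per-queue string templates above embed a "%u" that
 * gve_get_strings() expands with the queue number, so the counts
 * returned by gve_get_sset_count() must stay in lockstep with the
 * strings and values emitted below; ethtool sizes its buffers from
 * that count.
 */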
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct gve_priv *priv = netdev_priv(netdev);
	char *s = (char *)data;
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(s, *gve_gstrings_main_stats,
		       sizeof(gve_gstrings_main_stats));
		s += sizeof(gve_gstrings_main_stats);

		for (i = 0; i < priv->rx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_rx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		for (i = 0; i < priv->tx_cfg.num_queues; i++) {
			for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
				snprintf(s, ETH_GSTRING_LEN,
					 gve_gstrings_tx_stats[j], i);
				s += ETH_GSTRING_LEN;
			}
		}

		memcpy(s, *gve_gstrings_adminq_stats,
		       sizeof(gve_gstrings_adminq_stats));
		s += sizeof(gve_gstrings_adminq_stats);
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(s, *gve_gstrings_priv_flags,
		       sizeof(gve_gstrings_priv_flags));
		s += sizeof(gve_gstrings_priv_flags);
		break;

	default:
		break;
	}
}

static int gve_get_sset_count(struct net_device *netdev, int sset)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (sset) {
	case ETH_SS_STATS:
		return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
		       (priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
		       (priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
	case ETH_SS_PRIV_FLAGS:
		return GVE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

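/* Fills @data in the exact order gve_get_strings() lays out the names:
 * GVE_MAIN_STATS_LEN aggregate values, then NUM_GVE_RX_CNTS values per
 * rx queue, NUM_GVE_TX_CNTS values per tx queue, and finally the adminq
 * counters. Per-queue values come from two sources: u64 stats the
 * driver maintains, and stats the NIC writes into the shared stats
 * report when report-stats is enabled.
 */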
static void
gve_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats *stats, u64 *data)
{
	u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail,
		tmp_rx_buf_alloc_fail, tmp_rx_desc_err_dropped_pkt,
		tmp_tx_pkts, tmp_tx_bytes;
	u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
		rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
	int stats_idx, base_stats_idx, max_stats_idx;
	struct stats *report_stats;
	int *rx_qid_to_stats_idx;
	int *tx_qid_to_stats_idx;
	struct gve_priv *priv;
	bool skip_nic_stats;
	unsigned int start;
	int ring;
	int i, j;

	ASSERT_RTNL();

	priv = netdev_priv(netdev);
	report_stats = priv->stats_report->stats;
	rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!rx_qid_to_stats_idx)
		return;
	tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
					    sizeof(int), GFP_KERNEL);
	if (!tx_qid_to_stats_idx) {
		kfree(rx_qid_to_stats_idx);
		return;
	}
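	/* Aggregate the per-ring counters. Each ring's u64 stats sit
	 * behind a u64_stats seqcount: re-read the snapshot whenever
	 * u64_stats_fetch_retry() reports that a writer updated the
	 * counters mid-read, so each ring contributes a consistent set.
	 */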
	for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
	     rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
	     ring < priv->rx_cfg.num_queues; ring++) {
		if (priv->rx) {
			do {
				struct gve_rx_ring *rx = &priv->rx[ring];

				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_pkts = rx->rpackets;
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			rx_pkts += tmp_rx_pkts;
			rx_bytes += tmp_rx_bytes;
			rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
			rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
			rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
		}
	}
	for (tx_pkts = 0, tx_bytes = 0, ring = 0;
	     ring < priv->tx_cfg.num_queues; ring++) {
		if (priv->tx) {
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_pkts = priv->tx[ring].pkt_done;
				tmp_tx_bytes = priv->tx[ring].bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			tx_pkts += tmp_tx_pkts;
			tx_bytes += tmp_tx_bytes;
		}
	}

	i = 0;
	data[i++] = rx_pkts;
	data[i++] = tx_pkts;
	data[i++] = rx_bytes;
	data[i++] = tx_bytes;
	/* total rx dropped packets */
	data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
		    rx_desc_err_dropped_pkt;
	/* Skip tx_dropped */
	i++;

	data[i++] = priv->tx_timeo_cnt;
	data[i++] = rx_skb_alloc_fail;
	data[i++] = rx_buf_alloc_fail;
	data[i++] = rx_desc_err_dropped_pkt;
	data[i++] = priv->interface_up_cnt;
	data[i++] = priv->interface_down_cnt;
	data[i++] = priv->reset_cnt;
	data[i++] = priv->page_alloc_fail;
	data[i++] = priv->dma_mapping_error;
	data[i++] = priv->stats_report_trigger_cnt;
	i = GVE_MAIN_STATS_LEN;

	/* For rx cross-reporting stats, start from nic rx stats in report */
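	/* The shared stats report groups driver-written per-queue stats
	 * first, then one NIC-written block per rx queue, then one per
	 * tx queue; the index arithmetic below assumes that layout when
	 * locating where the NIC-written rx blocks begin and end.
	 */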
	base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
	max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
		base_stats_idx;
	/* Preprocess the stats report for rx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_RX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		rx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk RX rings */
	if (priv->rx) {
		for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
			struct gve_rx_ring *rx = &priv->rx[ring];

			data[i++] = rx->fill_cnt;
			data[i++] = rx->cnt;
			do {
				start =
				  u64_stats_fetch_begin(&priv->rx[ring].statss);
				tmp_rx_bytes = rx->rbytes;
				tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
				tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
				tmp_rx_desc_err_dropped_pkt =
					rx->rx_desc_err_dropped_pkt;
			} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
						       start));
			data[i++] = tmp_rx_bytes;
			/* rx dropped packets */
			data[i++] = tmp_rx_skb_alloc_fail +
				tmp_rx_buf_alloc_fail +
				tmp_rx_desc_err_dropped_pkt;
			data[i++] = rx->rx_copybreak_pkt;
			data[i++] = rx->rx_copied_pkt;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC rx stats */
				i += NIC_RX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);

				data[i++] = value;
			}
		}
	} else {
		i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
	}

	/* For tx cross-reporting stats, start from nic tx stats in report */
	base_stats_idx = max_stats_idx;
	max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
		max_stats_idx;
	/* Preprocess the stats report for tx, map queue id to start index */
	skip_nic_stats = false;
	for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
		stats_idx += NIC_TX_STATS_REPORT_NUM) {
		u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
		u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);

		if (stat_name == 0) {
			/* no stats written by NIC yet */
			skip_nic_stats = true;
			break;
		}
		tx_qid_to_stats_idx[queue_id] = stats_idx;
	}
	/* walk TX rings */
	if (priv->tx) {
		for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
			struct gve_tx_ring *tx = &priv->tx[ring];

			if (gve_is_gqi(priv)) {
				data[i++] = tx->req;
				data[i++] = tx->done;
			} else {
				/* DQO doesn't currently support
				 * posted/completed descriptor counts;
				 */
				data[i++] = 0;
				data[i++] = 0;
			}
			do {
				start =
				  u64_stats_fetch_begin(&priv->tx[ring].statss);
				tmp_tx_bytes = tx->bytes_done;
			} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
						       start));
			data[i++] = tmp_tx_bytes;
			data[i++] = tx->wake_queue;
			data[i++] = tx->stop_queue;
			data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
									  tx));
			data[i++] = tx->dma_mapping_error;
			/* stats from NIC */
			if (skip_nic_stats) {
				/* skip NIC tx stats */
				i += NIC_TX_STATS_REPORT_NUM;
				continue;
			}
			for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
				u64 value =
				be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
				data[i++] = value;
			}
		}
	} else {
		i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
	}

	kfree(rx_qid_to_stats_idx);
	kfree(tx_qid_to_stats_idx);
	/* AQ stats */
	data[i++] = priv->adminq_prod_cnt;
	data[i++] = priv->adminq_cmd_fail;
	data[i++] = priv->adminq_timeouts;
	data[i++] = priv->adminq_describe_device_cnt;
	data[i++] = priv->adminq_cfg_device_resources_cnt;
	data[i++] = priv->adminq_register_page_list_cnt;
	data[i++] = priv->adminq_unregister_page_list_cnt;
	data[i++] = priv->adminq_create_tx_queue_cnt;
	data[i++] = priv->adminq_create_rx_queue_cnt;
	data[i++] = priv->adminq_destroy_tx_queue_cnt;
	data[i++] = priv->adminq_destroy_rx_queue_cnt;
	data[i++] = priv->adminq_dcfg_device_resources_cnt;
	data[i++] = priv->adminq_set_driver_parameter_cnt;
	data[i++] = priv->adminq_report_stats_cnt;
	data[i++] = priv->adminq_report_link_speed_cnt;
}

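/* gve exposes independent rx and tx channels rather than combined
 * ones, so max_combined/combined_count are reported as zero and only
 * rx_count and tx_count are configurable.
 */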
static void gve_get_channels(struct net_device *netdev,
			     struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->max_rx = priv->rx_cfg.max_queues;
	cmd->max_tx = priv->tx_cfg.max_queues;
	cmd->max_other = 0;
	cmd->max_combined = 0;
	cmd->rx_count = priv->rx_cfg.num_queues;
	cmd->tx_count = priv->tx_cfg.num_queues;
	cmd->other_count = 0;
	cmd->combined_count = 0;
}

static int gve_set_channels(struct net_device *netdev,
			    struct ethtool_channels *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	struct gve_queue_config new_tx_cfg = priv->tx_cfg;
	struct gve_queue_config new_rx_cfg = priv->rx_cfg;
	struct ethtool_channels old_settings;
	int new_tx = cmd->tx_count;
	int new_rx = cmd->rx_count;

	gve_get_channels(netdev, &old_settings);

	/* Changing combined is not allowed */
	if (cmd->combined_count != old_settings.combined_count)
		return -EINVAL;

	if (!new_rx || !new_tx)
		return -EINVAL;

	if (!netif_carrier_ok(netdev)) {
		priv->tx_cfg.num_queues = new_tx;
		priv->rx_cfg.num_queues = new_rx;
		return 0;
	}

	new_tx_cfg.num_queues = new_tx;
	new_rx_cfg.num_queues = new_rx;

	return gve_adjust_queues(priv, new_rx_cfg, new_tx_cfg);
}

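/* Ring sizes are fixed by the device description, so the current and
 * maximum pending counts are reported as the same value and no
 * .set_ringparam hook is provided in gve_ethtool_ops.
 */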
static void gve_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);

	cmd->rx_max_pending = priv->rx_desc_cnt;
	cmd->tx_max_pending = priv->tx_desc_cnt;
	cmd->rx_pending = priv->rx_desc_cnt;
	cmd->tx_pending = priv->tx_desc_cnt;
}

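/* ethtool --reset: only a full device reset (ETH_RESET_ALL) is
 * supported; clearing *flags tells the core that every requested
 * component was handled.
 */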
static int gve_user_reset(struct net_device *netdev, u32 *flags)
{
	struct gve_priv *priv = netdev_priv(netdev);

	if (*flags == ETH_RESET_ALL) {
		*flags = 0;
		return gve_reset(priv, true);
	}

	return -EOPNOTSUPP;
}

static int gve_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna, void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)value = priv->rx_copybreak;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

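/* rx_copybreak caps how large a packet may be copied into a fresh skb
 * rather than handed up in its receive buffer. The bound depends on
 * the queue format: the GQI receive path hands out half-page buffers,
 * so copybreak cannot exceed PAGE_SIZE / 2, while DQO is limited by
 * its configured data buffer size.
 */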
static int gve_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *etuna,
			   const void *value)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 len;

	switch (etuna->id) {
	case ETHTOOL_RX_COPYBREAK:
	{
		u32 max_copybreak = gve_is_gqi(priv) ?
			(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;

		len = *(u32 *)value;
		if (len > max_copybreak)
			return -EINVAL;
		priv->rx_copybreak = len;
		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

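/* Only one private flag is defined, report-stats, and it lives in
 * BIT(0) of priv->ethtool_flags to match its position in
 * gve_gstrings_priv_flags[].
 */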
static u32 gve_get_priv_flags(struct net_device *netdev)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u32 ret_flags = 0;

	/* Report-stats (BIT(0)) */
	if (priv->ethtool_flags & BIT(0))
		ret_flags |= BIT(0);
	return ret_flags;
}

static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
{
	struct gve_priv *priv = netdev_priv(netdev);
	u64 ori_flags, new_flags;

	ori_flags = READ_ONCE(priv->ethtool_flags);
	new_flags = ori_flags;

	/* Only one priv flag exists: report-stats (BIT(0)) */
	if (flags & BIT(0))
		new_flags |= BIT(0);
	else
		new_flags &= ~(BIT(0));
	priv->ethtool_flags = new_flags;
	/* start report-stats timer when user turns report stats on */
	if (flags & BIT(0)) {
		mod_timer(&priv->stats_report_timer,
			  round_jiffies(jiffies +
				msecs_to_jiffies(priv->stats_report_timer_period)));
	}
	/* Zero off gve stats when report-stats turned off and
	 * delete report stats timer.
	 */
	if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
		int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
			priv->tx_cfg.num_queues;
		int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
			priv->rx_cfg.num_queues;

		memset(priv->stats_report->stats, 0,
		       (tx_stats_num + rx_stats_num) * sizeof(struct stats));
		del_timer_sync(&priv->stats_report_timer);
	}
	return 0;
}

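/* Link speed is queried from the device on demand:
 * gve_adminq_report_link_speed() asks the NIC over the adminq and
 * caches the result in priv->link_speed, which is reported to ethtool
 * even when the query fails (with the error code propagated).
 */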
static int gve_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *cmd)
{
	struct gve_priv *priv = netdev_priv(netdev);
	int err = gve_adminq_report_link_speed(priv);

	cmd->base.speed = priv->link_speed;
	return err;
}

const struct ethtool_ops gve_ethtool_ops = {
	.get_drvinfo = gve_get_drvinfo,
	.get_strings = gve_get_strings,
	.get_sset_count = gve_get_sset_count,
	.get_ethtool_stats = gve_get_ethtool_stats,
	.set_msglevel = gve_set_msglevel,
	.get_msglevel = gve_get_msglevel,
	.set_channels = gve_set_channels,
	.get_channels = gve_get_channels,
	.get_link = ethtool_op_get_link,
	.get_ringparam = gve_get_ringparam,
	.reset = gve_user_reset,
	.get_tunable = gve_get_tunable,
	.set_tunable = gve_set_tunable,
	.get_priv_flags = gve_get_priv_flags,
	.set_priv_flags = gve_set_priv_flags,
	.get_link_ksettings = gve_get_link_ksettings
};