#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/string.h>
#include <linux/of_platform.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/ptp_qoriq.h>

#include "dpaa_eth.h"
#include "mac.h"

static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
	"interrupts",
	"rx packets",
	"tx packets",
	"tx confirm",
	"tx S/G",
	"tx error",
	"rx error",
	"rx dropped",
	"tx dropped",
};

static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
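	/* FMan Rx error counters */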
	"rx dma error",
	"rx frame physical error",
	"rx frame size error",
	"rx header error",

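	/* QMan enqueue rejection (ERN) counters */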
	"qman cg_tdrop",
	"qman wred",
	"qman error cond",
	"qman early window",
	"qman late window",
	"qman fq tdrop",
	"qman fq retired",
	"qman orp disabled",

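	/* congestion group statistics */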
	"congestion time (ms)",
	"entered congestion",
	"congested (0/1)"
};

#define DPAA_STATS_PERCPU_LEN ARRAY_SIZE(dpaa_stats_percpu)
#define DPAA_STATS_GLOBAL_LEN ARRAY_SIZE(dpaa_stats_global)

static int dpaa_get_link_ksettings(struct net_device *net_dev,
				   struct ethtool_link_ksettings *cmd)
{
	if (!net_dev->phydev)
		return 0;

	phy_ethtool_ksettings_get(net_dev->phydev, cmd);

	return 0;
}

static int dpaa_set_link_ksettings(struct net_device *net_dev,
				   const struct ethtool_link_ksettings *cmd)
{
	int err;

	if (!net_dev->phydev)
		return -ENODEV;

	err = phy_ethtool_ksettings_set(net_dev->phydev, cmd);
	if (err < 0)
		netdev_err(net_dev, "phy_ethtool_ksettings_set() = %d\n", err);

	return err;
}

static void dpaa_get_drvinfo(struct net_device *net_dev,
			     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, KBUILD_MODNAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
		sizeof(drvinfo->bus_info));
}

static u32 dpaa_get_msglevel(struct net_device *net_dev)
{
	return ((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable;
}

static void dpaa_set_msglevel(struct net_device *net_dev,
			      u32 msg_enable)
{
	((struct dpaa_priv *)netdev_priv(net_dev))->msg_enable = msg_enable;
}

static int dpaa_nway_reset(struct net_device *net_dev)
{
	int err;

	if (!net_dev->phydev)
		return -ENODEV;

	err = 0;
	if (net_dev->phydev->autoneg) {
		err = phy_start_aneg(net_dev->phydev);
		if (err < 0)
			netdev_err(net_dev, "phy_start_aneg() = %d\n",
				   err);
	}

	return err;
}

static void dpaa_get_pauseparam(struct net_device *net_dev,
				struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	if (!net_dev->phydev)
		return;

	epause->autoneg = mac_dev->autoneg_pause;
	epause->rx_pause = mac_dev->rx_pause_active;
	epause->tx_pause = mac_dev->tx_pause_active;
}

static int dpaa_set_pauseparam(struct net_device *net_dev,
			       struct ethtool_pauseparam *epause)
{
	struct mac_device *mac_dev;
	struct phy_device *phydev;
	bool rx_pause, tx_pause;
	struct dpaa_priv *priv;
	int err;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;

	phydev = net_dev->phydev;
	if (!phydev) {
		netdev_err(net_dev, "phy device not initialized\n");
		return -ENODEV;
	}

	if (!phy_validate_pause(phydev, epause))
		return -EINVAL;

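	/* The MAC should know how to handle PAUSE frame autonegotiation before
	 * adjust_link is triggered by a forced renegotiation of the sym/asym
	 * PAUSE settings.
	 */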
	mac_dev->autoneg_pause = !!epause->autoneg;
	mac_dev->rx_pause_req = !!epause->rx_pause;
	mac_dev->tx_pause_req = !!epause->tx_pause;

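	/* Determine the sym/asym advertised PAUSE capabilities from the
	 * desired rx/tx pause settings.
	 */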
	phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		netdev_err(net_dev, "set_mac_active_pause() = %d\n", err);

	return err;
}

static int dpaa_get_sset_count(struct net_device *net_dev, int type)
{
	unsigned int total_stats, num_stats;

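	/* each per-CPU counter, plus the bpool count, is reported once per
	 * online CPU and once as a total; the global stats follow
	 */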
	num_stats = num_online_cpus() + 1;
	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
		      DPAA_STATS_GLOBAL_LEN;

	switch (type) {
	case ETH_SS_STATS:
		return total_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
		       int crr_cpu, u64 bp_count, u64 *data)
{
	int num_values = num_cpus + 1;
	int crr = 0;

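	/* update current CPU's stats and also add them to the total values */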
	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
	data[crr++ * num_values + num_cpus] += percpu_priv->in_interrupt;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_packets;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_packets;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_confirm;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_confirm;

	data[crr * num_values + crr_cpu] = percpu_priv->tx_frag_skbuffs;
	data[crr++ * num_values + num_cpus] += percpu_priv->tx_frag_skbuffs;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;

	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;

	data[crr * num_values + crr_cpu] = bp_count;
	data[crr++ * num_values + num_cpus] += bp_count;
}

static void dpaa_get_ethtool_stats(struct net_device *net_dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct dpaa_percpu_priv *percpu_priv;
	struct dpaa_rx_errors rx_errors;
	unsigned int num_cpus, offset;
	u64 bp_count, cg_time, cg_num;
	struct dpaa_ern_cnt ern_cnt;
	struct dpaa_bp *dpaa_bp;
	struct dpaa_priv *priv;
	int total_stats, i;
	bool cg_status;

	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
	priv = netdev_priv(net_dev);
	num_cpus = num_online_cpus();

	memset(&bp_count, 0, sizeof(bp_count));
	memset(&rx_errors, 0, sizeof(struct dpaa_rx_errors));
	memset(&ern_cnt, 0, sizeof(struct dpaa_ern_cnt));
	memset(data, 0, total_stats * sizeof(u64));

	for_each_online_cpu(i) {
		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
		dpaa_bp = priv->dpaa_bp;
		if (!dpaa_bp->percpu_count)
			continue;
		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
		rx_errors.dme += percpu_priv->rx_errors.dme;
		rx_errors.fpe += percpu_priv->rx_errors.fpe;
		rx_errors.fse += percpu_priv->rx_errors.fse;
		rx_errors.phe += percpu_priv->rx_errors.phe;

		ern_cnt.cg_tdrop += percpu_priv->ern_cnt.cg_tdrop;
		ern_cnt.wred += percpu_priv->ern_cnt.wred;
		ern_cnt.err_cond += percpu_priv->ern_cnt.err_cond;
		ern_cnt.early_window += percpu_priv->ern_cnt.early_window;
		ern_cnt.late_window += percpu_priv->ern_cnt.late_window;
		ern_cnt.fq_tdrop += percpu_priv->ern_cnt.fq_tdrop;
		ern_cnt.fq_retired += percpu_priv->ern_cnt.fq_retired;
		ern_cnt.orp_zero += percpu_priv->ern_cnt.orp_zero;

		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
	}

	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));

	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
	memcpy(data + offset, &ern_cnt, sizeof(struct dpaa_ern_cnt));

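	/* gather congestion related counters */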
	cg_num = 0;
	cg_status = false;
	cg_time = jiffies_to_msecs(priv->cgr_data.congested_jiffies);
	if (qman_query_cgr_congested(&priv->cgr_data.cgr, &cg_status) == 0) {
		cg_num = priv->cgr_data.cgr_congested_count;

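		/* reset congestion stats (like the QMan API does) */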
		priv->cgr_data.congested_jiffies = 0;
		priv->cgr_data.cgr_congested_count = 0;
	}

	offset += sizeof(struct dpaa_ern_cnt) / sizeof(u64);
	data[offset++] = cg_time;
	data[offset++] = cg_num;
	data[offset++] = cg_status;
}

static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
			     u8 *data)
{
	unsigned int i, j, num_cpus, size;
	char string_cpu[ETH_GSTRING_LEN];
	u8 *strings;

	memset(string_cpu, 0, sizeof(string_cpu));
	strings = data;
	num_cpus = num_online_cpus();
	size = DPAA_STATS_GLOBAL_LEN * ETH_GSTRING_LEN;

	for (i = 0; i < DPAA_STATS_PERCPU_LEN; i++) {
		for (j = 0; j < num_cpus; j++) {
			snprintf(string_cpu, ETH_GSTRING_LEN, "%s [CPU %d]",
				 dpaa_stats_percpu[i], j);
			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
			strings += ETH_GSTRING_LEN;
		}
		snprintf(string_cpu, ETH_GSTRING_LEN, "%s [TOTAL]",
			 dpaa_stats_percpu[i]);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	for (j = 0; j < num_cpus; j++) {
		snprintf(string_cpu, ETH_GSTRING_LEN,
			 "bpool [CPU %d]", j);
		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
		strings += ETH_GSTRING_LEN;
	}
	snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
	memcpy(strings, string_cpu, ETH_GSTRING_LEN);
	strings += ETH_GSTRING_LEN;

	memcpy(strings, dpaa_stats_global, size);
}

static int dpaa_get_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *cmd)
{
	struct dpaa_priv *priv = netdev_priv(dev);

	cmd->data = 0;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		if (priv->keygen_in_use)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		cmd->data = 0;
		break;
	}

	return 0;
}

static int dpaa_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *unused)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXFH:
		ret = dpaa_get_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static void dpaa_set_hash(struct net_device *net_dev, bool enable)
{
	struct mac_device *mac_dev;
	struct fman_port *rxport;
	struct dpaa_priv *priv;

	priv = netdev_priv(net_dev);
	mac_dev = priv->mac_dev;
	rxport = mac_dev->port[0];

	fman_port_use_kg_hash(rxport, enable);
	priv->keygen_in_use = enable;
}

static int dpaa_set_hash_opts(struct net_device *dev,
			      struct ethtool_rxnfc *nfc)
{
	int ret = -EINVAL;

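	/* we support hashing on IPv4/v6 src/dest IP and L4 ports */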
	if (nfc->data &
	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
	case IPV4_FLOW:
	case IPV6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V4_FLOW:
	case AH_V6_FLOW:
	case ESP_V4_FLOW:
	case ESP_V6_FLOW:
		dpaa_set_hash(dev, !!nfc->data);
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		ret = dpaa_set_hash_opts(dev, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static int dpaa_get_ts_info(struct net_device *net_dev,
			    struct ethtool_ts_info *info)
{
	struct device *dev = net_dev->dev.parent;
	struct device_node *mac_node = dev->of_node;
	struct device_node *fman_node = NULL, *ptp_node = NULL;
	struct platform_device *ptp_dev = NULL;
	struct ptp_qoriq *ptp = NULL;

	info->phc_index = -1;

	/* drop the OF node references as soon as they are no longer needed */
	fman_node = of_get_parent(mac_node);
	if (fman_node) {
		ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
		of_node_put(fman_node);
	}

	if (ptp_node) {
		ptp_dev = of_find_device_by_node(ptp_node);
		of_node_put(ptp_node);
	}

	if (ptp_dev)
		ptp = platform_get_drvdata(ptp_dev);

	if (ptp)
		info->phc_index = ptp->phc_index;

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = (1 << HWTSTAMP_TX_OFF) |
			 (1 << HWTSTAMP_TX_ON);
	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			   (1 << HWTSTAMP_FILTER_ALL);

	return 0;
}

static int dpaa_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c)
{
	struct qman_portal *portal;
	u32 period;
	u8 thresh;

	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &period);
	qman_dqrr_get_ithresh(portal, &thresh);

	c->rx_coalesce_usecs = period;
	c->rx_max_coalesced_frames = thresh;

	return 0;
}

static int dpaa_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *c)
{
	const cpumask_t *cpus = qman_affine_cpus();
	bool needs_revert[NR_CPUS] = {false};
	struct qman_portal *portal;
	u32 period, prev_period;
	u8 thresh, prev_thresh;
	int cpu, res;

	period = c->rx_coalesce_usecs;
	thresh = c->rx_max_coalesced_frames;

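	/* save the current values */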
	portal = qman_get_affine_portal(smp_processor_id());
	qman_portal_get_iperiod(portal, &prev_period);
	qman_dqrr_get_ithresh(portal, &prev_thresh);

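	/* set new values */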
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		portal = qman_get_affine_portal(cpu);
		res = qman_portal_set_iperiod(portal, period);
		if (res)
			goto revert_values;
		res = qman_dqrr_set_ithresh(portal, thresh);
		if (res) {
			qman_portal_set_iperiod(portal, prev_period);
			goto revert_values;
		}
		needs_revert[cpu] = true;
	}

	return 0;

revert_values:
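	/* restore previous values */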
	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
		if (!needs_revert[cpu])
			continue;
		portal = qman_get_affine_portal(cpu);
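		/* restoring previous values won't fail, ignore return value */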
		qman_portal_set_iperiod(portal, prev_period);
		qman_dqrr_set_ithresh(portal, prev_thresh);
	}

	return res;
}

const struct ethtool_ops dpaa_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo = dpaa_get_drvinfo,
	.get_msglevel = dpaa_get_msglevel,
	.set_msglevel = dpaa_set_msglevel,
	.nway_reset = dpaa_nway_reset,
	.get_pauseparam = dpaa_get_pauseparam,
	.set_pauseparam = dpaa_set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_sset_count = dpaa_get_sset_count,
	.get_ethtool_stats = dpaa_get_ethtool_stats,
	.get_strings = dpaa_get_strings,
	.get_link_ksettings = dpaa_get_link_ksettings,
	.set_link_ksettings = dpaa_set_link_ksettings,
	.get_rxnfc = dpaa_get_rxnfc,
	.set_rxnfc = dpaa_set_rxnfc,
	.get_ts_info = dpaa_get_ts_info,
	.get_coalesce = dpaa_get_coalesce,
	.set_coalesce = dpaa_set_coalesce,
};