// SPDX-License-Identifier: GPL-2.0
/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include "octeon-ethernet.h"
#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
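
/*
 * Example usage (illustrative, not from the original source; assumes the
 * driver is built and loaded as the octeon-ethernet module): the parameters
 * above are set at module load time, e.g.
 *
 *   modprobe octeon-ethernet num_packet_buffers=2048 receive_group_order=2
 */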

/* Mask of the POW groups used for receive; set up in cvm_oct_probe(). */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping - set to non-zero while the driver is being
 * removed so the periodic and RX refill workers stop rescheduling
 * themselves.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver, indexed by the
 * IPD input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received, so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats
			(cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	struct cvmx_wqe *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		dev->stats.rx_packets += rx_status.inb_packets;
		dev->stats.tx_packets += tx_status.packets;
		dev->stats.rx_bytes += rx_status.inb_octets;
		dev->stats.tx_bytes += tx_status.octets;
		dev->stats.multicast += rx_status.multicast_packets;
		dev->stats.rx_crc_errors += rx_status.inb_errors;
		dev->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		dev->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &dev->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}

		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (!IS_ERR_OR_NULL(mac))
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev_set_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	union cvmx_helper_link_info link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_helper_link_info link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_common_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child
				(const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
				int port)
{
	struct device_node *np = priv->of_node;
	u32 delay_value;
	bool rx_delay;
	bool tx_delay;

	/* By default, both RX/TX delay is enabled in
	 * __cvmx_helper_rgmii_enable().
	 */
	rx_delay = true;
	tx_delay = true;

	if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
		rx_delay = delay_value > 0;
	}
	if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
		tx_delay = delay_value > 0;
	}

	if (!rx_delay && !tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
	else if (!rx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
	else if (!tx_delay)
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
	else
		priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
}

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN;

#if IS_ENABLED(CONFIG_VLAN_8021Q)
	mtu_overhead += VLAN_HLEN;
#endif

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/*
				 * We support only 16 groups at the
				 * moment, so always disable the two
				 * additional "hidden" tag_mask bits
				 * on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx packets that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strscpy(dev->name, "pow%d", sizeof(dev->name));
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
			cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
				alloc_etherdev(sizeof(struct octeon_ethernet));

			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			SET_NETDEV_DEV(dev, &pdev->dev);
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			priv->phy_mode = PHY_INTERFACE_MODE_NA;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);
			dev->min_mtu = VLAN_ETH_ZLEN - mtu_overhead;
			dev->max_mtu = OCTEON_MAX_MTU - mtu_overhead;

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "npi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strscpy(dev->name, "xaui%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strscpy(dev->name, "loop%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strscpy(dev->name, "spi%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_GMII:
				priv->phy_mode = PHY_INTERFACE_MODE_GMII;
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strscpy(dev->name, "eth%d", sizeof(dev->name));
				cvm_set_rgmii_delay(priv, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -= cvmx_pko_get_num_queues(priv->port) *
				       sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work,
						      HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1Gb.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove = cvm_oct_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");