/*
 * Ethernet driver for the Cavium OCTEON family of network processors.
 * Based on code from the OCTEON SDK by Cavium Networks; distributed
 * under the GNU General Public License (see MODULE_LICENSE below).
 */
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

#define OCTEON_MAX_MTU 65392

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

static int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

static int receive_group_order;
module_param(receive_group_order, int, 0444);
MODULE_PARM_DESC(receive_group_order, "\n"
	"\tOrder (0..4) of receive groups to take into use. Ethernet hardware\n"
	"\twill be configured to send incoming packets to multiple POW\n"
	"\tgroups. pow_receive_group parameter is ignored when multiple\n"
	"\tgroups are taken into use and groups are allocated starting\n"
	"\tfrom 0. By default, a single group is used.\n");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/*
 * pow_receive_groups is a bitmask of the POW groups this driver
 * actually receives packets from. It is derived at probe time from
 * either pow_receive_group or receive_group_order.
 */
int pow_receive_groups;

/*
 * cvm_oct_poll_queue_stopping is set to one right before the delayed
 * work items are cancelled so that they stop rescheduling themselves.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * The packet pool may have been drained. Try to refill it if
	 * more than num_packet_buffers / 2 buffers are needed;
	 * otherwise normal receive processing will refill it. If it
	 * were completely drained, no packets could be received, so
	 * the NAPI poll would never run to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
		cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		schedule_delayed_work(&priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
	/* Set up the FPA packet and work queue entry pools. */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}
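
/**
 * cvm_oct_free_work - free a work queue entry
 * @work_queue_entry: work queue entry to free
 *
 * Frees the packet buffers referenced by the entry (unless their
 * "don't free" bit is set) and then the work queue entry itself,
 * returning both to the FPA pools.
 *
 * Returns zero on success.
 */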
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
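
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */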
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		priv->stats.rx_dropped += rx_status.dropped_packets;
	}

	return &priv->stats;
}
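
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */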
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	int vlan_bytes = VLAN_HLEN;
#else
	int vlan_bytes = 0;
#endif
	int mtu_overhead = ETH_HLEN + ETH_FCS_LEN + vlan_bytes;

	/*
	 * Limit the MTU so that the resulting frame is no smaller than
	 * the minimum frame size and no larger than the hardware limit.
	 */
	if ((new_mtu + mtu_overhead < VLAN_ETH_ZLEN) ||
	    (new_mtu + mtu_overhead > OCTEON_MAX_MTU)) {
		pr_err("MTU must be between %d and %d.\n",
		       VLAN_ETH_ZLEN - mtu_overhead,
		       OCTEON_MAX_MTU - mtu_overhead);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int index = INDEX(priv->port);
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + mtu_overhead;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
		    OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than the minimum
			 * frame length.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = VLAN_ETH_ZLEN;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}

		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
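
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */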
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		int index = INDEX(priv->port);

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Promiscuous mode: accept everything; the
			 * CAM-based MAC address filter is disabled
			 * below.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);

	if ((interface < 2) &&
	    (cvmx_helper_interface_get_mode(interface) !=
	     CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		u8 *ptr = dev->dev_addr;
		u64 mac = 0;
		int index = INDEX(priv->port);

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (u64)ptr[i];

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
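
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:    The device in question.
 * @addr:   Socket address.
 *
 * Returns Zero on success
 */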
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}
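
/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */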
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1) &&
	    (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	if (dev->phydev)
		phy_disconnect(dev->phydev);
}

int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *))
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		gmx_cfg.s.pknd = priv->port;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (dev->phydev) {
		int r = phy_read_status(dev->phydev);

		if (r == 0 && dev->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		link_poll(dev);
	}

	return 0;
}

void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	if (cvmx_helper_link_set(priv->port, link_info))
		link_info.u64 = priv->link_info;
	else
		priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static int cvm_oct_xaui_open(struct net_device *dev)
{
	return cvm_oct_common_open(dev, cvm_oct_link_poll);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static struct device_node *cvm_oct_of_get_child(
	const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
{
	u32 delay_value;

	if (!of_property_read_u32(np, "rx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
	if (!of_property_read_u32(np, "tx-delay", &delay_value))
		cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
}

static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	if (receive_group_order) {
		if (receive_group_order > 4)
			receive_group_order = 4;
		pow_receive_groups = (1 << (1 << receive_group_order)) - 1;
	} else {
		pow_receive_groups = BIT(pow_receive_group);
	}

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));

			if (receive_group_order) {
				int tag_mask;

				/*
				 * Only 16 groups are supported, so
				 * always disable the two extra
				 * "hidden" tag_mask bits on CN68XX.
				 */
				if (OCTEON_IS_MODEL(OCTEON_CN68XX))
					pip_prt_tagx.u64 |= 0x3ull << 44;

				tag_mask = ~((1 << receive_group_order) - 1);
				pip_prt_tagx.s.grptagbase = 0;
				pip_prt_tagx.s.grptagmask = tag_mask;
				pip_prt_tagx.s.grptag = 1;
				pip_prt_tagx.s.tag_mode = 0;
				pip_prt_tagx.s.inc_prt_flag = 1;
				pip_prt_tagx.s.ip6_dprt_flag = 1;
				pip_prt_tagx.s.ip4_dprt_flag = 1;
				pip_prt_tagx.s.ip6_sprt_flag = 1;
				pip_prt_tagx.s.ip4_sprt_flag = 1;
				pip_prt_tagx.s.ip6_dst_flag = 1;
				pip_prt_tagx.s.ip4_dst_flag = 1;
				pip_prt_tagx.s.ip6_src_flag = 1;
				pip_prt_tagx.s.ip4_src_flag = 1;
				pip_prt_tagx.s.grp = 0;
			} else {
				pip_prt_tagx.s.grptag = 0;
				pip_prt_tagx.s.grp = pow_receive_group;
			}

			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {
			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				cvm_set_rgmii_delay(priv->of_node, interface,
						    port_index);
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(u32);
				schedule_delayed_work(&priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/* Poll for completed TX roughly every 150 us (in core clock cycles). */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	schedule_delayed_work(&cvm_oct_rx_refill_work, HZ);

	return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	cvmx_ipd_disable();

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name		= KBUILD_MODNAME,
		.of_match_table	= cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");