/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

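/* The MIX hardware and the driver exchange packets through rings of
 * 8-byte descriptors.  Each descriptor carries the physical address of
 * a buffer, the buffer/packet length, a completion/error code and a
 * timestamp flag, packed as in the bitfields below.
 */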
union mgmt_port_ring_entry {
	u64 d64;
	struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};

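/* Register offsets, relative to the per-port base addresses kept in
 * struct octeon_mgmt: p->mix for the MIX DMA engine CSRs and p->agl
 * for the AGL (GMX) MAC CSRs.
 */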
#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50

#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0

#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8

#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	u64 agl_prt_ctl;
	int port;
	int irq;
	bool has_rx_tstamp;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct phy_device *phydev;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

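/* Leave a safety margin of eight unused entries per ring so the
 * hardware never wraps onto descriptors that software has not yet
 * reclaimed.  The margin of eight is a driver choice, not a documented
 * hardware requirement.
 */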
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

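/* Reclaim transmit descriptors that the hardware has completed: each
 * completed entry is acknowledged via MIX_ORCNT, its skb unmapped and
 * freed, and the hardware TX timestamp delivered if one was requested.
 * Runs from the tx_clean tasklet; ring state shared with the xmit path
 * is protected by tx_list.lock.
 */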
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded. */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			u64 ns;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp. */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO. */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp. */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers.  */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers.  */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/* Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded. */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp. */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {
		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

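/* NAPI poll callback: process up to @budget received packets.  If the
 * budget was not exhausted, receive processing is complete, so NAPI is
 * stopped and the RX interrupt re-enabled.
 */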
static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to get it into a clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

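/* The AGL address CAM is addressed by MAC byte rather than by entry:
 * register CAM<i> holds byte i of up to eight different MAC addresses,
 * one address per byte lane.  octeon_mgmt_cam_state_add() therefore
 * scatters each address across all six cam[] words at the byte lane
 * selected by cam_index.
 */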
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	/* Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 16383 bytes.
	 */
	if (size_without_fcs < 64 || size_without_fcs > 16383) {
		dev_warn(p->dev, "MTU must be between %d and %d.\n",
			 64 - OCTEON_MGMT_RX_HEADROOM,
			 16383 - OCTEON_MGMT_RX_HEADROOM);
		return -EINVAL;
	}

	netdev->mtu = new_mtu;

	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts. */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

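/* SIOCSHWTSTAMP handler.  Hardware timestamping is only available on
 * CN6XXX parts: the PTP clock is enabled (using the IO clock as the
 * reference unless an external GPIO clock is already configured), and
 * the AGL is told whether to prepend an 8-byte timestamp to received
 * frames (ptp_mode).
 */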
static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock. */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main
			 * clock reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
				(NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO. */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
				ptp.s.ext_clk_in,
				(NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already. */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
	default:
		if (p->phydev)
			return phy_mii_ioctl(p->phydev, rq, cmd);
		return -EINVAL;
	}
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;

		/* Wait up to 10 ms for the port to become idle. */
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
		}
	}
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

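/* Program the GMX port configuration (speed, duplex, slot time, burst)
 * to match the current PHY state.  The caller is expected to have
 * taken the link down first; see octeon_mgmt_adjust_link().
 */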
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!p->phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = p->phydev->duplex;

	switch (p->phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips. */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex. */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = p->phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (p->phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (p->phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

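/* phylib link-change callback, registered via of_phy_connect().  Only
 * reprograms the MAC (under p->lock) when the link, speed or duplex
 * actually changed since the last call.
 */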
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	int link_changed = 0;

	if (!p->phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);

	if (!p->phydev->link && p->last_link)
		link_changed = -1;

	if (p->phydev->link
	    && (p->last_duplex != p->phydev->duplex
		|| p->last_link != p->phydev->link
		|| p->last_speed != p->phydev->speed)) {
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = p->phydev->link;
	p->last_speed = p->phydev->speed;
	p->last_duplex = p->phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
				"Full" : "Half");
		} else {
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	p->phydev = of_phy_connect(netdev, p->phy_np,
				   octeon_mgmt_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);

	if (!p->phydev)
		return -ENODEV;

	return 0;
}

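/* ndo_open: allocate and map the descriptor rings, bring the MIX/AGL
 * hardware out of reset, configure the MAC, install the interrupt
 * handler, and start the PHY and the transmit queue.
 */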
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW.
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (p->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clock counts are based on the 125Mhz reference,
		 * which has an 8nS period.  So our delays need to be
		 * multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset. */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock.  The external 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface. */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write. */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller. */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small.  The
		 * recommended workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics.  The CTL write puts the counters into
	 * clear-on-read mode.
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts. */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet. */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts. */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */
	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts. */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict. */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC. */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address. */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block. */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames. */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble. */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables. */
	octeon_mgmt_disable_link(p);
	if (p->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* The PHY is not present in the simulator; the carrier was
	 * enabled while initializing it there, so leave it enabled.
	 */
	if (p->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(p->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);
	p->phydev = NULL;

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

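/* ndo_start_xmit.  If the ring is almost full, the queue is stopped
 * and the fill level re-checked under tx_list.lock: the cleanup
 * tasklet may have freed entries (and re-woken the queue) in the
 * meantime, in which case the packet can still be accepted instead of
 * returning NETDEV_TX_BUSY.
 */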
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.  */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netdev->trans_start = jiffies;
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
	info->n_stats = 0;
	info->testinfo_len = 0;
	info->regdump_len = 0;
	info->eedump_len = 0;
}

static int octeon_mgmt_get_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (p->phydev)
		return phy_ethtool_gset(p->phydev, cmd);

	return -EOPNOTSUPP;
}

static int octeon_mgmt_set_settings(struct net_device *netdev,
				    struct ethtool_cmd *cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_ethtool_sset(p->phydev, cmd);

	return -EOPNOTSUPP;
}

static int octeon_mgmt_nway_reset(struct net_device *dev)
{
	struct octeon_mgmt *p = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (p->phydev)
		return phy_start_aneg(p->phydev);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.get_settings = octeon_mgmt_get_settings,
	.set_settings = octeon_mgmt_set_settings,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

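/* Platform driver probe.  The device tree node supplies the port index
 * ("cell-index"), the IRQ, and the MIX, AGL and AGL_PRT_CTL register
 * ranges as "reg" entries 0, 1 and 3 respectively (entry 2 is not used
 * by this driver), plus the optional MAC address and "phy-handle".
 */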
static int octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	platform_set_drvdata(pdev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);

	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);
		goto err;
	}

	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(netdev);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (result)
		goto err;

	netif_carrier_off(netdev);
	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	free_netdev(netdev);
	return result;
}

static int octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}

static struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.of_match_table = octeon_mgmt_match,
	},
	.probe = octeon_mgmt_probe,
	.remove = octeon_mgmt_remove,
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);