/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};

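/* Offsets of the MIX and AGL CSRs, relative to the per-port base
 * addresses taken from the device tree "reg" resources and mapped in
 * octeon_mgmt_probe().
 */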
#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50

#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0

#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8

#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8

struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	u64 agl_prt_ctl;
	int port;
	int irq;
	bool has_rx_tstamp;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

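/* Leave a few slots of slack between the software fill level and the
 * hardware ring size; presumably this keeps the produce and consume
 * indices from colliding while the hardware is still working on the
 * last few entries.
 */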
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

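/* Reclaim completed TX buffers.  MIX_ORCNT counts descriptors the
 * hardware has finished with; each one is acknowledged back one at a
 * time, its buffer unmapped, and any requested hardware TX timestamp
 * is read from the MIX timestamp FIFO before the skb is freed.
 */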
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			u64 ns;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers.  */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers.  */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}

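/* Process one RX descriptor.  Returns 0 if a packet was delivered to
 * the stack, nonzero if the descriptor was consumed without delivering
 * one (an error frame).
 */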
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;

	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
			    && re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

471
472static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
473{
474 unsigned int work_done = 0;
475 union cvmx_mixx_ircnt mix_ircnt;
476 int rc;
477
478 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
479 while (work_done < budget && mix_ircnt.s.ircnt) {
480
481 rc = octeon_mgmt_receive_one(p);
482 if (!rc)
483 work_done++;
484
485
486 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
487 }
488
489 octeon_mgmt_rx_fill_ring(p->netdev);
490
491 return work_done;
492}
493
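/* Standard NAPI receive poll: process up to @budget packets and only
 * re-arm the RX interrupt once the ring has been drained below budget.
 */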
static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete_done(napi, work_done);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state.  */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	/* Disable the MIX engine, wait for it to go idle, then reset it. */
	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

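/* The eight-entry RX address-filter CAM is spread across six registers:
 * each of AGL_GMX_RX_ADR_CAM0..CAM5 holds one octet of all eight MAC
 * addresses, so entry i occupies byte lane i of every register.
 * cam_mask enables the entries that were populated.
 */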
struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

630
631static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
632{
633 int r = eth_mac_addr(netdev, addr);
634
635 if (r)
636 return r;
637
638 octeon_mgmt_set_rx_filtering(netdev);
639
640 return 0;
641}
642
643static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
644{
645 struct octeon_mgmt *p = netdev_priv(netdev);
646 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
647
648 netdev->mtu = new_mtu;
649
650 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
651 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
652 (size_without_fcs + 7) & 0xfff8);
653
654 return 0;
655}
656
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

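/* SIOCSHWTSTAMP handler.  MIO_PTP_CLOCK_COMP takes a 32.32 fixed-point
 * count of nanoseconds per I/O clock tick, hence the
 * (NSEC_PER_SEC << 32) / rate computations below.
 */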
static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			netdev_info(netdev,
				    "PTP Clock using sclk reference @ %lld Hz\n",
				    (NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			netdev_info(netdev,
				    "PTP Clock using GPIO%d @ %lld Hz\n",
				    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
	default:
		if (netdev->phydev)
			return phy_mii_ioctl(netdev->phydev, rq, cmd);
		return -EINVAL;
	}
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Wait for GMX to become idle */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
			i++;
		}
	}
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state, port config is not changed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	struct net_device *ndev = p->netdev;
	struct phy_device *phydev = ndev->phydev;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = phydev->duplex;

	switch (phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

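/* phylib link-change callback.  Compares the PHY state against the
 * values last programmed and, when they differ, re-programs the MAC
 * with the link briefly disabled.
 */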
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	unsigned long flags;
	int link_changed = 0;

	if (!phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);

	if (!phydev->link && p->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (p->last_duplex != phydev->duplex ||
	     p->last_link != phydev->link ||
	     p->last_speed != phydev->speed)) {
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = phydev->link;
	p->last_speed = phydev->speed;
	p->last_duplex = phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0)
			netdev_info(netdev, "Link is up - %d/%s\n",
				    phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
		else
			netdev_info(netdev, "Link is down\n");
	}
}

937
938static int octeon_mgmt_init_phy(struct net_device *netdev)
939{
940 struct octeon_mgmt *p = netdev_priv(netdev);
941 struct phy_device *phydev = NULL;
942
943 if (octeon_is_simulation() || p->phy_np == NULL) {
944
945 netif_carrier_on(netdev);
946 return 0;
947 }
948
949 phydev = of_phy_connect(netdev, p->phy_np,
950 octeon_mgmt_adjust_link, 0,
951 PHY_INTERFACE_MODE_MII);
952
953 if (!phydev)
954 return -ENODEV;
955
956 return 0;
957}
958
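/* Bring the interface up: allocate and map both descriptor rings, take
 * the MIX block out of reset, program the ring base registers, restore
 * the MAC address and MTU, initialize the PHY and (on CN6XXX) the
 * RGMII/MII port logic, prime the RX ring, clear the statistics
 * counters, then hook up the interrupt and enable NAPI.
 */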
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Configure and enable the MIX DMA engine. */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (netdev->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clock counts are based on the 250 MHz reference, so
		 * our delays need to be scaled to match the core clock rate.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock.  The external reference clock
		 * must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small.  Recommended
		 * workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics; the counters also clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Configure the RX frame control. */
	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (netdev->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator.  The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(netdev->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
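
/* Tear down the interface: the reverse of octeon_mgmt_open(), except
 * that the hardware is simply reset rather than unwound step by step.
 */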
static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (netdev->phydev)
		phy_disconnect(netdev->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.  */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netif_trans_update(netdev);
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

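/* Used by netpoll clients (e.g. netconsole) to receive packets with
 * interrupts disabled; a small fixed budget stands in for NAPI.
 */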
#ifdef CONFIG_NET_POLL_CONTROLLER
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

static int octeon_mgmt_nway_reset(struct net_device *dev)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (dev->phydev)
		return phy_start_aneg(dev->phydev);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo = octeon_mgmt_get_drvinfo,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

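/* A MIX/AGL port pair appears to be described by a
 * "cavium,octeon-5750-mix" device tree node: its "reg" entries supply
 * the MIX, AGL and AGL_PRT_CTL register ranges, "cell-index" selects
 * the port, and "phy-handle" optionally points at the attached PHY.
 */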
static int octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	platform_set_drvdata(pdev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);

	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);
		goto err;
	}

	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	if (!p->mix || !p->agl || !p->agl_prt_ctl) {
		dev_err(&pdev->dev, "failed to map I/O memory\n");
		result = -ENOMEM;
		goto err;
	}

	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;

	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(netdev);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (result)
		goto err;

	netif_carrier_off(netdev);
	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	of_node_put(p->phy_np);
	free_netdev(netdev);
	return result;
}

static int octeon_mgmt_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct octeon_mgmt *p = netdev_priv(netdev);

	unregister_netdev(netdev);
	of_node_put(p->phy_np);
	free_netdev(netdev);
	return 0;
}

static const struct of_device_id octeon_mgmt_match[] = {
	{
		.compatible = "cavium,octeon-5750-mix",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mgmt_match);

static struct platform_driver octeon_mgmt_driver = {
	.driver = {
		.name = "octeon_mgmt",
		.of_match_table = octeon_mgmt_match,
	},
	.probe = octeon_mgmt_probe,
	.remove = octeon_mgmt_remove,
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init octeon_mgmt_mod_init(void)
{
	/* Force our mdiobus driver module to be loaded first. */
	octeon_mdiobus_force_mod_depencency();
	return platform_driver_register(&octeon_mgmt_driver);
}

static void __exit octeon_mgmt_mod_exit(void)
{
	platform_driver_unregister(&octeon_mgmt_driver);
}

module_init(octeon_mgmt_mod_init);
module_exit(octeon_mgmt_mod_exit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);