// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Hisilicon Fast Ethernet MAC Driver
 *
 * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
 */

#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* per-port MAC control register list */
#define MAC_PORTSEL 0x0200
#define MAC_PORTSEL_STAT_CPU BIT(0)
#define MAC_PORTSEL_RMII BIT(1)
#define MAC_PORTSET 0x0208
#define MAC_PORTSET_DUPLEX_FULL BIT(0)
#define MAC_PORTSET_LINKED BIT(1)
#define MAC_PORTSET_SPEED_100M BIT(2)
#define MAC_SET 0x0210
#define MAX_FRAME_SIZE 1600
#define MAX_FRAME_SIZE_MASK GENMASK(10, 0)
#define BIT_PAUSE_EN BIT(18)
#define RX_COALESCE_SET 0x0340
#define RX_COALESCED_FRAME_OFFSET 24
#define RX_COALESCED_FRAMES 8
#define RX_COALESCED_TIMER 0x74
#define QLEN_SET 0x0344
#define RX_DEPTH_OFFSET 8
#define MAX_HW_FIFO_DEPTH 64
#define HW_TX_FIFO_DEPTH 12
#define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
#define IQFRM_DES 0x0354
#define RX_FRAME_LEN_MASK GENMASK(11, 0)
#define IQ_ADDR 0x0358
#define EQ_ADDR 0x0360
#define EQFRM_LEN 0x0364
#define ADDRQ_STAT 0x036C
#define TX_CNT_INUSE_MASK GENMASK(5, 0)
#define BIT_TX_READY BIT(24)
#define BIT_RX_READY BIT(25)

/* global control register list */
#define GLB_HOSTMAC_L32 0x0000
#define GLB_HOSTMAC_H16 0x0004
#define GLB_SOFT_RESET 0x0008
#define SOFT_RESET_ALL BIT(0)
#define GLB_FWCTRL 0x0010
#define FWCTRL_VLAN_ENABLE BIT(0)
#define FWCTRL_FW2CPU_ENA BIT(5)
#define FWCTRL_FWALL2CPU BIT(7)
#define GLB_MACTCTRL 0x0014
#define MACTCTRL_UNI2CPU BIT(1)
#define MACTCTRL_MULTI2CPU BIT(3)
#define MACTCTRL_BROAD2CPU BIT(5)
#define MACTCTRL_MACT_ENA BIT(7)
#define GLB_IRQ_STAT 0x0030
#define GLB_IRQ_ENA 0x0034
#define IRQ_ENA_PORT0_MASK GENMASK(7, 0)
#define IRQ_ENA_PORT0 BIT(18)
#define IRQ_ENA_ALL BIT(19)
#define GLB_IRQ_RAW 0x0038
#define IRQ_INT_RX_RDY BIT(0)
#define IRQ_INT_TX_PER_PACKET BIT(1)
#define IRQ_INT_TX_FIFO_EMPTY BIT(6)
#define IRQ_INT_MULTI_RXRDY BIT(7)
#define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \
		      IRQ_INT_TX_PER_PACKET | \
		      IRQ_INT_TX_FIFO_EMPTY)
#define GLB_MAC_L32_BASE 0x0100
#define GLB_MAC_H16_BASE 0x0104
#define MACFLT_HI16_MASK GENMASK(15, 0)
#define BIT_MACFLT_ENA BIT(17)
#define BIT_MACFLT_FW2CPU BIT(21)
#define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8))
#define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8))
#define MAX_MAC_FILTER_NUM 8
#define MAX_UNICAST_ADDRESSES 2
#define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \
				 MAX_UNICAST_ADDRESSES)

#define TXQ_NUM 64
#define RXQ_NUM 128
#define FEMAC_POLL_WEIGHT 16

#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"

enum phy_reset_delays {
	PRE_DELAY,
	PULSE,
	POST_DELAY,
	DELAYS_NUM,
};
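
/* Illustrative use of the reset-delay property read in probe below; the three
 * cells are the PRE_DELAY, PULSE and POST_DELAY times in microseconds (the
 * values here are examples only, not taken from a real board):
 *
 *	hisilicon,phy-reset-delays-us = <10000 20000 30000>;
 */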

/* Simple circular ring used for both TX and RX bookkeeping: head is the next
 * slot the driver hands to the hardware, tail is the next slot it expects
 * back from the hardware.
 */
struct hisi_femac_queue {
	struct sk_buff **skb;
	dma_addr_t *dma_phys;
	int num;
	unsigned int head;
	unsigned int tail;
};

struct hisi_femac_priv {
	void __iomem *port_base;
	void __iomem *glb_base;
	struct clk *clk;
	struct reset_control *mac_rst;
	struct reset_control *phy_rst;
	u32 phy_reset_delays[DELAYS_NUM];
	u32 link_status;

	struct device *dev;
	struct net_device *ndev;

	struct hisi_femac_queue txq;
	struct hisi_femac_queue rxq;
	u32 tx_fifo_used_cnt;
	struct napi_struct napi;
};

static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
{
	u32 val;

	val = readl(priv->glb_base + GLB_IRQ_ENA);
	writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
}

static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
{
	u32 val;

	val = readl(priv->glb_base + GLB_IRQ_ENA);
	writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
}

static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
				    struct sk_buff *skb, unsigned int pos)
{
	dma_addr_t dma_addr;

	dma_addr = priv->txq.dma_phys[pos];
	dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
}

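/* TX completion: the MAC reports the number of frames still held in its
 * transmit FIFO in the low bits of ADDRQ_STAT, while tx_fifo_used_cnt counts
 * the frames the driver has queued; the difference tells us how many frames
 * have left the FIFO and can now be unmapped and freed.
 */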
static void hisi_femac_xmit_reclaim(struct net_device *dev)
{
	struct sk_buff *skb;
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	unsigned int bytes_compl = 0, pkts_compl = 0;
	u32 val;

	netif_tx_lock(dev);

	val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
	while (val < priv->tx_fifo_used_cnt) {
		skb = txq->skb[txq->tail];
		if (unlikely(!skb)) {
			netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
				   val, priv->tx_fifo_used_cnt);
			break;
		}
		hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
		pkts_compl++;
		bytes_compl += skb->len;
		dev_kfree_skb_any(skb);

		priv->tx_fifo_used_cnt--;

		val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
		txq->skb[txq->tail] = NULL;
		txq->tail = (txq->tail + 1) % txq->num;
	}

	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
		netif_wake_queue(dev);

	netif_tx_unlock(dev);
}

static void hisi_femac_adjust_link(struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct phy_device *phy = dev->phydev;
	u32 status = 0;

	if (phy->link)
		status |= MAC_PORTSET_LINKED;
	if (phy->duplex == DUPLEX_FULL)
		status |= MAC_PORTSET_DUPLEX_FULL;
	if (phy->speed == SPEED_100)
		status |= MAC_PORTSET_SPEED_100M;

	if ((status != priv->link_status) &&
	    ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
		writel(status, priv->port_base + MAC_PORTSET);
		priv->link_status = status;
		phy_print_status(phy);
	}
}

static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
{
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	u32 pos;
	u32 len = MAX_FRAME_SIZE;
	dma_addr_t addr;

	pos = rxq->head;
	while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
		if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
			break;
		if (unlikely(rxq->skb[pos])) {
			netdev_err(priv->ndev, "err skb[%d]=%p\n",
				   pos, rxq->skb[pos]);
			break;
		}
		skb = netdev_alloc_skb_ip_align(priv->ndev, len);
		if (unlikely(!skb))
			break;

		addr = dma_map_single(priv->dev, skb->data, len,
				      DMA_FROM_DEVICE);
		if (dma_mapping_error(priv->dev, addr)) {
			dev_kfree_skb_any(skb);
			break;
		}
		rxq->dma_phys[pos] = addr;
		rxq->skb[pos] = skb;
		/* hand the buffer to hardware by pushing its DMA address
		 * into the RX "in" queue
		 */
		writel(addr, priv->port_base + IQ_ADDR);
		pos = (pos + 1) % rxq->num;
	}
	rxq->head = pos;
}

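/* RX: buffers are handed to the hardware in ring order via IQ_ADDR and
 * completed frames come back in that same order, so rxq->tail always refers
 * to the skb matching the next descriptor popped from IQFRM_DES.
 */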
static int hisi_femac_rx(struct net_device *dev, int limit)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	dma_addr_t addr;
	u32 rx_pkt_info, pos, len, rx_pkts_num = 0;

	pos = rxq->tail;
	while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
		rx_pkt_info = readl(priv->port_base + IQFRM_DES);
		len = rx_pkt_info & RX_FRAME_LEN_MASK;
		len -= ETH_FCS_LEN;

		/* tell hardware we will deal with this packet */
		writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);

		rx_pkts_num++;

		skb = rxq->skb[pos];
		if (unlikely(!skb)) {
			netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
			break;
		}
		rxq->skb[pos] = NULL;

		addr = rxq->dma_phys[pos];
		dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
				 DMA_FROM_DEVICE);
		skb_put(skb, len);
		if (unlikely(skb->len > MAX_FRAME_SIZE)) {
			netdev_err(dev, "rcv len err, len = %d\n", skb->len);
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(&priv->napi, skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
next:
		pos = (pos + 1) % rxq->num;
		if (rx_pkts_num >= limit)
			break;
	}
	rxq->tail = pos;

	hisi_femac_rx_refill(priv);

	return rx_pkts_num;
}

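/* NAPI poll handler: reclaims finished TX frames and receives up to the
 * remaining budget, then re-reads and acknowledges the raw interrupt status
 * so events that arrived during the pass trigger another loop instead of
 * being lost.  Interrupts are only re-enabled once no work is left.
 */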
static int hisi_femac_poll(struct napi_struct *napi, int budget)
{
	struct hisi_femac_priv *priv = container_of(napi,
					struct hisi_femac_priv, napi);
	struct net_device *dev = priv->ndev;
	int work_done = 0, task = budget;
	int ints, num;

	do {
		hisi_femac_xmit_reclaim(dev);
		num = hisi_femac_rx(dev, task);
		work_done += num;
		task -= num;
		if (work_done >= budget)
			break;

		ints = readl(priv->glb_base + GLB_IRQ_RAW);
		writel(ints & DEF_INT_MASK,
		       priv->glb_base + GLB_IRQ_RAW);
	} while (ints & DEF_INT_MASK);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		hisi_femac_irq_enable(priv, DEF_INT_MASK &
					(~IRQ_INT_TX_PER_PACKET));
	}

	return work_done;
}

static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
{
	int ints;
	struct net_device *dev = (struct net_device *)dev_id;
	struct hisi_femac_priv *priv = netdev_priv(dev);

	ints = readl(priv->glb_base + GLB_IRQ_RAW);

	if (likely(ints & DEF_INT_MASK)) {
		writel(ints & DEF_INT_MASK,
		       priv->glb_base + GLB_IRQ_RAW);
		hisi_femac_irq_disable(priv, DEF_INT_MASK);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

static int hisi_femac_init_queue(struct device *dev,
				 struct hisi_femac_queue *queue,
				 unsigned int num)
{
	queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
				  GFP_KERNEL);
	if (!queue->skb)
		return -ENOMEM;

	queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				       GFP_KERNEL);
	if (!queue->dma_phys)
		return -ENOMEM;

	queue->num = num;
	queue->head = 0;
	queue->tail = 0;

	return 0;
}

static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
{
	int ret;

	ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
	if (ret)
		return ret;

	ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
	if (ret)
		return ret;

	priv->tx_fifo_used_cnt = 0;

	return 0;
}

static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
{
	struct hisi_femac_queue *txq = &priv->txq;
	struct hisi_femac_queue *rxq = &priv->rxq;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u32 pos;

	pos = rxq->tail;
	while (pos != rxq->head) {
		skb = rxq->skb[pos];
		if (unlikely(!skb)) {
			netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
				   pos, rxq->head);
			continue;
		}

		dma_addr = rxq->dma_phys[pos];
		dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		dev_kfree_skb_any(skb);
		rxq->skb[pos] = NULL;
		pos = (pos + 1) % rxq->num;
	}
	rxq->tail = pos;

	pos = txq->tail;
	while (pos != txq->head) {
		skb = txq->skb[pos];
		if (unlikely(!skb)) {
			netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
				   pos, txq->head);
			continue;
		}
		hisi_femac_tx_dma_unmap(priv, skb, pos);
		dev_kfree_skb_any(skb);
		txq->skb[pos] = NULL;
		pos = (pos + 1) % txq->num;
	}
	txq->tail = pos;
	priv->tx_fifo_used_cnt = 0;
}

static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
				       unsigned char *mac)
{
	u32 reg;

	reg = mac[1] | (mac[0] << 8);
	writel(reg, priv->glb_base + GLB_HOSTMAC_H16);

	reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
	writel(reg, priv->glb_base + GLB_HOSTMAC_L32);

	return 0;
}

static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
{
	u32 val;

	val = readl(priv->glb_base + GLB_SOFT_RESET);
	val |= SOFT_RESET_ALL;
	writel(val, priv->glb_base + GLB_SOFT_RESET);

	usleep_range(500, 800);

	val &= ~SOFT_RESET_ALL;
	writel(val, priv->glb_base + GLB_SOFT_RESET);

	return 0;
}

static int hisi_femac_net_open(struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);

	hisi_femac_port_reset(priv);
	hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
	hisi_femac_rx_refill(priv);

	netif_carrier_off(dev);
	netdev_reset_queue(dev);
	netif_start_queue(dev);
	napi_enable(&priv->napi);

	priv->link_status = 0;
	if (dev->phydev)
		phy_start(dev->phydev);

	writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
	hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);

	return 0;
}

static int hisi_femac_net_close(struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);

	hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);

	if (dev->phydev)
		phy_stop(dev->phydev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	hisi_femac_free_skb_rings(priv);

	return 0;
}

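/* Transmit one skb: the whole frame is mapped as a single DMA buffer, its
 * address is written to EQ_ADDR and its on-wire length (skb->len plus
 * ETH_FCS_LEN) to EQFRM_LEN, which queues it to the hardware.  If the FIFO
 * or the software ring is full, the queue is stopped and the skb is requeued
 * by returning NETDEV_TX_BUSY.
 */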
static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
				       struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct hisi_femac_queue *txq = &priv->txq;
	dma_addr_t addr;
	u32 val;

	val = readl(priv->port_base + ADDRQ_STAT);
	val &= BIT_TX_READY;
	if (!val) {
		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
				 txq->num))) {
		hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
		dev->stats.tx_dropped++;
		dev->stats.tx_fifo_errors++;
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(priv->dev, skb->data,
			      skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, addr))) {
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	txq->dma_phys[txq->head] = addr;

	txq->skb[txq->head] = skb;
	txq->head = (txq->head + 1) % txq->num;

	writel(addr, priv->port_base + EQ_ADDR);
	writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);

	priv->tx_fifo_used_cnt++;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	netdev_sent_queue(dev, skb->len);

	return NETDEV_TX_OK;
}

static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);
	struct sockaddr *skaddr = p;

	if (!is_valid_ether_addr(skaddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
	dev->addr_assign_type &= ~NET_ADDR_RANDOM;

	hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);

	return 0;
}

static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
					     unsigned int reg_n, bool enable)
{
	u32 val;

	val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
	if (enable)
		val |= BIT_MACFLT_ENA;
	else
		val &= ~BIT_MACFLT_ENA;
	writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
}

static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
					  unsigned char *addr,
					  unsigned int reg_n)
{
	unsigned int high, low;
	u32 val;

	high = GLB_MAC_H16(reg_n);
	low = GLB_MAC_L32(reg_n);

	val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
	writel(val, priv->glb_base + low);

	val = readl(priv->glb_base + high);
	val &= ~MACFLT_HI16_MASK;
	val |= ((addr[0] << 8) | addr[1]);
	val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
	writel(val, priv->glb_base + high);
}

static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
					bool promisc_mode)
{
	u32 val;

	val = readl(priv->glb_base + GLB_FWCTRL);
	if (promisc_mode)
		val |= FWCTRL_FWALL2CPU;
	else
		val &= ~FWCTRL_FWALL2CPU;
	writel(val, priv->glb_base + GLB_FWCTRL);
}

/* Program the hardware multicast address filters; when the list does not fit,
 * set MACTCTRL_MULTI2CPU so multicast frames are forwarded to the CPU instead.
 */
static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
{
	struct net_device *dev = priv->ndev;
	u32 val;

	val = readl(priv->glb_base + GLB_MACTCTRL);
	if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
	    (dev->flags & IFF_ALLMULTI)) {
		val |= MACTCTRL_MULTI2CPU;
	} else {
		int reg = MAX_UNICAST_ADDRESSES;
		int i;
		struct netdev_hw_addr *ha;

		for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
			hisi_femac_enable_hw_addr_filter(priv, i, false);

		netdev_for_each_mc_addr(ha, dev) {
			hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
			reg++;
		}
		val &= ~MACTCTRL_MULTI2CPU;
	}
	writel(val, priv->glb_base + GLB_MACTCTRL);
}

/* Program the hardware unicast address filters; when the list does not fit,
 * set MACTCTRL_UNI2CPU so unicast frames are forwarded to the CPU instead.
 */
static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
{
	struct net_device *dev = priv->ndev;
	u32 val;

	val = readl(priv->glb_base + GLB_MACTCTRL);
	if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
		val |= MACTCTRL_UNI2CPU;
	} else {
		int reg = 0;
		int i;
		struct netdev_hw_addr *ha;

		for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
			hisi_femac_enable_hw_addr_filter(priv, i, false);

		netdev_for_each_uc_addr(ha, dev) {
			hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
			reg++;
		}
		val &= ~MACTCTRL_UNI2CPU;
	}
	writel(val, priv->glb_base + GLB_MACTCTRL);
}

static void hisi_femac_net_set_rx_mode(struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);

	if (dev->flags & IFF_PROMISC) {
		hisi_femac_set_promisc_mode(priv, true);
	} else {
		hisi_femac_set_promisc_mode(priv, false);
		hisi_femac_set_mc_addr_filter(priv);
		hisi_femac_set_uc_addr_filter(priv);
	}
}

static int hisi_femac_net_ioctl(struct net_device *dev,
				struct ifreq *ifreq, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(dev->phydev, ifreq, cmd);
}

static const struct ethtool_ops hisi_femac_ethtools_ops = {
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops hisi_femac_netdev_ops = {
	.ndo_open = hisi_femac_net_open,
	.ndo_stop = hisi_femac_net_close,
	.ndo_start_xmit = hisi_femac_net_xmit,
	.ndo_do_ioctl = hisi_femac_net_ioctl,
	.ndo_set_mac_address = hisi_femac_set_mac_address,
	.ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
};

static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
{
	reset_control_assert(priv->mac_rst);
	reset_control_deassert(priv->mac_rst);
}

static void hisi_femac_sleep_us(u32 time_us)
{
	u32 time_ms;

	if (!time_us)
		return;

	time_ms = DIV_ROUND_UP(time_us, 1000);
	if (time_ms < 20)
		usleep_range(time_us, time_us + 500);
	else
		msleep(time_ms);
}

static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
{
	/* Make sure the PHY is out of reset first, then apply a clean
	 * assert/deassert pulse.  All three delays come from the
	 * "hisilicon,phy-reset-delays-us" device-tree property.
	 */
	reset_control_deassert(priv->phy_rst);
	hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);

	reset_control_assert(priv->phy_rst);
	/* The required reset pulse width depends on the PHY hardware,
	 * so it is taken from the board-specific delay values.
	 */
	hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
	reset_control_deassert(priv->phy_rst);

	hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
}

static void hisi_femac_port_init(struct hisi_femac_priv *priv)
{
	u32 val;

	/* MAC gets its link status and PHY mode from software configuration */
	val = MAC_PORTSEL_STAT_CPU;
	if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
		val |= MAC_PORTSEL_RMII;
	writel(val, priv->port_base + MAC_PORTSEL);

	/* clear all interrupt status */
	writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
	hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);

	val = readl(priv->glb_base + GLB_FWCTRL);
	val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
	val |= FWCTRL_FW2CPU_ENA;
	writel(val, priv->glb_base + GLB_FWCTRL);

	val = readl(priv->glb_base + GLB_MACTCTRL);
	val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
	writel(val, priv->glb_base + GLB_MACTCTRL);

	val = readl(priv->port_base + MAC_SET);
	val &= ~MAX_FRAME_SIZE_MASK;
	val |= MAX_FRAME_SIZE;
	writel(val, priv->port_base + MAC_SET);

	val = RX_COALESCED_TIMER |
		(RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
	writel(val, priv->port_base + RX_COALESCE_SET);

	val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
	writel(val, priv->port_base + QLEN_SET);
}

static int hisi_femac_drv_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct resource *res;
	struct net_device *ndev;
	struct hisi_femac_priv *priv;
	struct phy_device *phy;
	int ret;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->ndev = ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->port_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->port_base)) {
		ret = PTR_ERR(priv->port_base);
		goto out_free_netdev;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	priv->glb_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->glb_base)) {
		ret = PTR_ERR(priv->glb_base);
		goto out_free_netdev;
	}

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clk\n");
		ret = -ENODEV;
		goto out_free_netdev;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(dev, "failed to enable clk %d\n", ret);
		goto out_free_netdev;
	}

	priv->mac_rst = devm_reset_control_get(dev, "mac");
	if (IS_ERR(priv->mac_rst)) {
		ret = PTR_ERR(priv->mac_rst);
		goto out_disable_clk;
	}
	hisi_femac_core_reset(priv);

	priv->phy_rst = devm_reset_control_get(dev, "phy");
	if (IS_ERR(priv->phy_rst)) {
		priv->phy_rst = NULL;
	} else {
		ret = of_property_read_u32_array(node,
						 PHY_RESET_DELAYS_PROPERTY,
						 priv->phy_reset_delays,
						 DELAYS_NUM);
		if (ret)
			goto out_disable_clk;
		hisi_femac_phy_reset(priv);
	}

	phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
	if (!phy) {
		dev_err(dev, "connect to PHY failed!\n");
		ret = -ENODEV;
		goto out_disable_clk;
	}

	phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
			   (unsigned long)phy->phy_id,
			   phy_modes(phy->interface));

	ret = of_get_mac_address(node, ndev->dev_addr);
	if (ret) {
		eth_hw_addr_random(ndev);
		dev_warn(dev, "using random MAC address %pM\n",
			 ndev->dev_addr);
	}

	ndev->watchdog_timeo = 6 * HZ;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hisi_femac_netdev_ops;
	ndev->ethtool_ops = &hisi_femac_ethtools_ops;
	netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);

	hisi_femac_port_init(priv);

	ret = hisi_femac_init_tx_and_rx_queues(priv);
	if (ret)
		goto out_disconnect_phy;

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq <= 0) {
		dev_err(dev, "No irq resource\n");
		ret = -ENODEV;
		goto out_disconnect_phy;
	}

	ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
			       IRQF_SHARED, pdev->name, ndev);
	if (ret) {
		dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
		goto out_disconnect_phy;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "register_netdev failed!\n");
		goto out_disconnect_phy;
	}

	return ret;

out_disconnect_phy:
	netif_napi_del(&priv->napi);
	phy_disconnect(phy);
out_disable_clk:
	clk_disable_unprepare(priv->clk);
out_free_netdev:
	free_netdev(ndev);

	return ret;
}

static int hisi_femac_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hisi_femac_priv *priv = netdev_priv(ndev);

	netif_napi_del(&priv->napi);
	unregister_netdev(ndev);

	phy_disconnect(ndev->phydev);
	clk_disable_unprepare(priv->clk);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
static int hisi_femac_drv_suspend(struct platform_device *pdev,
				  pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hisi_femac_priv *priv = netdev_priv(ndev);

	disable_irq(ndev->irq);
	if (netif_running(ndev)) {
		hisi_femac_net_close(ndev);
		netif_device_detach(ndev);
	}

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int hisi_femac_drv_resume(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hisi_femac_priv *priv = netdev_priv(ndev);

	clk_prepare_enable(priv->clk);
	if (priv->phy_rst)
		hisi_femac_phy_reset(priv);

	if (netif_running(ndev)) {
		hisi_femac_port_init(priv);
		hisi_femac_net_open(ndev);
		netif_device_attach(ndev);
	}
	enable_irq(ndev->irq);

	return 0;
}
#endif

static const struct of_device_id hisi_femac_match[] = {
	{.compatible = "hisilicon,hisi-femac-v1",},
	{.compatible = "hisilicon,hisi-femac-v2",},
	{.compatible = "hisilicon,hi3516cv300-femac",},
	{},
};

MODULE_DEVICE_TABLE(of, hisi_femac_match);
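
/* Sketch of a matching device-tree node.  The addresses, interrupt number,
 * clock/reset phandles, specifiers and delay values below are illustrative
 * only and board specific; they are not taken from a real device tree:
 *
 *	ethernet@10090000 {
 *		compatible = "hisilicon,hi3516cv300-femac",
 *			     "hisilicon,hisi-femac-v2";
 *		reg = <0x10090000 0x1000>, <0x10091300 0x200>;
 *		interrupts = <12>;
 *		clocks = <&crg 1>;
 *		resets = <&crg 0xec 0>, <&crg 0xec 3>;
 *		reset-names = "mac", "phy";
 *		phy-mode = "rmii";
 *		phy-handle = <&phy0>;
 *		hisilicon,phy-reset-delays-us = <10000 20000 30000>;
 *	};
 */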

static struct platform_driver hisi_femac_driver = {
	.driver = {
		.name = "hisi-femac",
		.of_match_table = hisi_femac_match,
	},
	.probe = hisi_femac_drv_probe,
	.remove = hisi_femac_drv_remove,
#ifdef CONFIG_PM
	.suspend = hisi_femac_drv_suspend,
	.resume = hisi_femac_drv_resume,
#endif
};

module_platform_driver(hisi_femac_driver);

MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:hisi-femac");