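/*
 * Calxeda 10G XGMAC Ethernet driver
 *
 * Platform driver for the XGMAC block found on Calxeda SoCs
 * (device tree compatible "calxeda,hb-xgmac").
 */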
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

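/* XGMAC core register offsets, relative to the memory-mapped base */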
#define XGMAC_CONTROL 0x00000000
#define XGMAC_FRAME_FILTER 0x00000004
#define XGMAC_FLOW_CTRL 0x00000018
#define XGMAC_VLAN_TAG 0x0000001C
#define XGMAC_VERSION 0x00000020
#define XGMAC_VLAN_INCL 0x00000024
#define XGMAC_LPI_CTRL 0x00000028
#define XGMAC_LPI_TIMER 0x0000002C
#define XGMAC_TX_PACE 0x00000030
#define XGMAC_VLAN_HASH 0x00000034
#define XGMAC_DEBUG 0x00000038
#define XGMAC_INT_STAT 0x0000003C
#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
#define XGMAC_HASH(n) (0x00000300 + (n) * 4)
#define XGMAC_NUM_HASH 16
#define XGMAC_OMR 0x00000400
#define XGMAC_REMOTE_WAKE 0x00000700
#define XGMAC_PMT 0x00000704
#define XGMAC_MMC_CTRL 0x00000800
#define XGMAC_MMC_INTR_RX 0x00000804
#define XGMAC_MMC_INTR_TX 0x00000808
#define XGMAC_MMC_INTR_MASK_RX 0x0000080c
#define XGMAC_MMC_INTR_MASK_TX 0x00000810

#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
#define XGMAC_MMC_TXBCFRAME_G 0x00000824
#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
#define XGMAC_MMC_TXVLANFRAME 0x0000089C

#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
#define XGMAC_MMC_RXBCFRAME_G 0x00000918
#define XGMAC_MMC_RXMCFRAME_G 0x00000920
#define XGMAC_MMC_RXCRCERR 0x00000928
#define XGMAC_MMC_RXRUNT 0x00000930
#define XGMAC_MMC_RXJABBER 0x00000934
#define XGMAC_MMC_RXUCFRAME_G 0x00000970
#define XGMAC_MMC_RXLENGTHERR 0x00000978
#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
#define XGMAC_MMC_RXOVERFLOW 0x00000990
#define XGMAC_MMC_RXVLANFRAME 0x00000998
#define XGMAC_MMC_RXWATCHDOG 0x000009a0

#define XGMAC_DMA_BUS_MODE 0x00000f00
#define XGMAC_DMA_TX_POLL 0x00000f04
#define XGMAC_DMA_RX_POLL 0x00000f08
#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c
#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10
#define XGMAC_DMA_STATUS 0x00000f14
#define XGMAC_DMA_CONTROL 0x00000f18
#define XGMAC_DMA_INTR_ENA 0x00000f1c
#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20
#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24
#define XGMAC_DMA_AXI_BUS 0x00000f28
#define XGMAC_DMA_AXI_STATUS 0x00000f2C
#define XGMAC_DMA_HW_FEATURE 0x00000f58

#define XGMAC_ADDR_AE 0x80000000

#define XGMAC_PMT_POINTER_RESET 0x80000000
#define XGMAC_PMT_GLBL_UNICAST 0x00000200
#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
#define XGMAC_PMT_MAGIC_PKT 0x00000020
#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
#define XGMAC_PMT_POWERDOWN 0x00000001

#define XGMAC_CONTROL_SPD 0x40000000
#define XGMAC_CONTROL_SPD_MASK 0x60000000
#define XGMAC_CONTROL_SPD_1G 0x60000000
#define XGMAC_CONTROL_SPD_2_5G 0x40000000
#define XGMAC_CONTROL_SPD_10G 0x00000000
#define XGMAC_CONTROL_SARC 0x10000000
#define XGMAC_CONTROL_SARK_MASK 0x18000000
#define XGMAC_CONTROL_CAR 0x04000000
#define XGMAC_CONTROL_CAR_MASK 0x06000000
#define XGMAC_CONTROL_DP 0x01000000
#define XGMAC_CONTROL_WD 0x00800000
#define XGMAC_CONTROL_JD 0x00400000
#define XGMAC_CONTROL_JE 0x00100000
#define XGMAC_CONTROL_LM 0x00001000
#define XGMAC_CONTROL_IPC 0x00000400
#define XGMAC_CONTROL_ACS 0x00000080
#define XGMAC_CONTROL_DDIC 0x00000010
#define XGMAC_CONTROL_TE 0x00000008
#define XGMAC_CONTROL_RE 0x00000004

#define XGMAC_FRAME_FILTER_PR 0x00000001
#define XGMAC_FRAME_FILTER_HUC 0x00000002
#define XGMAC_FRAME_FILTER_HMC 0x00000004
#define XGMAC_FRAME_FILTER_DAIF 0x00000008
#define XGMAC_FRAME_FILTER_PM 0x00000010
#define XGMAC_FRAME_FILTER_DBF 0x00000020
#define XGMAC_FRAME_FILTER_SAIF 0x00000100
#define XGMAC_FRAME_FILTER_SAF 0x00000200
#define XGMAC_FRAME_FILTER_HPF 0x00000400
#define XGMAC_FRAME_FILTER_VHF 0x00000800
#define XGMAC_FRAME_FILTER_VPF 0x00001000
#define XGMAC_FRAME_FILTER_RA 0x80000000

#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000
#define XGMAC_FLOW_CTRL_PT_SHIFT 16
#define XGMAC_FLOW_CTRL_DZQP 0x00000080
#define XGMAC_FLOW_CTRL_PLT 0x00000020
#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030
#define XGMAC_FLOW_CTRL_UP 0x00000008
#define XGMAC_FLOW_CTRL_RFE 0x00000004
#define XGMAC_FLOW_CTRL_TFE 0x00000002
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001

#define XGMAC_INT_STAT_PMTIM 0x00800000
#define XGMAC_INT_STAT_PMT 0x0080
#define XGMAC_INT_STAT_LPI 0x0040

#define DMA_BUS_MODE_SFT_RESET 0x00000001
#define DMA_BUS_MODE_DSL_MASK 0x0000007c
#define DMA_BUS_MODE_DSL_SHIFT 2
#define DMA_BUS_MODE_ATDS 0x00000080

#define DMA_BUS_MODE_PBL_MASK 0x00003f00
#define DMA_BUS_MODE_PBL_SHIFT 8
#define DMA_BUS_MODE_FB 0x00010000
#define DMA_BUS_MODE_RPBL_MASK 0x003e0000
#define DMA_BUS_MODE_RPBL_SHIFT 17
#define DMA_BUS_MODE_USP 0x00800000
#define DMA_BUS_MODE_8PBL 0x01000000
#define DMA_BUS_MODE_AAL 0x02000000

#define DMA_BUS_PR_RATIO_MASK 0x0000c000
#define DMA_BUS_PR_RATIO_SHIFT 14
#define DMA_BUS_FB 0x00010000

#define DMA_CONTROL_ST 0x00002000
#define DMA_CONTROL_SR 0x00000002
#define DMA_CONTROL_DFF 0x01000000
#define DMA_CONTROL_OSF 0x00000004

#define DMA_INTR_ENA_NIE 0x00010000
#define DMA_INTR_ENA_AIE 0x00008000
#define DMA_INTR_ENA_ERE 0x00004000
#define DMA_INTR_ENA_FBE 0x00002000
#define DMA_INTR_ENA_ETE 0x00000400
#define DMA_INTR_ENA_RWE 0x00000200
#define DMA_INTR_ENA_RSE 0x00000100
#define DMA_INTR_ENA_RUE 0x00000080
#define DMA_INTR_ENA_RIE 0x00000040
#define DMA_INTR_ENA_UNE 0x00000020
#define DMA_INTR_ENA_OVE 0x00000010
#define DMA_INTR_ENA_TJE 0x00000008
#define DMA_INTR_ENA_TUE 0x00000004
#define DMA_INTR_ENA_TSE 0x00000002
#define DMA_INTR_ENA_TIE 0x00000001

#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
			 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)

#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
			   DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
			   DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
			   DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
			   DMA_INTR_ENA_TSE)

#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

#define DMA_STATUS_GMI 0x08000000
#define DMA_STATUS_GLI 0x04000000
#define DMA_STATUS_EB_MASK 0x00380000
#define DMA_STATUS_EB_TX_ABORT 0x00080000
#define DMA_STATUS_EB_RX_ABORT 0x00100000
#define DMA_STATUS_TS_MASK 0x00700000
#define DMA_STATUS_TS_SHIFT 20
#define DMA_STATUS_RS_MASK 0x000e0000
#define DMA_STATUS_RS_SHIFT 17
#define DMA_STATUS_NIS 0x00010000
#define DMA_STATUS_AIS 0x00008000
#define DMA_STATUS_ERI 0x00004000
#define DMA_STATUS_FBI 0x00002000
#define DMA_STATUS_ETI 0x00000400
#define DMA_STATUS_RWT 0x00000200
#define DMA_STATUS_RPS 0x00000100
#define DMA_STATUS_RU 0x00000080
#define DMA_STATUS_RI 0x00000040
#define DMA_STATUS_UNF 0x00000020
#define DMA_STATUS_OVF 0x00000010
#define DMA_STATUS_TJT 0x00000008
#define DMA_STATUS_TU 0x00000004
#define DMA_STATUS_TPS 0x00000002
#define DMA_STATUS_TI 0x00000001

#define MAC_ENABLE_TX 0x00000008
#define MAC_ENABLE_RX 0x00000004

#define XGMAC_OMR_TSF 0x00200000
#define XGMAC_OMR_FTF 0x00100000
#define XGMAC_OMR_TTC 0x00020000
#define XGMAC_OMR_TTC_MASK 0x00030000
#define XGMAC_OMR_RFD 0x00006000
#define XGMAC_OMR_RFD_MASK 0x00007000
#define XGMAC_OMR_RFA 0x00000600
#define XGMAC_OMR_RFA_MASK 0x00000E00
#define XGMAC_OMR_EFC 0x00000100
#define XGMAC_OMR_FEF 0x00000080
#define XGMAC_OMR_DT 0x00000040
#define XGMAC_OMR_RSF 0x00000020
#define XGMAC_OMR_RTC_256 0x00000018
#define XGMAC_OMR_RTC_MASK 0x00000018

#define DMA_HW_FEAT_TXCOESEL 0x00010000

#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008

#define MAX_DESC_BUF_SZ (0x2000 - 8)

#define RXDESC_EXT_STATUS 0x00000001
#define RXDESC_CRC_ERR 0x00000002
#define RXDESC_RX_ERR 0x00000008
#define RXDESC_RX_WDOG 0x00000010
#define RXDESC_FRAME_TYPE 0x00000020
#define RXDESC_GIANT_FRAME 0x00000080
#define RXDESC_LAST_SEG 0x00000100
#define RXDESC_FIRST_SEG 0x00000200
#define RXDESC_VLAN_FRAME 0x00000400
#define RXDESC_OVERFLOW_ERR 0x00000800
#define RXDESC_LENGTH_ERR 0x00001000
#define RXDESC_SA_FILTER_FAIL 0x00002000
#define RXDESC_DESCRIPTOR_ERR 0x00004000
#define RXDESC_ERROR_SUMMARY 0x00008000
#define RXDESC_FRAME_LEN_OFFSET 16
#define RXDESC_FRAME_LEN_MASK 0x3fff0000
#define RXDESC_DA_FILTER_FAIL 0x40000000

#define RXDESC1_END_RING 0x00008000

#define RXDESC_IP_PAYLOAD_MASK 0x00000003
#define RXDESC_IP_PAYLOAD_UDP 0x00000001
#define RXDESC_IP_PAYLOAD_TCP 0x00000002
#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
#define RXDESC_IP_HEADER_ERR 0x00000008
#define RXDESC_IP_PAYLOAD_ERR 0x00000010
#define RXDESC_IPV4_PACKET 0x00000040
#define RXDESC_IPV6_PACKET 0x00000080
#define TXDESC_UNDERFLOW_ERR 0x00000001
#define TXDESC_JABBER_TIMEOUT 0x00000002
#define TXDESC_LOCAL_FAULT 0x00000004
#define TXDESC_REMOTE_FAULT 0x00000008
#define TXDESC_VLAN_FRAME 0x00000010
#define TXDESC_FRAME_FLUSHED 0x00000020
#define TXDESC_IP_HEADER_ERR 0x00000040
#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
#define TXDESC_ERROR_SUMMARY 0x00008000
#define TXDESC_SA_CTRL_INSERT 0x00040000
#define TXDESC_SA_CTRL_REPLACE 0x00080000
#define TXDESC_2ND_ADDR_CHAINED 0x00100000
#define TXDESC_END_RING 0x00200000
#define TXDESC_CSUM_IP 0x00400000
#define TXDESC_CSUM_IP_PAYLD 0x00800000
#define TXDESC_CSUM_ALL 0x00C00000
#define TXDESC_CRC_EN_REPLACE 0x01000000
#define TXDESC_CRC_EN_APPEND 0x02000000
#define TXDESC_DISABLE_PAD 0x04000000
#define TXDESC_FIRST_SEG 0x10000000
#define TXDESC_LAST_SEG 0x20000000
#define TXDESC_INTERRUPT 0x40000000

#define DESC_OWN 0x80000000
#define DESC_BUFFER1_SZ_MASK 0x00001fff
#define DESC_BUFFER2_SZ_MASK 0x1fff0000
#define DESC_BUFFER2_SZ_OFFSET 16

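/*
 * Hardware DMA descriptor. Both the receive and transmit rings use the
 * same 8-word layout (the ATDS alternate-descriptor-size bit is set in
 * the DMA bus mode register during hw init). buf_size packs the lengths
 * of up to two buffers; buf1_addr and buf2_addr point at them.
 */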
struct xgmac_dma_desc {
	__le32 flags;
	__le32 buf_size;
	__le32 buf1_addr;
	__le32 buf2_addr;
	__le32 ext_status;
	__le32 res[3];
};

struct xgmac_extra_stats {
	/* Transmit errors */
	unsigned long tx_jabber;
	unsigned long tx_frame_flushed;
	unsigned long tx_payload_error;
	unsigned long tx_ip_header_error;
	unsigned long tx_local_fault;
	unsigned long tx_remote_fault;
	/* Receive errors */
	unsigned long rx_watchdog;
	unsigned long rx_da_filter_fail;
	unsigned long rx_payload_error;
	unsigned long rx_ip_header_error;
	/* DMA/abnormal interrupt counters */
	unsigned long tx_process_stopped;
	unsigned long rx_buf_unav;
	unsigned long rx_process_stopped;
	unsigned long tx_early;
	unsigned long fatal_bus_error;
};

struct xgmac_priv {
	struct xgmac_dma_desc *dma_rx;
	struct sk_buff **rx_skbuff;
	unsigned int rx_tail;
	unsigned int rx_head;

	struct xgmac_dma_desc *dma_tx;
	struct sk_buff **tx_skbuff;
	unsigned int tx_head;
	unsigned int tx_tail;
	int tx_irq_cnt;

	void __iomem *base;
	unsigned int dma_buf_sz;
	dma_addr_t dma_rx_phy;
	dma_addr_t dma_tx_phy;

	struct net_device *dev;
	struct device *device;
	struct napi_struct napi;

	int max_macs;
	struct xgmac_extra_stats xstats;

	spinlock_t stats_lock;
	int pmt_irq;
	char rx_pause;
	char tx_pause;
	int wolopts;
	struct work_struct tx_timeout_work;
};

#define MAX_MTU 9000
#define PAUSE_TIME 0x400

#define DMA_RX_RING_SZ 256
#define DMA_TX_RING_SZ 128

#define TX_THRESH (DMA_TX_RING_SZ/4)

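/* Ring index helpers; the ring sizes above must remain powers of two */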
#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)

#define tx_dma_ring_space(p) \
	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)

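/*
 * A descriptor can address two buffers. Buffer 1 is capped at
 * MAX_DESC_BUF_SZ bytes; anything beyond that is carried in buffer 2.
 */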
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
	if (buf_sz > MAX_DESC_BUF_SZ)
		p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
			(buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
	else
		p->buf_size = cpu_to_le32(buf_sz);
}

static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
	u32 len = le32_to_cpu(p->buf_size);
	return (len & DESC_BUFFER1_SZ_MASK) +
		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}

static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
				     int buf_sz)
{
	struct xgmac_dma_desc *end = p + ring_size - 1;

	memset(p, 0, sizeof(*p) * ring_size);

	for (; p <= end; p++)
		desc_set_buf_len(p, buf_sz);

	end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
}

static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
{
	memset(p, 0, sizeof(*p) * ring_size);
	p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
}

static inline int desc_get_owner(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & DESC_OWN;
}

static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
{
	p->flags = cpu_to_le32(DESC_OWN);
}

static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	tmpflags |= flags | DESC_OWN;
	p->flags = cpu_to_le32(tmpflags);
}

static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
{
	u32 tmpflags = le32_to_cpu(p->flags);
	tmpflags &= TXDESC_END_RING;
	p->flags = cpu_to_le32(tmpflags);
}

static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}

static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
}

static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
	return le32_to_cpu(p->buf1_addr);
}

static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
				     u32 paddr, int len)
{
	p->buf1_addr = cpu_to_le32(paddr);
	if (len > MAX_DESC_BUF_SZ)
		p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
}

static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
					      u32 paddr, int len)
{
	desc_set_buf_len(p, len);
	desc_set_buf_addr(p, paddr, len);
}

static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
{
	u32 data = le32_to_cpu(p->flags);
	u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
	if (data & RXDESC_FRAME_TYPE)
		len -= ETH_FCS_LEN;

	return len;
}

static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
{
	int timeout = 1000;
	u32 reg = readl(ioaddr + XGMAC_OMR);
	writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);

	while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
		udelay(1);
}

static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	u32 status = le32_to_cpu(p->flags);

	if (!(status & TXDESC_ERROR_SUMMARY))
		return 0;

	netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
	if (status & TXDESC_JABBER_TIMEOUT)
		x->tx_jabber++;
	if (status & TXDESC_FRAME_FLUSHED)
		x->tx_frame_flushed++;
	if (status & TXDESC_UNDERFLOW_ERR)
		xgmac_dma_flush_tx_fifo(priv->base);
	if (status & TXDESC_IP_HEADER_ERR)
		x->tx_ip_header_error++;
	if (status & TXDESC_LOCAL_FAULT)
		x->tx_local_fault++;
	if (status & TXDESC_REMOTE_FAULT)
		x->tx_remote_fault++;
	if (status & TXDESC_PAYLOAD_CSUM_ERR)
		x->tx_payload_error++;

	return -1;
}

static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
{
	struct xgmac_extra_stats *x = &priv->xstats;
	int ret = CHECKSUM_UNNECESSARY;
	u32 status = le32_to_cpu(p->flags);
	u32 ext_status = le32_to_cpu(p->ext_status);

	if (status & RXDESC_DA_FILTER_FAIL) {
		netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
		x->rx_da_filter_fail++;
		return -1;
	}

	/* A frame must fit in a single descriptor (first and last segment) */
	if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
		return -1;

	/* The hardware cannot verify checksums for payloads it does not
	 * recognize as TCP/UDP/ICMP over IP */
	if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
	    !(ext_status & RXDESC_IP_PAYLOAD_MASK))
		ret = CHECKSUM_NONE;

	netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
		   (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);

	if (!(status & RXDESC_ERROR_SUMMARY))
		return ret;

	/* Drop frames with any fatal receive error */
	if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
		      RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
		return -1;

	if (status & RXDESC_EXT_STATUS) {
		if (ext_status & RXDESC_IP_HEADER_ERR)
			x->rx_ip_header_error++;
		if (ext_status & RXDESC_IP_PAYLOAD_ERR)
			x->rx_payload_error++;
		netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
			   ext_status);
		return CHECKSUM_NONE;
	}

	return ret;
}

static inline void xgmac_mac_enable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_CONTROL);
	value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
	writel(value, ioaddr + XGMAC_CONTROL);

	value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
	writel(value, ioaddr + XGMAC_DMA_CONTROL);
}

static inline void xgmac_mac_disable(void __iomem *ioaddr)
{
	u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
	value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
	writel(value, ioaddr + XGMAC_DMA_CONTROL);

	value = readl(ioaddr + XGMAC_CONTROL);
	value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
	writel(value, ioaddr + XGMAC_CONTROL);
}

static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 data;

	if (addr) {
		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
	} else {
		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
	}
}

static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
			       int num)
{
	u32 hi_addr, lo_addr;

	hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
	lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));

	addr[0] = lo_addr & 0xff;
	addr[1] = (lo_addr >> 8) & 0xff;
	addr[2] = (lo_addr >> 16) & 0xff;
	addr[3] = (lo_addr >> 24) & 0xff;
	addr[4] = hi_addr & 0xff;
	addr[5] = (hi_addr >> 8) & 0xff;
}

static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
{
	u32 reg;
	unsigned int flow = 0;

	priv->rx_pause = rx;
	priv->tx_pause = tx;

	if (rx || tx) {
		if (rx)
			flow |= XGMAC_FLOW_CTRL_RFE;
		if (tx)
			flow |= XGMAC_FLOW_CTRL_TFE;

		flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
		flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);

		writel(flow, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg |= XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	} else {
		writel(0, priv->base + XGMAC_FLOW_CTRL);

		reg = readl(priv->base + XGMAC_OMR);
		reg &= ~XGMAC_OMR_EFC;
		writel(reg, priv->base + XGMAC_OMR);
	}

	return 0;
}

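/*
 * Re-arm empty slots in the receive ring: allocate and DMA-map a fresh
 * skb for every descriptor without one, then hand the descriptor back to
 * the hardware by setting the OWN bit.
 */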
static void xgmac_rx_refill(struct xgmac_priv *priv)
{
	struct xgmac_dma_desc *p;
	dma_addr_t paddr;
	int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
		int entry = priv->rx_head;
		struct sk_buff *skb;

		p = priv->dma_rx + entry;

		if (priv->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
			if (unlikely(skb == NULL))
				break;

			paddr = dma_map_single(priv->device, skb->data,
					       priv->dma_buf_sz - NET_IP_ALIGN,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device, paddr)) {
				dev_kfree_skb_any(skb);
				break;
			}
			priv->rx_skbuff[entry] = skb;
			desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
		}

		netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
			   priv->rx_head, priv->rx_tail);

		priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
		desc_set_rx_owner(p);
	}
}

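/**
 * xgmac_dma_desc_rings_init - allocate and initialize the DMA rings
 * @dev: net device
 *
 * Allocates the rx/tx descriptor rings and skb pointer arrays, fills the
 * rx ring with buffers and programs the ring base addresses into the DMA
 * engine. Returns 0 on success or -ENOMEM on allocation failure.
 */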
static int xgmac_dma_desc_rings_init(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize;

	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);

	priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->rx_skbuff)
		return -ENOMEM;

	priv->dma_rx = dma_alloc_coherent(priv->device,
					  DMA_RX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_rx_phy,
					  GFP_KERNEL);
	if (!priv->dma_rx)
		goto err_dma_rx;

	priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
				  GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skb;

	priv->dma_tx = dma_alloc_coherent(priv->device,
					  DMA_TX_RING_SZ *
					  sizeof(struct xgmac_dma_desc),
					  &priv->dma_tx_phy,
					  GFP_KERNEL);
	if (!priv->dma_tx)
		goto err_dma_tx;

	netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
		   "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
		   priv->dma_rx, priv->dma_tx,
		   (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);

	priv->rx_tail = 0;
	priv->rx_head = 0;
	priv->dma_buf_sz = bfsize;
	desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
	xgmac_rx_refill(priv);

	priv->tx_tail = 0;
	priv->tx_head = 0;
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);

	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);

	return 0;

err_dma_tx:
	kfree(priv->tx_skbuff);
err_tx_skb:
	dma_free_coherent(priv->device,
			  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
			  priv->dma_rx, priv->dma_rx_phy);
err_dma_rx:
	kfree(priv->rx_skbuff);
	return -ENOMEM;
}

static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->rx_skbuff)
		return;

	for (i = 0; i < DMA_RX_RING_SZ; i++) {
		struct sk_buff *skb = priv->rx_skbuff[i];
		if (skb == NULL)
			continue;

		p = priv->dma_rx + i;
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_skbuff[i] = NULL;
	}
}

static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
	int i;
	struct xgmac_dma_desc *p;

	if (!priv->tx_skbuff)
		return;

	for (i = 0; i < DMA_TX_RING_SZ; i++) {
		if (priv->tx_skbuff[i] == NULL)
			continue;

		p = priv->dma_tx + i;
		if (desc_get_tx_fs(p))
			dma_unmap_single(priv->device, desc_get_buf_addr(p),
					 desc_get_buf_len(p), DMA_TO_DEVICE);
		else
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

		if (desc_get_tx_ls(p))
			dev_kfree_skb_any(priv->tx_skbuff[i]);
		priv->tx_skbuff[i] = NULL;
	}
}

static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
{
	xgmac_free_rx_skbufs(priv);
	xgmac_free_tx_skbufs(priv);

	if (priv->dma_tx) {
		dma_free_coherent(priv->device,
				  DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		priv->dma_tx = NULL;
	}
	if (priv->dma_rx) {
		dma_free_coherent(priv->device,
				  DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
		priv->dma_rx = NULL;
	}
	kfree(priv->rx_skbuff);
	priv->rx_skbuff = NULL;
	kfree(priv->tx_skbuff);
	priv->tx_skbuff = NULL;
}

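/*
 * Reclaim transmit descriptors the hardware has finished with: unmap the
 * buffers, free the skb on the last segment, and wake the queue once
 * enough ring space has been recovered.
 */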
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
		unsigned int entry = priv->tx_tail;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct xgmac_dma_desc *p = priv->dma_tx + entry;

		if (desc_get_owner(p))
			break;

		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
			   priv->tx_head, priv->tx_tail);

		if (desc_get_tx_fs(p))
			dma_unmap_single(priv->device, desc_get_buf_addr(p),
					 desc_get_buf_len(p), DMA_TO_DEVICE);
		else
			dma_unmap_page(priv->device, desc_get_buf_addr(p),
				       desc_get_buf_len(p), DMA_TO_DEVICE);

		if (desc_get_tx_ls(p)) {
			desc_get_tx_status(priv, p);
			dev_kfree_skb(skb);
		}

		priv->tx_skbuff[entry] = NULL;
		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
	}

	smp_mb();
	if (unlikely(netif_queue_stopped(priv->dev) &&
	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
		netif_wake_queue(priv->dev);
}

static void xgmac_tx_timeout_work(struct work_struct *work)
{
	u32 reg, value;
	struct xgmac_priv *priv =
		container_of(work, struct xgmac_priv, tx_timeout_work);

	napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	netif_tx_lock(priv->dev);

	reg = readl(priv->base + XGMAC_DMA_CONTROL);
	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
	do {
		value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
	} while (value && (value != 0x600000));

	xgmac_free_tx_skbufs(priv);
	desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
	priv->tx_tail = 0;
	priv->tx_head = 0;
	writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
	writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);

	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
	       priv->base + XGMAC_DMA_STATUS);

	netif_tx_unlock(priv->dev);
	netif_wake_queue(priv->dev);

	napi_enable(&priv->napi);

	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}

static int xgmac_hw_init(struct net_device *dev)
{
	u32 value, ctrl;
	int limit;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;

	value = DMA_BUS_MODE_SFT_RESET;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
	limit = 15000;
	while (limit-- &&
	       (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
		cpu_relax();
	if (limit < 0)
		return -EBUSY;

	value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
		(0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);

	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);

	writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);

	ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
		XGMAC_CONTROL_CAR;
	if (dev->features & NETIF_F_RXCSUM)
		ctrl |= XGMAC_CONTROL_IPC;
	writel(ctrl, ioaddr + XGMAC_CONTROL);

	writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);

	writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
	       XGMAC_OMR_RTC_256,
	       ioaddr + XGMAC_OMR);

	writel(1, ioaddr + XGMAC_MMC_CTRL);
	return 0;
}

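/**
 * xgmac_open - open entry point of the driver
 * @dev: pointer to the device structure
 *
 * Initializes the MAC and DMA, sets up the descriptor rings and enables
 * NAPI and the transmit queue. Returns 0 on success, a negative errno
 * otherwise.
 */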
static int xgmac_open(struct net_device *dev)
{
	int ret;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	/* Fall back to a random MAC address if none has been assigned yet */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		netdev_dbg(priv->dev, "generated random MAC address %pM\n",
			   dev->dev_addr);
	}

	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));

	/* Initialize the XGMAC; the DMA soft reset in here can time out */
	ret = xgmac_hw_init(dev);
	if (ret)
		return ret;
	xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
	xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);

	ret = xgmac_dma_desc_rings_init(dev);
	if (ret < 0)
		return ret;

	/* Enable the MAC Rx/Tx and the DMA engines */
	xgmac_mac_enable(ioaddr);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	/* Clear any stale status and enable the DMA interrupts */
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	return 0;
}

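/**
 * xgmac_stop - close entry point of the driver
 * @dev: pointer to the device structure
 *
 * Stops the queue and NAPI, disables the MAC/DMA and releases the rings.
 */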
static int xgmac_stop(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);

	if (readl(priv->base + XGMAC_DMA_INTR_ENA))
		napi_disable(&priv->napi);

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	xgmac_mac_disable(priv->base);

	xgmac_free_dma_desc_rings(priv);

	return 0;
}

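/**
 * xgmac_xmit - transmit entry point
 * @skb: socket buffer to send
 * @dev: net device
 *
 * Maps the linear part and every fragment of @skb into the tx ring, hands
 * ownership of the descriptors to the hardware and kicks the transmit
 * poll demand register.
 */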
static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	unsigned int entry;
	int i;
	u32 irq_flag;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct xgmac_dma_desc *desc, *first;
	unsigned int desc_flags;
	unsigned int len;
	dma_addr_t paddr;

	priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
	irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;

	desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
		TXDESC_CSUM_ALL : 0;
	entry = priv->tx_head;
	desc = priv->dma_tx + entry;
	first = desc;

	len = skb_headlen(skb);
	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, paddr)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	priv->tx_skbuff[entry] = skb;
	desc_set_buf_addr_and_size(desc, paddr, len);

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		len = frag->size;

		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, paddr))
			goto dma_err;

		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = skb;

		desc_set_buf_addr_and_size(desc, paddr, len);
		if (i < (nfrags - 1))
			desc_set_tx_owner(desc, desc_flags);
	}

	/* Interrupt on completion only for the latest segment */
	if (desc != first)
		desc_set_tx_owner(desc, desc_flags |
			TXDESC_LAST_SEG | irq_flag);
	else
		desc_flags |= TXDESC_LAST_SEG | irq_flag;

	/* Make sure the descriptor contents are fully written before the
	 * first descriptor is handed to the hardware */
	wmb();
	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

	writel(1, priv->base + XGMAC_DMA_TX_POLL);

	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

	/* Ensure the tx_head update is visible before checking ring space */
	smp_mb();
	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		/* Re-check: tx completion may have freed space meanwhile */
		smp_mb();
		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
			netif_start_queue(dev);
	}
	return NETDEV_TX_OK;

dma_err:
	entry = priv->tx_head;
	for ( ; i > 0; i--) {
		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
		desc = priv->dma_tx + entry;
		priv->tx_skbuff[entry] = NULL;
		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
			       desc_get_buf_len(desc), DMA_TO_DEVICE);
		desc_clear_tx_owner(desc);
	}
	desc = first;
	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
			 desc_get_buf_len(desc), DMA_TO_DEVICE);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

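/*
 * Process up to @limit completed receive descriptors: unmap the buffer,
 * hand the frame to the stack (GRO when the checksum was verified by
 * hardware) and refill the ring afterwards.
 */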
static int xgmac_rx(struct xgmac_priv *priv, int limit)
{
	unsigned int entry;
	unsigned int count = 0;
	struct xgmac_dma_desc *p;

	while (count < limit) {
		int ip_checksum;
		struct sk_buff *skb;
		int frame_len;

		if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
			break;

		entry = priv->rx_tail;
		p = priv->dma_rx + entry;
		if (desc_get_owner(p))
			break;

		count++;
		priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);

		ip_checksum = desc_get_rx_status(priv, p);
		if (ip_checksum < 0)
			continue;

		skb = priv->rx_skbuff[entry];
		if (unlikely(!skb)) {
			netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
			break;
		}
		priv->rx_skbuff[entry] = NULL;

		frame_len = desc_get_rx_frame_len(p);
		netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
			   frame_len, ip_checksum);

		skb_put(skb, frame_len);
		dma_unmap_single(priv->device, desc_get_buf_addr(p),
				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);

		skb->protocol = eth_type_trans(skb, priv->dev);
		skb->ip_summed = ip_checksum;
		if (ip_checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);
	}

	xgmac_rx_refill(priv);

	return count;
}

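/**
 * xgmac_poll - NAPI poll routine
 * @napi: napi context
 * @budget: maximum number of rx packets to process
 *
 * Reclaims completed tx descriptors, receives up to @budget frames and
 * re-enables the DMA interrupts once the ring has been drained.
 */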
static int xgmac_poll(struct napi_struct *napi, int budget)
{
	struct xgmac_priv *priv = container_of(napi,
					       struct xgmac_priv, napi);
	int work_done = 0;

	xgmac_tx_complete(priv);
	work_done = xgmac_rx(priv, budget);

	if (work_done < budget) {
		napi_complete(napi);
		__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
	}
	return work_done;
}

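/*
 * Transmit watchdog callback. The actual recovery (stopping the tx DMA
 * and reinitializing the tx ring) runs from process context in
 * xgmac_tx_timeout_work().
 */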
static void xgmac_tx_timeout(struct net_device *dev)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	schedule_work(&priv->tx_timeout_work);
}

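/*
 * Program the receive filters. Unicast and multicast addresses go into
 * the perfect-filter MAC address registers while they fit
 * (priv->max_macs) and fall back to the hash filter otherwise.
 */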
static void xgmac_set_rx_mode(struct net_device *dev)
{
	int i;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;
	unsigned int value = 0;
	u32 hash_filter[XGMAC_NUM_HASH];
	int reg = 1;
	struct netdev_hw_addr *ha;
	bool use_hash = false;

	netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
		   netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC)
		value |= XGMAC_FRAME_FILTER_PR;

	memset(hash_filter, 0, sizeof(hash_filter));

	if (netdev_uc_count(dev) > priv->max_macs) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
	}
	netdev_for_each_uc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			/* The 9-bit hash index selects one of the 16 hash
			 * registers (upper 4 bits) and a bit within that
			 * register (lower 5 bits). */
			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

	if (dev->flags & IFF_ALLMULTI) {
		value |= XGMAC_FRAME_FILTER_PM;
		goto out;
	}

	if ((netdev_mc_count(dev) + reg - 1) > priv->max_macs) {
		use_hash = true;
		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
	} else {
		use_hash = false;
	}
	netdev_for_each_mc_addr(ha, dev) {
		if (use_hash) {
			u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;

			hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		} else {
			xgmac_set_mac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

out:
	for (i = reg; i <= priv->max_macs; i++)
		xgmac_set_mac_addr(ioaddr, NULL, i);
	for (i = 0; i < XGMAC_NUM_HASH; i++)
		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));

	writel(value, ioaddr + XGMAC_FRAME_FILTER);
}

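/**
 * xgmac_change_mtu - change the MTU of the device
 * @dev: net device
 * @new_mtu: requested MTU
 *
 * Validates the new MTU and, when the interface is running, restarts it
 * so the receive buffers are re-sized accordingly.
 */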
static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct xgmac_priv *priv = netdev_priv(dev);
	int old_mtu;

	if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
		netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
		return -EINVAL;
	}

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu == new_mtu)
		return 0;

	if (!netif_running(dev))
		return 0;

	xgmac_stop(dev);
	return xgmac_open(dev);
}

static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->base;

	intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
	if (intr_status & XGMAC_INT_STAT_PMT) {
		netdev_dbg(priv->dev, "received Magic frame\n");
		/* clear the wake-up status by reading the PMT register */
		readl(ioaddr + XGMAC_PMT);
	}
	return IRQ_HANDLED;
}

static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
	u32 intr_status;
	struct net_device *dev = (struct net_device *)dev_id;
	struct xgmac_priv *priv = netdev_priv(dev);
	struct xgmac_extra_stats *x = &priv->xstats;

	/* read, mask and acknowledge the pending DMA interrupts */
	intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
	intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
	__raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);

	/* Abnormal (error) interrupts */
	if (unlikely(intr_status & DMA_STATUS_AIS)) {
		if (intr_status & DMA_STATUS_TJT) {
			netdev_err(priv->dev, "transmit jabber\n");
			x->tx_jabber++;
		}
		if (intr_status & DMA_STATUS_RU)
			x->rx_buf_unav++;
		if (intr_status & DMA_STATUS_RPS) {
			netdev_err(priv->dev, "receive process stopped\n");
			x->rx_process_stopped++;
		}
		if (intr_status & DMA_STATUS_ETI) {
			netdev_err(priv->dev, "transmit early interrupt\n");
			x->tx_early++;
		}
		if (intr_status & DMA_STATUS_TPS) {
			netdev_err(priv->dev, "transmit process stopped\n");
			x->tx_process_stopped++;
			schedule_work(&priv->tx_timeout_work);
		}
		if (intr_status & DMA_STATUS_FBI) {
			netdev_err(priv->dev, "fatal bus error\n");
			x->fatal_bus_error++;
		}
	}

	/* TX/RX normal interrupts: mask them and let NAPI do the work;
	 * xgmac_poll() restores the full interrupt mask when it is done */
	if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
		__raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

1454
1455#ifdef CONFIG_NET_POLL_CONTROLLER
1456
1457
1458static void xgmac_poll_controller(struct net_device *dev)
1459{
1460 disable_irq(dev->irq);
1461 xgmac_interrupt(dev->irq, dev);
1462 enable_irq(dev->irq);
1463}
1464#endif
1465
1466static struct rtnl_link_stats64 *
1467xgmac_get_stats64(struct net_device *dev,
1468 struct rtnl_link_stats64 *storage)
1469{
1470 struct xgmac_priv *priv = netdev_priv(dev);
1471 void __iomem *base = priv->base;
1472 u32 count;
1473
1474 spin_lock_bh(&priv->stats_lock);
1475 writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
1476
1477 storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
1478 storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
1479
1480 storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
1481 storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
1482 storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
1483 storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
1484 storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
1485
1486 storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
1487 storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
1488
1489 count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
1490 storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
1491 storage->tx_packets = count;
1492 storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
1493
1494 writel(0, base + XGMAC_MMC_CTRL);
1495 spin_unlock_bh(&priv->stats_lock);
1496 return storage;
1497}
1498
1499static int xgmac_set_mac_address(struct net_device *dev, void *p)
1500{
1501 struct xgmac_priv *priv = netdev_priv(dev);
1502 void __iomem *ioaddr = priv->base;
1503 struct sockaddr *addr = p;
1504
1505 if (!is_valid_ether_addr(addr->sa_data))
1506 return -EADDRNOTAVAIL;
1507
1508 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1509
1510 xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1511
1512 return 0;
1513}
1514
1515static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
1516{
1517 u32 ctrl;
1518 struct xgmac_priv *priv = netdev_priv(dev);
1519 void __iomem *ioaddr = priv->base;
1520 netdev_features_t changed = dev->features ^ features;
1521
1522 if (!(changed & NETIF_F_RXCSUM))
1523 return 0;
1524
1525 ctrl = readl(ioaddr + XGMAC_CONTROL);
1526 if (features & NETIF_F_RXCSUM)
1527 ctrl |= XGMAC_CONTROL_IPC;
1528 else
1529 ctrl &= ~XGMAC_CONTROL_IPC;
1530 writel(ctrl, ioaddr + XGMAC_CONTROL);
1531
1532 return 0;
1533}
1534
1535static const struct net_device_ops xgmac_netdev_ops = {
1536 .ndo_open = xgmac_open,
1537 .ndo_start_xmit = xgmac_xmit,
1538 .ndo_stop = xgmac_stop,
1539 .ndo_change_mtu = xgmac_change_mtu,
1540 .ndo_set_rx_mode = xgmac_set_rx_mode,
1541 .ndo_tx_timeout = xgmac_tx_timeout,
1542 .ndo_get_stats64 = xgmac_get_stats64,
1543#ifdef CONFIG_NET_POLL_CONTROLLER
1544 .ndo_poll_controller = xgmac_poll_controller,
1545#endif
1546 .ndo_set_mac_address = xgmac_set_mac_address,
1547 .ndo_set_features = xgmac_set_features,
1548};
1549
1550static int xgmac_ethtool_getsettings(struct net_device *dev,
1551 struct ethtool_cmd *cmd)
1552{
1553 cmd->autoneg = 0;
1554 cmd->duplex = DUPLEX_FULL;
1555 ethtool_cmd_speed_set(cmd, 10000);
1556 cmd->supported = 0;
1557 cmd->advertising = 0;
1558 cmd->transceiver = XCVR_INTERNAL;
1559 return 0;
1560}
1561
1562static void xgmac_get_pauseparam(struct net_device *netdev,
1563 struct ethtool_pauseparam *pause)
1564{
1565 struct xgmac_priv *priv = netdev_priv(netdev);
1566
1567 pause->rx_pause = priv->rx_pause;
1568 pause->tx_pause = priv->tx_pause;
1569}
1570
1571static int xgmac_set_pauseparam(struct net_device *netdev,
1572 struct ethtool_pauseparam *pause)
1573{
1574 struct xgmac_priv *priv = netdev_priv(netdev);
1575
1576 if (pause->autoneg)
1577 return -EINVAL;
1578
1579 return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
1580}
1581
1582struct xgmac_stats {
1583 char stat_string[ETH_GSTRING_LEN];
1584 int stat_offset;
1585 bool is_reg;
1586};
1587
1588#define XGMAC_STAT(m) \
1589 { #m, offsetof(struct xgmac_priv, xstats.m), false }
1590#define XGMAC_HW_STAT(m, reg_offset) \
1591 { #m, reg_offset, true }
1592
1593static const struct xgmac_stats xgmac_gstrings_stats[] = {
1594 XGMAC_STAT(tx_frame_flushed),
1595 XGMAC_STAT(tx_payload_error),
1596 XGMAC_STAT(tx_ip_header_error),
1597 XGMAC_STAT(tx_local_fault),
1598 XGMAC_STAT(tx_remote_fault),
1599 XGMAC_STAT(tx_early),
1600 XGMAC_STAT(tx_process_stopped),
1601 XGMAC_STAT(tx_jabber),
1602 XGMAC_STAT(rx_buf_unav),
1603 XGMAC_STAT(rx_process_stopped),
1604 XGMAC_STAT(rx_payload_error),
1605 XGMAC_STAT(rx_ip_header_error),
1606 XGMAC_STAT(rx_da_filter_fail),
1607 XGMAC_STAT(fatal_bus_error),
1608 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
1609 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
1610 XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
1611 XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
1612 XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
1613};
1614#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
1615
1616static void xgmac_get_ethtool_stats(struct net_device *dev,
1617 struct ethtool_stats *dummy,
1618 u64 *data)
1619{
1620 struct xgmac_priv *priv = netdev_priv(dev);
1621 void *p = priv;
1622 int i;
1623
1624 for (i = 0; i < XGMAC_STATS_LEN; i++) {
1625 if (xgmac_gstrings_stats[i].is_reg)
1626 *data++ = readl(priv->base +
1627 xgmac_gstrings_stats[i].stat_offset);
1628 else
1629 *data++ = *(u32 *)(p +
1630 xgmac_gstrings_stats[i].stat_offset);
1631 }
1632}
1633
1634static int xgmac_get_sset_count(struct net_device *netdev, int sset)
1635{
1636 switch (sset) {
1637 case ETH_SS_STATS:
1638 return XGMAC_STATS_LEN;
1639 default:
1640 return -EINVAL;
1641 }
1642}
1643
1644static void xgmac_get_strings(struct net_device *dev, u32 stringset,
1645 u8 *data)
1646{
1647 int i;
1648 u8 *p = data;
1649
1650 switch (stringset) {
1651 case ETH_SS_STATS:
1652 for (i = 0; i < XGMAC_STATS_LEN; i++) {
1653 memcpy(p, xgmac_gstrings_stats[i].stat_string,
1654 ETH_GSTRING_LEN);
1655 p += ETH_GSTRING_LEN;
1656 }
1657 break;
1658 default:
1659 WARN_ON(1);
1660 break;
1661 }
1662}
1663
1664static void xgmac_get_wol(struct net_device *dev,
1665 struct ethtool_wolinfo *wol)
1666{
1667 struct xgmac_priv *priv = netdev_priv(dev);
1668
1669 if (device_can_wakeup(priv->device)) {
1670 wol->supported = WAKE_MAGIC | WAKE_UCAST;
1671 wol->wolopts = priv->wolopts;
1672 }
1673}
1674
1675static int xgmac_set_wol(struct net_device *dev,
1676 struct ethtool_wolinfo *wol)
1677{
1678 struct xgmac_priv *priv = netdev_priv(dev);
1679 u32 support = WAKE_MAGIC | WAKE_UCAST;
1680
1681 if (!device_can_wakeup(priv->device))
1682 return -ENOTSUPP;
1683
1684 if (wol->wolopts & ~support)
1685 return -EINVAL;
1686
1687 priv->wolopts = wol->wolopts;
1688
1689 if (wol->wolopts) {
1690 device_set_wakeup_enable(priv->device, 1);
1691 enable_irq_wake(dev->irq);
1692 } else {
1693 device_set_wakeup_enable(priv->device, 0);
1694 disable_irq_wake(dev->irq);
1695 }
1696
1697 return 0;
1698}
1699
1700static const struct ethtool_ops xgmac_ethtool_ops = {
1701 .get_settings = xgmac_ethtool_getsettings,
1702 .get_link = ethtool_op_get_link,
1703 .get_pauseparam = xgmac_get_pauseparam,
1704 .set_pauseparam = xgmac_set_pauseparam,
1705 .get_ethtool_stats = xgmac_get_ethtool_stats,
1706 .get_strings = xgmac_get_strings,
1707 .get_wol = xgmac_get_wol,
1708 .set_wol = xgmac_set_wol,
1709 .get_sset_count = xgmac_get_sset_count,
1710};
1711
1712
1713
1714
1715
1716
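/**
 * xgmac_probe - platform driver probe
 * @pdev: platform device
 *
 * Maps the register window, requests the DMA and PMT interrupts, reads
 * the hardware-provisioned MAC address and registers the network device.
 */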
static int xgmac_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct xgmac_priv *priv = NULL;
	u32 uid;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), pdev->name))
		return -EBUSY;

	ndev = alloc_etherdev(sizeof(struct xgmac_priv));
	if (!ndev) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);
	priv = netdev_priv(ndev);
	platform_set_drvdata(pdev, ndev);
	ether_setup(ndev);
	ndev->netdev_ops = &xgmac_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);

	priv->device = &pdev->dev;
	priv->dev = ndev;
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	priv->base = ioremap(res->start, resource_size(res));
	if (!priv->base) {
		netdev_err(ndev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err_io;
	}

	uid = readl(priv->base + XGMAC_VERSION);
	netdev_info(ndev, "h/w version is 0x%x\n", uid);

	/* Figure out how many MAC address filter registers are implemented */
	writel(1, priv->base + XGMAC_ADDR_HIGH(31));
	if (readl(priv->base + XGMAC_ADDR_HIGH(31)) == 1)
		priv->max_macs = 31;
	else
		priv->max_macs = 7;

	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		netdev_err(ndev, "No irq resource\n");
		ret = ndev->irq;
		goto err_irq;
	}

	ret = request_irq(ndev->irq, xgmac_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   ndev->irq, ret);
		goto err_irq;
	}

	priv->pmt_irq = platform_get_irq(pdev, 1);
	if (priv->pmt_irq < 0) {
		netdev_err(ndev, "No pmt irq resource\n");
		ret = priv->pmt_irq;
		goto err_pmt_irq;
	}

	ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
			  dev_name(&pdev->dev), ndev);
	if (ret < 0) {
		netdev_err(ndev, "Could not request irq %d - ret %d\n",
			   priv->pmt_irq, ret);
		goto err_pmt_irq;
	}

	device_set_wakeup_capable(&pdev->dev, 1);
	if (device_can_wakeup(priv->device))
		priv->wolopts = WAKE_MAGIC;

	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				     NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features;
	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* Get the hardware-provisioned MAC address */
	xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
	if (!is_valid_ether_addr(ndev->dev_addr))
		netdev_warn(ndev, "MAC address %pM not valid\n",
			    ndev->dev_addr);

	netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
	ret = register_netdev(ndev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	netif_napi_del(&priv->napi);
	free_irq(priv->pmt_irq, ndev);
err_pmt_irq:
	free_irq(ndev->irq, ndev);
err_irq:
	iounmap(priv->base);
err_io:
	free_netdev(ndev);
err_alloc:
	release_mem_region(res->start, resource_size(res));
	return ret;
}

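/**
 * xgmac_remove - platform driver remove
 * @pdev: platform device
 *
 * Disables the hardware and releases every resource taken in probe.
 */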
static int xgmac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xgmac_priv *priv = netdev_priv(ndev);
	struct resource *res;

	xgmac_mac_disable(priv->base);

	/* Free the IRQ lines */
	free_irq(ndev->irq, ndev);
	free_irq(priv->pmt_irq, ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);

	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
{
	unsigned int pmt = 0;

	if (mode & WAKE_MAGIC)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
	if (mode & WAKE_UCAST)
		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;

	writel(pmt, ioaddr + XGMAC_PMT);
}

static int xgmac_suspend(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	u32 value;

	if (!ndev || !netif_running(ndev))
		return 0;

	netif_device_detach(ndev);
	napi_disable(&priv->napi);
	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

	if (device_may_wakeup(priv->device)) {
		/* Stop TX/RX DMA only; the MAC stays on to detect wake-up frames */
		value = readl(priv->base + XGMAC_DMA_CONTROL);
		value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
		writel(value, priv->base + XGMAC_DMA_CONTROL);

		xgmac_pmt(priv->base, priv->wolopts);
	} else
		xgmac_mac_disable(priv->base);

	return 0;
}

static int xgmac_resume(struct device *dev)
{
	struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
	struct xgmac_priv *priv = netdev_priv(ndev);
	void __iomem *ioaddr = priv->base;

	if (!netif_running(ndev))
		return 0;

	xgmac_pmt(ioaddr, 0);

	/* Enable the MAC and DMA */
	xgmac_mac_enable(ioaddr);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);

	netif_device_attach(ndev);
	napi_enable(&priv->napi);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);

static const struct of_device_id xgmac_of_match[] = {
	{ .compatible = "calxeda,hb-xgmac", },
	{},
};
MODULE_DEVICE_TABLE(of, xgmac_of_match);

static struct platform_driver xgmac_driver = {
	.driver = {
		.name = "calxedaxgmac",
		.of_match_table = xgmac_of_match,
		.pm = &xgmac_pm_ops,
	},
	.probe = xgmac_probe,
	.remove = xgmac_remove,
};

module_platform_driver(xgmac_driver);

MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
MODULE_LICENSE("GPL v2");