1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/interrupt.h>
28#include <linux/errno.h>
29#include <linux/ioport.h>
30#include <linux/crc32.h>
31#include <linux/platform_device.h>
32#include <linux/spinlock.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/clk.h>
36#include <linux/workqueue.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/skbuff.h>
40#include <linux/phy.h>
41#include <linux/dma-mapping.h>
42#include <linux/of.h>
43#include <linux/of_net.h>
44#include <linux/types.h>
45
46#include <linux/io.h>
47#include <mach/board.h>
48#include <mach/platform.h>
49#include <mach/hardware.h>
50
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"

#define ENET_MAXF_SIZE 1536	/* per-frame DMA buffer size in bytes */
#define ENET_RX_DESC 48		/* number of RX ring descriptors */
#define ENET_TX_DESC 16		/* number of TX ring descriptors */

#define NAPI_WEIGHT 16
59
60
61
62
/*
 * Ethernet MAC controller register offsets, relative to the mapped
 * register base (x).
 */
#define LPC_ENET_MAC1(x) (x + 0x000)
#define LPC_ENET_MAC2(x) (x + 0x004)
#define LPC_ENET_IPGT(x) (x + 0x008)
#define LPC_ENET_IPGR(x) (x + 0x00C)
#define LPC_ENET_CLRT(x) (x + 0x010)
#define LPC_ENET_MAXF(x) (x + 0x014)
#define LPC_ENET_SUPP(x) (x + 0x018)
#define LPC_ENET_TEST(x) (x + 0x01C)
/* MII management registers */
#define LPC_ENET_MCFG(x) (x + 0x020)
#define LPC_ENET_MCMD(x) (x + 0x024)
#define LPC_ENET_MADR(x) (x + 0x028)
#define LPC_ENET_MWTD(x) (x + 0x02C)
#define LPC_ENET_MRDD(x) (x + 0x030)
#define LPC_ENET_MIND(x) (x + 0x034)
/* Station address registers (2 octets each) */
#define LPC_ENET_SA0(x) (x + 0x040)
#define LPC_ENET_SA1(x) (x + 0x044)
#define LPC_ENET_SA2(x) (x + 0x048)
/* Control and DMA descriptor-ring registers */
#define LPC_ENET_COMMAND(x) (x + 0x100)
#define LPC_ENET_STATUS(x) (x + 0x104)
#define LPC_ENET_RXDESCRIPTOR(x) (x + 0x108)
#define LPC_ENET_RXSTATUS(x) (x + 0x10C)
#define LPC_ENET_RXDESCRIPTORNUMBER(x) (x + 0x110)
#define LPC_ENET_RXPRODUCEINDEX(x) (x + 0x114)
#define LPC_ENET_RXCONSUMEINDEX(x) (x + 0x118)
#define LPC_ENET_TXDESCRIPTOR(x) (x + 0x11C)
#define LPC_ENET_TXSTATUS(x) (x + 0x120)
#define LPC_ENET_TXDESCRIPTORNUMBER(x) (x + 0x124)
#define LPC_ENET_TXPRODUCEINDEX(x) (x + 0x128)
#define LPC_ENET_TXCONSUMEINDEX(x) (x + 0x12C)
#define LPC_ENET_TSV0(x) (x + 0x158)
#define LPC_ENET_TSV1(x) (x + 0x15C)
#define LPC_ENET_RSV(x) (x + 0x160)
#define LPC_ENET_FLOWCONTROLCOUNTER(x) (x + 0x170)
#define LPC_ENET_FLOWCONTROLSTATUS(x) (x + 0x174)
/* RX filter and WoL registers */
#define LPC_ENET_RXFILTER_CTRL(x) (x + 0x200)
#define LPC_ENET_RXFILTERWOLSTATUS(x) (x + 0x204)
#define LPC_ENET_RXFILTERWOLCLEAR(x) (x + 0x208)
#define LPC_ENET_HASHFILTERL(x) (x + 0x210)
#define LPC_ENET_HASHFILTERH(x) (x + 0x214)
/* Module interrupt and power-down registers */
#define LPC_ENET_INTSTATUS(x) (x + 0xFE0)
#define LPC_ENET_INTENABLE(x) (x + 0xFE4)
#define LPC_ENET_INTCLEAR(x) (x + 0xFE8)
#define LPC_ENET_INTSET(x) (x + 0xFEC)
#define LPC_ENET_POWERDOWN(x) (x + 0xFF4)
107
108
109
110
111#define LPC_MAC1_RECV_ENABLE (1 << 0)
112#define LPC_MAC1_PASS_ALL_RX_FRAMES (1 << 1)
113#define LPC_MAC1_RX_FLOW_CONTROL (1 << 2)
114#define LPC_MAC1_TX_FLOW_CONTROL (1 << 3)
115#define LPC_MAC1_LOOPBACK (1 << 4)
116#define LPC_MAC1_RESET_TX (1 << 8)
117#define LPC_MAC1_RESET_MCS_TX (1 << 9)
118#define LPC_MAC1_RESET_RX (1 << 10)
119#define LPC_MAC1_RESET_MCS_RX (1 << 11)
120#define LPC_MAC1_SIMULATION_RESET (1 << 14)
121#define LPC_MAC1_SOFT_RESET (1 << 15)
122
123
124
125
126#define LPC_MAC2_FULL_DUPLEX (1 << 0)
127#define LPC_MAC2_FRAME_LENGTH_CHECKING (1 << 1)
128#define LPC_MAC2_HUGH_LENGTH_CHECKING (1 << 2)
129#define LPC_MAC2_DELAYED_CRC (1 << 3)
130#define LPC_MAC2_CRC_ENABLE (1 << 4)
131#define LPC_MAC2_PAD_CRC_ENABLE (1 << 5)
132#define LPC_MAC2_VLAN_PAD_ENABLE (1 << 6)
133#define LPC_MAC2_AUTO_DETECT_PAD_ENABLE (1 << 7)
134#define LPC_MAC2_PURE_PREAMBLE_ENFORCEMENT (1 << 8)
135#define LPC_MAC2_LONG_PREAMBLE_ENFORCEMENT (1 << 9)
136#define LPC_MAC2_NO_BACKOFF (1 << 12)
137#define LPC_MAC2_BACK_PRESSURE (1 << 13)
138#define LPC_MAC2_EXCESS_DEFER (1 << 14)
139
140
141
142
143#define LPC_IPGT_LOAD(n) ((n) & 0x7F)
144
145
146
147
148#define LPC_IPGR_LOAD_PART2(n) ((n) & 0x7F)
149#define LPC_IPGR_LOAD_PART1(n) (((n) & 0x7F) << 8)
150
151
152
153
154#define LPC_CLRT_LOAD_RETRY_MAX(n) ((n) & 0xF)
155#define LPC_CLRT_LOAD_COLLISION_WINDOW(n) (((n) & 0x3F) << 8)
156
157
158
159
160#define LPC_MAXF_LOAD_MAX_FRAME_LEN(n) ((n) & 0xFFFF)
161
162
163
164
165#define LPC_SUPP_SPEED (1 << 8)
166#define LPC_SUPP_RESET_RMII (1 << 11)
167
168
169
170
171#define LPC_TEST_SHORTCUT_PAUSE_QUANTA (1 << 0)
172#define LPC_TEST_PAUSE (1 << 1)
173#define LPC_TEST_BACKPRESSURE (1 << 2)
174
175
176
177
178#define LPC_MCFG_SCAN_INCREMENT (1 << 0)
179#define LPC_MCFG_SUPPRESS_PREAMBLE (1 << 1)
180#define LPC_MCFG_CLOCK_SELECT(n) (((n) & 0x7) << 2)
181#define LPC_MCFG_CLOCK_HOST_DIV_4 0
182#define LPC_MCFG_CLOCK_HOST_DIV_6 2
183#define LPC_MCFG_CLOCK_HOST_DIV_8 3
184#define LPC_MCFG_CLOCK_HOST_DIV_10 4
185#define LPC_MCFG_CLOCK_HOST_DIV_14 5
186#define LPC_MCFG_CLOCK_HOST_DIV_20 6
187#define LPC_MCFG_CLOCK_HOST_DIV_28 7
188#define LPC_MCFG_RESET_MII_MGMT (1 << 15)
189
190
191
192
193#define LPC_MCMD_READ (1 << 0)
194#define LPC_MCMD_SCAN (1 << 1)
195
196
197
198
199#define LPC_MADR_REGISTER_ADDRESS(n) ((n) & 0x1F)
200#define LPC_MADR_PHY_0ADDRESS(n) (((n) & 0x1F) << 8)
201
202
203
204
205#define LPC_MWDT_WRITE(n) ((n) & 0xFFFF)
206
207
208
209
210#define LPC_MRDD_READ_MASK 0xFFFF
211
212
213
214
215#define LPC_MIND_BUSY (1 << 0)
216#define LPC_MIND_SCANNING (1 << 1)
217#define LPC_MIND_NOT_VALID (1 << 2)
218#define LPC_MIND_MII_LINK_FAIL (1 << 3)
219
220
221
222
223#define LPC_COMMAND_RXENABLE (1 << 0)
224#define LPC_COMMAND_TXENABLE (1 << 1)
225#define LPC_COMMAND_REG_RESET (1 << 3)
226#define LPC_COMMAND_TXRESET (1 << 4)
227#define LPC_COMMAND_RXRESET (1 << 5)
228#define LPC_COMMAND_PASSRUNTFRAME (1 << 6)
229#define LPC_COMMAND_PASSRXFILTER (1 << 7)
230#define LPC_COMMAND_TXFLOWCONTROL (1 << 8)
231#define LPC_COMMAND_RMII (1 << 9)
232#define LPC_COMMAND_FULLDUPLEX (1 << 10)
233
234
235
236
237#define LPC_STATUS_RXACTIVE (1 << 0)
238#define LPC_STATUS_TXACTIVE (1 << 1)
239
240
241
242
243#define LPC_TSV0_CRC_ERROR (1 << 0)
244#define LPC_TSV0_LENGTH_CHECK_ERROR (1 << 1)
245#define LPC_TSV0_LENGTH_OUT_OF_RANGE (1 << 2)
246#define LPC_TSV0_DONE (1 << 3)
247#define LPC_TSV0_MULTICAST (1 << 4)
248#define LPC_TSV0_BROADCAST (1 << 5)
249#define LPC_TSV0_PACKET_DEFER (1 << 6)
250#define LPC_TSV0_ESCESSIVE_DEFER (1 << 7)
251#define LPC_TSV0_ESCESSIVE_COLLISION (1 << 8)
252#define LPC_TSV0_LATE_COLLISION (1 << 9)
253#define LPC_TSV0_GIANT (1 << 10)
254#define LPC_TSV0_UNDERRUN (1 << 11)
255#define LPC_TSV0_TOTAL_BYTES(n) (((n) >> 12) & 0xFFFF)
256#define LPC_TSV0_CONTROL_FRAME (1 << 28)
257#define LPC_TSV0_PAUSE (1 << 29)
258#define LPC_TSV0_BACKPRESSURE (1 << 30)
259#define LPC_TSV0_VLAN (1 << 31)
260
261
262
263
264#define LPC_TSV1_TRANSMIT_BYTE_COUNT(n) ((n) & 0xFFFF)
265#define LPC_TSV1_COLLISION_COUNT(n) (((n) >> 16) & 0xF)
266
267
268
269
270#define LPC_RSV_RECEIVED_BYTE_COUNT(n) ((n) & 0xFFFF)
271#define LPC_RSV_RXDV_EVENT_IGNORED (1 << 16)
272#define LPC_RSV_RXDV_EVENT_PREVIOUSLY_SEEN (1 << 17)
273#define LPC_RSV_CARRIER_EVNT_PREVIOUS_SEEN (1 << 18)
274#define LPC_RSV_RECEIVE_CODE_VIOLATION (1 << 19)
275#define LPC_RSV_CRC_ERROR (1 << 20)
276#define LPC_RSV_LENGTH_CHECK_ERROR (1 << 21)
277#define LPC_RSV_LENGTH_OUT_OF_RANGE (1 << 22)
278#define LPC_RSV_RECEIVE_OK (1 << 23)
279#define LPC_RSV_MULTICAST (1 << 24)
280#define LPC_RSV_BROADCAST (1 << 25)
281#define LPC_RSV_DRIBBLE_NIBBLE (1 << 26)
282#define LPC_RSV_CONTROL_FRAME (1 << 27)
283#define LPC_RSV_PAUSE (1 << 28)
284#define LPC_RSV_UNSUPPORTED_OPCODE (1 << 29)
285#define LPC_RSV_VLAN (1 << 30)
286
287
288
289
290#define LPC_FCCR_MIRRORCOUNTER(n) ((n) & 0xFFFF)
291#define LPC_FCCR_PAUSETIMER(n) (((n) >> 16) & 0xFFFF)
292
293
294
295
296#define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF)
297
298
299
300
301
302#define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0)
303#define LPC_RXFLTRW_ACCEPTUBROADCAST (1 << 1)
304#define LPC_RXFLTRW_ACCEPTUMULTICAST (1 << 2)
305#define LPC_RXFLTRW_ACCEPTUNICASTHASH (1 << 3)
306#define LPC_RXFLTRW_ACCEPTUMULTICASTHASH (1 << 4)
307#define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5)
308
309
310
311
312#define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12)
313#define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13)
314
315
316
317
318#define LPC_RXFLTRWSTS_RXFILTERWOL (1 << 7)
319#define LPC_RXFLTRWSTS_MAGICPACKETWOL (1 << 8)
320
321
322
323
324
325#define LPC_MACINT_RXOVERRUNINTEN (1 << 0)
326#define LPC_MACINT_RXERRORONINT (1 << 1)
327#define LPC_MACINT_RXFINISHEDINTEN (1 << 2)
328#define LPC_MACINT_RXDONEINTEN (1 << 3)
329#define LPC_MACINT_TXUNDERRUNINTEN (1 << 4)
330#define LPC_MACINT_TXERRORINTEN (1 << 5)
331#define LPC_MACINT_TXFINISHEDINTEN (1 << 6)
332#define LPC_MACINT_TXDONEINTEN (1 << 7)
333#define LPC_MACINT_SOFTINTEN (1 << 12)
334#define LPC_MACINT_WAKEUPINTEN (1 << 13)
335
336
337
338
339#define LPC_POWERDOWN_MACAHB (1 << 31)
340
341static phy_interface_t lpc_phy_interface_mode(struct device *dev)
342{
343 if (dev && dev->of_node) {
344 const char *mode = of_get_property(dev->of_node,
345 "phy-mode", NULL);
346 if (mode && !strcmp(mode, "mii"))
347 return PHY_INTERFACE_MODE_MII;
348 }
349 return PHY_INTERFACE_MODE_RMII;
350}
351
352static bool use_iram_for_net(struct device *dev)
353{
354 if (dev && dev->of_node)
355 return of_property_read_bool(dev->of_node, "use-iram");
356 return false;
357}
358
359
/* RX descriptor statusinfo word: size field and status bits */
#define RXSTATUS_SIZE 0x000007FF
#define RXSTATUS_CONTROL (1 << 18)
#define RXSTATUS_VLAN (1 << 19)
#define RXSTATUS_FILTER (1 << 20)
#define RXSTATUS_MULTICAST (1 << 21)
#define RXSTATUS_BROADCAST (1 << 22)
#define RXSTATUS_CRC (1 << 23)
#define RXSTATUS_SYMBOL (1 << 24)
#define RXSTATUS_LENGTH (1 << 25)
#define RXSTATUS_RANGE (1 << 26)
#define RXSTATUS_ALIGN (1 << 27)
#define RXSTATUS_OVERRUN (1 << 28)
#define RXSTATUS_NODESC (1 << 29)
#define RXSTATUS_LAST (1 << 30)
#define RXSTATUS_ERROR (1 << 31)

/* All RX status bits that count as receive errors */
#define RXSTATUS_STATUS_ERROR \
	(RXSTATUS_NODESC | RXSTATUS_OVERRUN | RXSTATUS_ALIGN | \
	 RXSTATUS_RANGE | RXSTATUS_LENGTH | RXSTATUS_SYMBOL | RXSTATUS_CRC)

/* RX descriptor control word: buffer size field and interrupt enable */
#define RXDESC_CONTROL_SIZE 0x000007FF
#define RXDESC_CONTROL_INT (1 << 31)

/* TX descriptor status word: collision count and error bits */
#define TXSTATUS_COLLISIONS_GET(x) (((x) >> 21) & 0xF)
#define TXSTATUS_DEFER (1 << 25)
#define TXSTATUS_EXCESSDEFER (1 << 26)
#define TXSTATUS_EXCESSCOLL (1 << 27)
#define TXSTATUS_LATECOLL (1 << 28)
#define TXSTATUS_UNDERRUN (1 << 29)
#define TXSTATUS_NODESC (1 << 30)
#define TXSTATUS_ERROR (1 << 31)

/* TX descriptor control word: frame size field and per-frame options */
#define TXDESC_CONTROL_SIZE 0x000007FF
#define TXDESC_CONTROL_OVERRIDE (1 << 26)
#define TXDESC_CONTROL_HUGE (1 << 27)
#define TXDESC_CONTROL_PAD (1 << 28)
#define TXDESC_CONTROL_CRC (1 << 29)
#define TXDESC_CONTROL_LAST (1 << 30)
#define TXDESC_CONTROL_INT (1 << 31)
402
403
404
405
/* Hardware TX/RX descriptor: buffer pointer + control word */
struct txrx_desc_t {
	__le32 packet;		/* physical address of the packet buffer */
	__le32 control;		/* size field and control bits */
};
/* Hardware RX status entry, one per RX descriptor */
struct rx_status_t {
	__le32 statusinfo;	/* frame size and RXSTATUS_* bits */
	__le32 statushashcrc;	/* hash CRC of the received frame */
};
414
415
416
417
/*
 * Per-device driver private data
 */
struct netdata_local {
	struct platform_device *pdev;
	struct net_device *ndev;
	spinlock_t lock;		/* guards register access and ring state */
	void __iomem *net_base;		/* mapped MAC register base */
	u32 msg_enable;			/* ethtool message-level bitmask */
	unsigned int skblen[ENET_TX_DESC]; /* length of each queued TX frame */
	unsigned int last_tx_idx;	/* next TX slot to reap */
	unsigned int num_used_tx_buffs;	/* TX slots currently in flight */
	struct mii_bus *mii_bus;
	struct clk *clk;
	dma_addr_t dma_buff_base_p;	/* physical base of DMA region */
	void *dma_buff_base_v;		/* virtual base of DMA region */
	size_t dma_buff_size;
	struct txrx_desc_t *tx_desc_v;	/* TX descriptor ring */
	u32 *tx_stat_v;			/* TX status ring */
	void *tx_buff_v;		/* TX packet buffers */
	struct txrx_desc_t *rx_desc_v;	/* RX descriptor ring */
	struct rx_status_t *rx_stat_v;	/* RX status ring */
	void *rx_buff_v;		/* RX packet buffers */
	int link;			/* last seen PHY link state */
	int speed;			/* last seen PHY speed */
	int duplex;			/* last seen PHY duplex (-1 = unknown) */
	struct napi_struct napi;
};
443
444
445
446
447static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
448{
449 u32 tmp;
450
451
452 tmp = mac[0] | ((u32)mac[1] << 8);
453 writel(tmp, LPC_ENET_SA2(pldat->net_base));
454 tmp = mac[2] | ((u32)mac[3] << 8);
455 writel(tmp, LPC_ENET_SA1(pldat->net_base));
456 tmp = mac[4] | ((u32)mac[5] << 8);
457 writel(tmp, LPC_ENET_SA0(pldat->net_base));
458
459 netdev_dbg(pldat->ndev, "Ethernet MAC address %pM\n", mac);
460}
461
462static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
463{
464 u32 tmp;
465
466
467 tmp = readl(LPC_ENET_SA2(pldat->net_base));
468 mac[0] = tmp & 0xFF;
469 mac[1] = tmp >> 8;
470 tmp = readl(LPC_ENET_SA1(pldat->net_base));
471 mac[2] = tmp & 0xFF;
472 mac[3] = tmp >> 8;
473 tmp = readl(LPC_ENET_SA0(pldat->net_base));
474 mac[4] = tmp & 0xFF;
475 mac[5] = tmp >> 8;
476}
477
478static void __lpc_params_setup(struct netdata_local *pldat)
479{
480 u32 tmp;
481
482 if (pldat->duplex == DUPLEX_FULL) {
483 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
484 tmp |= LPC_MAC2_FULL_DUPLEX;
485 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
486 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
487 tmp |= LPC_COMMAND_FULLDUPLEX;
488 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
489 writel(LPC_IPGT_LOAD(0x15), LPC_ENET_IPGT(pldat->net_base));
490 } else {
491 tmp = readl(LPC_ENET_MAC2(pldat->net_base));
492 tmp &= ~LPC_MAC2_FULL_DUPLEX;
493 writel(tmp, LPC_ENET_MAC2(pldat->net_base));
494 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
495 tmp &= ~LPC_COMMAND_FULLDUPLEX;
496 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
497 writel(LPC_IPGT_LOAD(0x12), LPC_ENET_IPGT(pldat->net_base));
498 }
499
500 if (pldat->speed == SPEED_100)
501 writel(LPC_SUPP_SPEED, LPC_ENET_SUPP(pldat->net_base));
502 else
503 writel(0, LPC_ENET_SUPP(pldat->net_base));
504}
505
/* Put the MAC logic and the TX/RX datapaths into reset. */
static void __lpc_eth_reset(struct netdata_local *pldat)
{
	/* Reset all MAC logic */
	writel((LPC_MAC1_RESET_TX | LPC_MAC1_RESET_MCS_TX | LPC_MAC1_RESET_RX |
		LPC_MAC1_RESET_MCS_RX | LPC_MAC1_SIMULATION_RESET |
		LPC_MAC1_SOFT_RESET), LPC_ENET_MAC1(pldat->net_base));
	writel((LPC_COMMAND_REG_RESET | LPC_COMMAND_TXRESET |
		LPC_COMMAND_RXRESET), LPC_ENET_COMMAND(pldat->net_base));
}
515
/*
 * Reset the MII management hardware and set the MDIO clock divider.
 * Always returns 0.
 */
static int __lpc_mii_mngt_reset(struct netdata_local *pldat)
{
	/* Reset MII management hardware */
	writel(LPC_MCFG_RESET_MII_MGMT, LPC_ENET_MCFG(pldat->net_base));

	/* Host clock divided by 28: slowest (safest) MDIO clock rate */
	writel(LPC_MCFG_CLOCK_SELECT(LPC_MCFG_CLOCK_HOST_DIV_28),
	       LPC_ENET_MCFG(pldat->net_base));

	return 0;
}
527
528static inline phys_addr_t __va_to_pa(void *addr, struct netdata_local *pldat)
529{
530 phys_addr_t phaddr;
531
532 phaddr = addr - pldat->dma_buff_base_v;
533 phaddr += pldat->dma_buff_base_p;
534
535 return phaddr;
536}
537
/* Enable the RX-done and TX-done MAC interrupts. */
static void lpc_eth_enable_int(void __iomem *regbase)
{
	writel((LPC_MACINT_RXDONEINTEN | LPC_MACINT_TXDONEINTEN),
	       LPC_ENET_INTENABLE(regbase));
}
543
/* Mask all MAC interrupts. */
static void lpc_eth_disable_int(void __iomem *regbase)
{
	writel(0, LPC_ENET_INTENABLE(regbase));
}
548
549
/*
 * Carve the single DMA buffer region into TX/RX descriptor, status and
 * packet-buffer areas (each area 16-byte aligned), initialize every
 * descriptor to point at its buffer, and program the ring base
 * addresses and sizes into the MAC.
 */
static void __lpc_txrx_desc_setup(struct netdata_local *pldat)
{
	u32 *ptxstat;
	void *tbuff;
	int i;
	struct txrx_desc_t *ptxrxdesc;
	struct rx_status_t *prxstat;

	tbuff = PTR_ALIGN(pldat->dma_buff_base_v, 16);

	/* Setup TX descriptors, status, and buffers */
	pldat->tx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_TX_DESC;

	pldat->tx_stat_v = tbuff;
	tbuff += sizeof(u32) * ENET_TX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->tx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_TX_DESC;

	/* Setup RX descriptors, status, and buffers */
	pldat->rx_desc_v = tbuff;
	tbuff += sizeof(struct txrx_desc_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_stat_v = tbuff;
	tbuff += sizeof(struct rx_status_t) * ENET_RX_DESC;

	tbuff = PTR_ALIGN(tbuff, 16);
	pldat->rx_buff_v = tbuff;
	tbuff += ENET_MAXF_SIZE * ENET_RX_DESC;

	/* Map the TX descriptors to the TX buffers */
	for (i = 0; i < ENET_TX_DESC; i++) {
		ptxstat = &pldat->tx_stat_v[i];
		ptxrxdesc = &pldat->tx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->tx_buff_v + i * ENET_MAXF_SIZE, pldat);
		ptxrxdesc->control = 0;
		*ptxstat = 0;
	}

	/* Map the RX descriptors to the RX buffers */
	for (i = 0; i < ENET_RX_DESC; i++) {
		prxstat = &pldat->rx_stat_v[i];
		ptxrxdesc = &pldat->rx_desc_v[i];

		ptxrxdesc->packet = __va_to_pa(
				pldat->rx_buff_v + i * ENET_MAXF_SIZE, pldat);
		/* Interrupt on receive; size field is (buffer size - 1) */
		ptxrxdesc->control = RXDESC_CONTROL_INT | (ENET_MAXF_SIZE - 1);
		prxstat->statusinfo = 0;
		prxstat->statushashcrc = 0;
	}

	/* Setup base addresses in hardware to point to buffers and
	 * descriptors
	 */
	writel((ENET_TX_DESC - 1),
	       LPC_ENET_TXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->tx_desc_v, pldat),
	       LPC_ENET_TXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->tx_stat_v, pldat),
	       LPC_ENET_TXSTATUS(pldat->net_base));
	writel((ENET_RX_DESC - 1),
	       LPC_ENET_RXDESCRIPTORNUMBER(pldat->net_base));
	writel(__va_to_pa(pldat->rx_desc_v, pldat),
	       LPC_ENET_RXDESCRIPTOR(pldat->net_base));
	writel(__va_to_pa(pldat->rx_stat_v, pldat),
	       LPC_ENET_RXSTATUS(pldat->net_base));
}
622
623static void __lpc_eth_init(struct netdata_local *pldat)
624{
625 u32 tmp;
626
627
628 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
629 tmp &= ~LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
630 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
631 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
632 tmp &= ~LPC_MAC1_RECV_ENABLE;
633 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
634
635
636 writel(LPC_MAC1_PASS_ALL_RX_FRAMES, LPC_ENET_MAC1(pldat->net_base));
637 writel((LPC_MAC2_PAD_CRC_ENABLE | LPC_MAC2_CRC_ENABLE),
638 LPC_ENET_MAC2(pldat->net_base));
639 writel(ENET_MAXF_SIZE, LPC_ENET_MAXF(pldat->net_base));
640
641
642 writel((LPC_CLRT_LOAD_RETRY_MAX(0xF) |
643 LPC_CLRT_LOAD_COLLISION_WINDOW(0x37)),
644 LPC_ENET_CLRT(pldat->net_base));
645 writel(LPC_IPGR_LOAD_PART2(0x12), LPC_ENET_IPGR(pldat->net_base));
646
647 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
648 writel(LPC_COMMAND_PASSRUNTFRAME,
649 LPC_ENET_COMMAND(pldat->net_base));
650 else {
651 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
652 LPC_ENET_COMMAND(pldat->net_base));
653 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
654 }
655
656 __lpc_params_setup(pldat);
657
658
659 __lpc_txrx_desc_setup(pldat);
660
661
662 writel((LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT),
663 LPC_ENET_RXFILTER_CTRL(pldat->net_base));
664
665
666 pldat->num_used_tx_buffs = 0;
667 pldat->last_tx_idx =
668 readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
669
670
671 writel(0xFFFF, LPC_ENET_INTCLEAR(pldat->net_base));
672 smp_wmb();
673 lpc_eth_enable_int(pldat->net_base);
674
675
676 tmp = readl(LPC_ENET_COMMAND(pldat->net_base));
677 tmp |= LPC_COMMAND_RXENABLE | LPC_COMMAND_TXENABLE;
678 writel(tmp, LPC_ENET_COMMAND(pldat->net_base));
679 tmp = readl(LPC_ENET_MAC1(pldat->net_base));
680 tmp |= LPC_MAC1_RECV_ENABLE;
681 writel(tmp, LPC_ENET_MAC1(pldat->net_base));
682}
683
/* Quiesce the MAC: reset it and clear the MAC1/MAC2 configuration. */
static void __lpc_eth_shutdown(struct netdata_local *pldat)
{
	/* Reset ethernet and power down PHY */
	__lpc_eth_reset(pldat);
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
}
691
692
693
694
695static int lpc_mdio_read(struct mii_bus *bus, int phy_id, int phyreg)
696{
697 struct netdata_local *pldat = bus->priv;
698 unsigned long timeout = jiffies + msecs_to_jiffies(100);
699 int lps;
700
701 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
702 writel(LPC_MCMD_READ, LPC_ENET_MCMD(pldat->net_base));
703
704
705 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
706 if (time_after(jiffies, timeout))
707 return -EIO;
708 cpu_relax();
709 }
710
711 lps = readl(LPC_ENET_MRDD(pldat->net_base));
712 writel(0, LPC_ENET_MCMD(pldat->net_base));
713
714 return lps;
715}
716
717static int lpc_mdio_write(struct mii_bus *bus, int phy_id, int phyreg,
718 u16 phydata)
719{
720 struct netdata_local *pldat = bus->priv;
721 unsigned long timeout = jiffies + msecs_to_jiffies(100);
722
723 writel(((phy_id << 8) | phyreg), LPC_ENET_MADR(pldat->net_base));
724 writel(phydata, LPC_ENET_MWTD(pldat->net_base));
725
726
727 while (readl(LPC_ENET_MIND(pldat->net_base)) & LPC_MIND_BUSY) {
728 if (time_after(jiffies, timeout))
729 return -EIO;
730 cpu_relax();
731 }
732
733 return 0;
734}
735
736static int lpc_mdio_reset(struct mii_bus *bus)
737{
738 return __lpc_mii_mngt_reset((struct netdata_local *)bus->priv);
739}
740
741static void lpc_handle_link_change(struct net_device *ndev)
742{
743 struct netdata_local *pldat = netdev_priv(ndev);
744 struct phy_device *phydev = ndev->phydev;
745 unsigned long flags;
746
747 bool status_change = false;
748
749 spin_lock_irqsave(&pldat->lock, flags);
750
751 if (phydev->link) {
752 if ((pldat->speed != phydev->speed) ||
753 (pldat->duplex != phydev->duplex)) {
754 pldat->speed = phydev->speed;
755 pldat->duplex = phydev->duplex;
756 status_change = true;
757 }
758 }
759
760 if (phydev->link != pldat->link) {
761 if (!phydev->link) {
762 pldat->speed = 0;
763 pldat->duplex = -1;
764 }
765 pldat->link = phydev->link;
766
767 status_change = true;
768 }
769
770 spin_unlock_irqrestore(&pldat->lock, flags);
771
772 if (status_change)
773 __lpc_params_setup(pldat);
774}
775
776static int lpc_mii_probe(struct net_device *ndev)
777{
778 struct netdata_local *pldat = netdev_priv(ndev);
779 struct phy_device *phydev = phy_find_first(pldat->mii_bus);
780
781 if (!phydev) {
782 netdev_err(ndev, "no PHY found\n");
783 return -ENODEV;
784 }
785
786
787 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
788 netdev_info(ndev, "using MII interface\n");
789 else
790 netdev_info(ndev, "using RMII interface\n");
791 phydev = phy_connect(ndev, phydev_name(phydev),
792 &lpc_handle_link_change,
793 lpc_phy_interface_mode(&pldat->pdev->dev));
794
795 if (IS_ERR(phydev)) {
796 netdev_err(ndev, "Could not attach to PHY\n");
797 return PTR_ERR(phydev);
798 }
799
800 phy_set_max_speed(phydev, SPEED_100);
801
802 pldat->link = 0;
803 pldat->speed = 0;
804 pldat->duplex = -1;
805
806 phy_attached_info(phydev);
807
808 return 0;
809}
810
811static int lpc_mii_init(struct netdata_local *pldat)
812{
813 int err = -ENXIO;
814
815 pldat->mii_bus = mdiobus_alloc();
816 if (!pldat->mii_bus) {
817 err = -ENOMEM;
818 goto err_out;
819 }
820
821
822 if (lpc_phy_interface_mode(&pldat->pdev->dev) == PHY_INTERFACE_MODE_MII)
823 writel(LPC_COMMAND_PASSRUNTFRAME,
824 LPC_ENET_COMMAND(pldat->net_base));
825 else {
826 writel((LPC_COMMAND_PASSRUNTFRAME | LPC_COMMAND_RMII),
827 LPC_ENET_COMMAND(pldat->net_base));
828 writel(LPC_SUPP_RESET_RMII, LPC_ENET_SUPP(pldat->net_base));
829 }
830
831 pldat->mii_bus->name = "lpc_mii_bus";
832 pldat->mii_bus->read = &lpc_mdio_read;
833 pldat->mii_bus->write = &lpc_mdio_write;
834 pldat->mii_bus->reset = &lpc_mdio_reset;
835 snprintf(pldat->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
836 pldat->pdev->name, pldat->pdev->id);
837 pldat->mii_bus->priv = pldat;
838 pldat->mii_bus->parent = &pldat->pdev->dev;
839
840 platform_set_drvdata(pldat->pdev, pldat->mii_bus);
841
842 if (mdiobus_register(pldat->mii_bus))
843 goto err_out_unregister_bus;
844
845 if (lpc_mii_probe(pldat->ndev) != 0)
846 goto err_out_unregister_bus;
847
848 return 0;
849
850err_out_unregister_bus:
851 mdiobus_unregister(pldat->mii_bus);
852 mdiobus_free(pldat->mii_bus);
853err_out:
854 return err;
855}
856
/*
 * Reap completed TX descriptors: advance the driver's reap index up to
 * the hardware consume index, account statistics for each finished
 * frame, and wake the TX queue once at least half the ring is free.
 * Called from lpc_eth_poll() under the TX queue lock.
 */
static void __lpc_handle_xmit(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 txcidx, *ptxstat, txstat;

	txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	while (pldat->last_tx_idx != txcidx) {
		unsigned int skblen = pldat->skblen[pldat->last_tx_idx];

		/* A buffer is available, get buffer status */
		ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
		txstat = *ptxstat;

		/* Next buffer and decrement used buffer counter */
		pldat->num_used_tx_buffs--;
		pldat->last_tx_idx++;
		if (pldat->last_tx_idx >= ENET_TX_DESC)
			pldat->last_tx_idx = 0;

		/* Update collision counter */
		ndev->stats.collisions += TXSTATUS_COLLISIONS_GET(txstat);

		/* Any errors occurred? */
		if (txstat & TXSTATUS_ERROR) {
			if (txstat & TXSTATUS_UNDERRUN) {
				/* FIFO underrun */
				ndev->stats.tx_fifo_errors++;
			}
			if (txstat & TXSTATUS_LATECOLL) {
				/* Late collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSCOLL) {
				/* Excessive collision */
				ndev->stats.tx_aborted_errors++;
			}
			if (txstat & TXSTATUS_EXCESSDEFER) {
				/* Defer limit */
				ndev->stats.tx_aborted_errors++;
			}
			ndev->stats.tx_errors++;
		} else {
			/* Update stats */
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skblen;
		}

		/* Re-read: hardware may have completed more frames */
		txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
	}

	if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}
912
/*
 * Process received frames, up to NAPI "budget" packets: for each ready
 * descriptor, either account an RX error or copy the frame into a
 * fresh skb and hand it to the stack, then return the descriptor to
 * hardware by advancing the consume index.  Returns the number of
 * descriptors processed.
 */
static int __lpc_handle_recv(struct net_device *ndev, int budget)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rxconsidx, len, ethst;
	struct rx_status_t *prxstat;
	int rx_done = 0;

	/* Get the current RX buffer indexes */
	rxconsidx = readl(LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
	while (rx_done < budget && rxconsidx !=
	       readl(LPC_ENET_RXPRODUCEINDEX(pldat->net_base))) {
		/* Get pointer to receive status */
		prxstat = &pldat->rx_stat_v[rxconsidx];
		len = (prxstat->statusinfo & RXSTATUS_SIZE) + 1;

		/* Status error? */
		ethst = prxstat->statusinfo;
		/* Frames with only a range error are not treated as
		 * errors here
		 */
		if ((ethst & (RXSTATUS_ERROR | RXSTATUS_STATUS_ERROR)) ==
		    (RXSTATUS_ERROR | RXSTATUS_RANGE))
			ethst &= ~RXSTATUS_ERROR;

		if (ethst & RXSTATUS_ERROR) {
			int si = prxstat->statusinfo;

			if (si & RXSTATUS_OVERRUN) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
			} else if (si & RXSTATUS_CRC) {
				/* CRC error */
				ndev->stats.rx_crc_errors++;
			} else if (si & RXSTATUS_LENGTH) {
				/* Length error */
				ndev->stats.rx_length_errors++;
			} else if (si & RXSTATUS_ERROR) {
				/* Other error */
				ndev->stats.rx_length_errors++;
			}
			ndev->stats.rx_errors++;
		} else {
			/* Packet is good */
			skb = dev_alloc_skb(len);
			if (!skb) {
				ndev->stats.rx_dropped++;
			} else {
				/* Copy packet from buffer */
				skb_put_data(skb,
					     pldat->rx_buff_v + rxconsidx * ENET_MAXF_SIZE,
					     len);

				/* Pass to upper layer */
				skb->protocol = eth_type_trans(skb, ndev);
				netif_receive_skb(skb);
				ndev->stats.rx_packets++;
				ndev->stats.rx_bytes += len;
			}
		}

		/* Increment consume index and hand descriptor back to HW */
		rxconsidx = rxconsidx + 1;
		if (rxconsidx >= ENET_RX_DESC)
			rxconsidx = 0;
		writel(rxconsidx,
		       LPC_ENET_RXCONSUMEINDEX(pldat->net_base));
		rx_done++;
	}

	return rx_done;
}
982
983static int lpc_eth_poll(struct napi_struct *napi, int budget)
984{
985 struct netdata_local *pldat = container_of(napi,
986 struct netdata_local, napi);
987 struct net_device *ndev = pldat->ndev;
988 int rx_done = 0;
989 struct netdev_queue *txq = netdev_get_tx_queue(ndev, 0);
990
991 __netif_tx_lock(txq, smp_processor_id());
992 __lpc_handle_xmit(ndev);
993 __netif_tx_unlock(txq);
994 rx_done = __lpc_handle_recv(ndev, budget);
995
996 if (rx_done < budget) {
997 napi_complete_done(napi, rx_done);
998 lpc_eth_enable_int(pldat->net_base);
999 }
1000
1001 return rx_done;
1002}
1003
/*
 * MAC interrupt handler: acknowledge all pending interrupt sources,
 * mask further MAC interrupts, and hand processing off to NAPI.
 */
static irqreturn_t __lpc_eth_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 tmp;

	spin_lock(&pldat->lock);

	tmp = readl(LPC_ENET_INTSTATUS(pldat->net_base));
	/* Clear interrupts */
	writel(tmp, LPC_ENET_INTCLEAR(pldat->net_base));

	/* Interrupts stay masked until lpc_eth_poll() finishes */
	lpc_eth_disable_int(pldat->net_base);
	if (likely(napi_schedule_prep(&pldat->napi)))
		__napi_schedule(&pldat->napi);

	spin_unlock(&pldat->lock);

	return IRQ_HANDLED;
}
1024
/*
 * ndo_stop: stop NAPI and the TX queue, stop the PHY, reset and
 * disable the MAC, and release the clock.  Always returns 0.
 */
static int lpc_eth_close(struct net_device *ndev)
{
	unsigned long flags;
	struct netdata_local *pldat = netdev_priv(ndev);

	if (netif_msg_ifdown(pldat))
		dev_dbg(&pldat->pdev->dev, "shutting down %s\n", ndev->name);

	napi_disable(&pldat->napi);
	netif_stop_queue(ndev);

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&pldat->lock, flags);
	__lpc_eth_reset(pldat);
	netif_carrier_off(ndev);
	/* Clear MAC configuration while the device is down */
	writel(0, LPC_ENET_MAC1(pldat->net_base));
	writel(0, LPC_ENET_MAC2(pldat->net_base));
	spin_unlock_irqrestore(&pldat->lock, flags);

	clk_disable_unprepare(pldat->clk);

	return 0;
}
1050
/*
 * ndo_start_xmit: copy the frame into the next free TX ring buffer,
 * fill in its descriptor, and kick the hardware by advancing the
 * produce index.  The skb is freed immediately since the data has
 * been copied.  Stops the queue when the ring is (nearly) full.
 */
static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	u32 len, txidx;
	u32 *ptxstat;
	struct txrx_desc_t *ptxrxdesc;

	len = skb->len;

	spin_lock_irq(&pldat->lock);

	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1)) {
		/* This function should never be called when there are no
		 * buffers; the queue is stopped below before that point
		 */
		netif_stop_queue(ndev);
		spin_unlock_irq(&pldat->lock);
		WARN(1, "BUG! TX request when no free TX buffers!\n");
		return NETDEV_TX_BUSY;
	}

	/* Get the next TX descriptor index */
	txidx = readl(LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Setup control for the transfer */
	ptxstat = &pldat->tx_stat_v[txidx];
	*ptxstat = 0;
	ptxrxdesc = &pldat->tx_desc_v[txidx];
	/* size field is (frame length - 1); interrupt on completion */
	ptxrxdesc->control =
		(len - 1) | TXDESC_CONTROL_LAST | TXDESC_CONTROL_INT;

	/* Copy data to the DMA buffer */
	memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);

	/* Save the buffer and increment the buffer counter */
	pldat->skblen[txidx] = len;
	pldat->num_used_tx_buffs++;

	/* Start transmit */
	txidx++;
	if (txidx >= ENET_TX_DESC)
		txidx = 0;
	writel(txidx, LPC_ENET_TXPRODUCEINDEX(pldat->net_base));

	/* Stop queue if no more TX buffers */
	if (pldat->num_used_tx_buffs >= (ENET_TX_DESC - 1))
		netif_stop_queue(ndev);

	spin_unlock_irq(&pldat->lock);

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1103
1104static int lpc_set_mac_address(struct net_device *ndev, void *p)
1105{
1106 struct sockaddr *addr = p;
1107 struct netdata_local *pldat = netdev_priv(ndev);
1108 unsigned long flags;
1109
1110 if (!is_valid_ether_addr(addr->sa_data))
1111 return -EADDRNOTAVAIL;
1112 memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
1113
1114 spin_lock_irqsave(&pldat->lock, flags);
1115
1116
1117 __lpc_set_mac(pldat, ndev->dev_addr);
1118
1119 spin_unlock_irqrestore(&pldat->lock, flags);
1120
1121 return 0;
1122}
1123
/*
 * ndo_set_rx_mode: reprogram the station address, RX filter control
 * bits (promiscuous/allmulti/multicast-hash), and the 64-bit multicast
 * hash filter from the device's multicast list.
 */
static void lpc_eth_set_multicast_list(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	struct netdev_hw_addr_list *mcptr = &ndev->mc;
	struct netdev_hw_addr *ha;
	u32 tmp32, hash_val, hashlo, hashhi;
	unsigned long flags;

	spin_lock_irqsave(&pldat->lock, flags);

	/* Set station address */
	__lpc_set_mac(pldat, ndev->dev_addr);

	tmp32 = LPC_RXFLTRW_ACCEPTUBROADCAST | LPC_RXFLTRW_ACCEPTPERFECT;

	if (ndev->flags & IFF_PROMISC)
		tmp32 |= LPC_RXFLTRW_ACCEPTUNICAST |
			LPC_RXFLTRW_ACCEPTUMULTICAST;
	if (ndev->flags & IFF_ALLMULTI)
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICAST;

	if (netdev_hw_addr_list_count(mcptr))
		tmp32 |= LPC_RXFLTRW_ACCEPTUMULTICASTHASH;

	writel(tmp32, LPC_ENET_RXFILTER_CTRL(pldat->net_base));

	/* Set initial hash table */
	hashlo = 0x0;
	hashhi = 0x0;

	/* 64 bits : multicast address in hash table */
	netdev_hw_addr_list_for_each(ha, mcptr) {
		/* Bits 28:23 of the Ethernet CRC select the hash bit */
		hash_val = (ether_crc(6, ha->addr) >> 23) & 0x3F;

		if (hash_val >= 32)
			hashhi |= 1 << (hash_val - 32);
		else
			hashlo |= 1 << hash_val;
	}

	writel(hashlo, LPC_ENET_HASHFILTERL(pldat->net_base));
	writel(hashhi, LPC_ENET_HASHFILTERH(pldat->net_base));

	spin_unlock_irqrestore(&pldat->lock, flags);
}
1170
1171static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
1172{
1173 struct phy_device *phydev = ndev->phydev;
1174
1175 if (!netif_running(ndev))
1176 return -EINVAL;
1177
1178 if (!phydev)
1179 return -ENODEV;
1180
1181 return phy_mii_ioctl(phydev, req, cmd);
1182}
1183
/*
 * ndo_open: enable the clock, resume the PHY, reset and reinitialize
 * the MAC, then start the PHY, TX queue and NAPI.  Returns 0 on
 * success or the clk_prepare_enable() error.
 */
static int lpc_eth_open(struct net_device *ndev)
{
	struct netdata_local *pldat = netdev_priv(ndev);
	int ret;

	if (netif_msg_ifup(pldat))
		dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);

	ret = clk_prepare_enable(pldat->clk);
	if (ret)
		return ret;

	/* Suspended PHY makes LPC ethernet core block, so resume now */
	phy_resume(ndev->phydev);

	/* Reset and initialize */
	__lpc_eth_reset(pldat);
	__lpc_eth_init(pldat);

	/* schedule a link state check */
	phy_start(ndev->phydev);
	netif_start_queue(ndev);
	napi_enable(&pldat->napi);

	return 0;
}
1210
1211
1212
1213
/* ethtool get_drvinfo: report driver name, version and bus info. */
static void lpc_eth_ethtool_getdrvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
1222
1223static u32 lpc_eth_ethtool_getmsglevel(struct net_device *ndev)
1224{
1225 struct netdata_local *pldat = netdev_priv(ndev);
1226
1227 return pldat->msg_enable;
1228}
1229
1230static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
1231{
1232 struct netdata_local *pldat = netdev_priv(ndev);
1233
1234 pldat->msg_enable = level;
1235}
1236
/* ethtool callbacks: driver info and message level are local; link
 * state and link settings are delegated to phylib helpers. */
static const struct ethtool_ops lpc_eth_ethtool_ops = {
	.get_drvinfo		= lpc_eth_ethtool_getdrvinfo,
	.get_msglevel		= lpc_eth_ethtool_getmsglevel,
	.set_msglevel		= lpc_eth_ethtool_setmsglevel,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
1245
/* Network device callbacks wired into the net core. */
static const struct net_device_ops lpc_netdev_ops = {
	.ndo_open		= lpc_eth_open,
	.ndo_stop		= lpc_eth_close,
	.ndo_start_xmit		= lpc_eth_hard_start_xmit,
	.ndo_set_rx_mode	= lpc_eth_set_multicast_list,
	.ndo_do_ioctl		= lpc_eth_ioctl,
	.ndo_set_mac_address	= lpc_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};
1255
1256static int lpc_eth_drv_probe(struct platform_device *pdev)
1257{
1258 struct resource *res;
1259 struct net_device *ndev;
1260 struct netdata_local *pldat;
1261 struct phy_device *phydev;
1262 dma_addr_t dma_handle;
1263 int irq, ret;
1264 u32 tmp;
1265
1266
1267 tmp = __raw_readl(LPC32XX_CLKPWR_MACCLK_CTRL);
1268 tmp &= ~LPC32XX_CLKPWR_MACCTRL_PINS_MSK;
1269 if (lpc_phy_interface_mode(&pdev->dev) == PHY_INTERFACE_MODE_MII)
1270 tmp |= LPC32XX_CLKPWR_MACCTRL_USE_MII_PINS;
1271 else
1272 tmp |= LPC32XX_CLKPWR_MACCTRL_USE_RMII_PINS;
1273 __raw_writel(tmp, LPC32XX_CLKPWR_MACCLK_CTRL);
1274
1275
1276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1277 irq = platform_get_irq(pdev, 0);
1278 if (!res || irq < 0) {
1279 dev_err(&pdev->dev, "error getting resources.\n");
1280 ret = -ENXIO;
1281 goto err_exit;
1282 }
1283
1284
1285 ndev = alloc_etherdev(sizeof(struct netdata_local));
1286 if (!ndev) {
1287 dev_err(&pdev->dev, "could not allocate device.\n");
1288 ret = -ENOMEM;
1289 goto err_exit;
1290 }
1291
1292 SET_NETDEV_DEV(ndev, &pdev->dev);
1293
1294 pldat = netdev_priv(ndev);
1295 pldat->pdev = pdev;
1296 pldat->ndev = ndev;
1297
1298 spin_lock_init(&pldat->lock);
1299
1300
1301 ndev->irq = irq;
1302
1303
1304 pldat->clk = clk_get(&pdev->dev, NULL);
1305 if (IS_ERR(pldat->clk)) {
1306 dev_err(&pdev->dev, "error getting clock.\n");
1307 ret = PTR_ERR(pldat->clk);
1308 goto err_out_free_dev;
1309 }
1310
1311
1312 ret = clk_prepare_enable(pldat->clk);
1313 if (ret)
1314 goto err_out_clk_put;
1315
1316
1317 pldat->net_base = ioremap(res->start, resource_size(res));
1318 if (!pldat->net_base) {
1319 dev_err(&pdev->dev, "failed to map registers\n");
1320 ret = -ENOMEM;
1321 goto err_out_disable_clocks;
1322 }
1323 ret = request_irq(ndev->irq, __lpc_eth_interrupt, 0,
1324 ndev->name, ndev);
1325 if (ret) {
1326 dev_err(&pdev->dev, "error requesting interrupt.\n");
1327 goto err_out_iounmap;
1328 }
1329
1330
1331 ndev->netdev_ops = &lpc_netdev_ops;
1332 ndev->ethtool_ops = &lpc_eth_ethtool_ops;
1333 ndev->watchdog_timeo = msecs_to_jiffies(2500);
1334
1335
1336 pldat->dma_buff_size = (ENET_TX_DESC + ENET_RX_DESC) * (ENET_MAXF_SIZE +
1337 sizeof(struct txrx_desc_t) + sizeof(struct rx_status_t));
1338 pldat->dma_buff_base_v = 0;
1339
1340 if (use_iram_for_net(&pldat->pdev->dev)) {
1341 dma_handle = LPC32XX_IRAM_BASE;
1342 if (pldat->dma_buff_size <= lpc32xx_return_iram_size())
1343 pldat->dma_buff_base_v =
1344 io_p2v(LPC32XX_IRAM_BASE);
1345 else
1346 netdev_err(ndev,
1347 "IRAM not big enough for net buffers, using SDRAM instead.\n");
1348 }
1349
1350 if (pldat->dma_buff_base_v == 0) {
1351 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1352 if (ret)
1353 goto err_out_free_irq;
1354
1355 pldat->dma_buff_size = PAGE_ALIGN(pldat->dma_buff_size);
1356
1357
1358
1359 pldat->dma_buff_base_v =
1360 dma_alloc_coherent(&pldat->pdev->dev,
1361 pldat->dma_buff_size, &dma_handle,
1362 GFP_KERNEL);
1363 if (pldat->dma_buff_base_v == NULL) {
1364 ret = -ENOMEM;
1365 goto err_out_free_irq;
1366 }
1367 }
1368 pldat->dma_buff_base_p = dma_handle;
1369
1370 netdev_dbg(ndev, "IO address space :%pR\n", res);
1371 netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
1372 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
1373 pldat->net_base);
1374 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
1375 netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
1376 netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
1377 pldat->dma_buff_base_p);
1378 netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
1379 pldat->dma_buff_base_v);
1380
1381
1382 __lpc_get_mac(pldat, ndev->dev_addr);
1383
1384 if (!is_valid_ether_addr(ndev->dev_addr)) {
1385 const char *macaddr = of_get_mac_address(pdev->dev.of_node);
1386 if (macaddr)
1387 memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
1388 }
1389 if (!is_valid_ether_addr(ndev->dev_addr))
1390 eth_hw_addr_random(ndev);
1391
1392
1393 __lpc_eth_reset(pldat);
1394
1395
1396 __lpc_eth_shutdown(pldat);
1397
1398
1399 pldat->msg_enable = NETIF_MSG_LINK;
1400
1401
1402 __lpc_mii_mngt_reset(pldat);
1403
1404
1405
1406 pldat->link = 0;
1407 pldat->speed = 100;
1408 pldat->duplex = DUPLEX_FULL;
1409 __lpc_params_setup(pldat);
1410
1411 netif_napi_add(ndev, &pldat->napi, lpc_eth_poll, NAPI_WEIGHT);
1412
1413 ret = register_netdev(ndev);
1414 if (ret) {
1415 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
1416 goto err_out_dma_unmap;
1417 }
1418 platform_set_drvdata(pdev, ndev);
1419
1420 ret = lpc_mii_init(pldat);
1421 if (ret)
1422 goto err_out_unregister_netdev;
1423
1424 netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
1425 res->start, ndev->irq);
1426
1427 phydev = ndev->phydev;
1428
1429 device_init_wakeup(&pdev->dev, 1);
1430 device_set_wakeup_enable(&pdev->dev, 0);
1431
1432 return 0;
1433
1434err_out_unregister_netdev:
1435 unregister_netdev(ndev);
1436err_out_dma_unmap:
1437 if (!use_iram_for_net(&pldat->pdev->dev) ||
1438 pldat->dma_buff_size > lpc32xx_return_iram_size())
1439 dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
1440 pldat->dma_buff_base_v,
1441 pldat->dma_buff_base_p);
1442err_out_free_irq:
1443 free_irq(ndev->irq, ndev);
1444err_out_iounmap:
1445 iounmap(pldat->net_base);
1446err_out_disable_clocks:
1447 clk_disable_unprepare(pldat->clk);
1448err_out_clk_put:
1449 clk_put(pldat->clk);
1450err_out_free_dev:
1451 free_netdev(ndev);
1452err_exit:
1453 pr_err("%s: not found (%d).\n", MODNAME, ret);
1454 return ret;
1455}
1456
/*
 * Remove: unregister the netdev first so no further traffic reaches the
 * hardware, then release resources in the reverse order of probe: DMA
 * buffers, IRQ, MMIO mapping, MDIO bus, clock, and finally the netdev.
 */
static int lpc_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	unregister_netdev(ndev);

	/* Only free coherent memory when probe actually allocated it;
	 * buffers placed in on-chip IRAM need no freeing. */
	if (!use_iram_for_net(&pldat->pdev->dev) ||
	    pldat->dma_buff_size > lpc32xx_return_iram_size())
		dma_free_coherent(&pldat->pdev->dev, pldat->dma_buff_size,
				  pldat->dma_buff_base_v,
				  pldat->dma_buff_base_p);
	free_irq(ndev->irq, ndev);
	iounmap(pldat->net_base);
	mdiobus_unregister(pldat->mii_bus);
	mdiobus_free(pldat->mii_bus);
	clk_disable_unprepare(pldat->clk);
	clk_put(pldat->clk);
	free_netdev(ndev);

	return 0;
}
1479
1480#ifdef CONFIG_PM
/*
 * Suspend: detach the running interface, shut the MAC down and gate its
 * clock. If the device may wake the system, arm its IRQ as a wakeup
 * source first.
 *
 * NOTE(review): ndev is dereferenced (netdev_priv(), ndev->irq) before
 * the "if (ndev)" NULL check below — either the check is redundant or
 * the earlier dereferences can crash. Confirm platform_get_drvdata()
 * cannot return NULL at this point.
 */
static int lpc_eth_drv_suspend(struct platform_device *pdev,
			       pm_message_t state)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct netdata_local *pldat = netdev_priv(ndev);

	if (device_may_wakeup(&pdev->dev))
		enable_irq_wake(ndev->irq);

	if (ndev) {
		if (netif_running(ndev)) {
			netif_device_detach(ndev);
			__lpc_eth_shutdown(pldat);
			clk_disable_unprepare(pldat->clk);

			/*
			 * Reset again now that the clock is disabled to be
			 * sure EMC_MDC is down.
			 */
			__lpc_eth_reset(pldat);
		}
	}

	return 0;
}
1506
1507static int lpc_eth_drv_resume(struct platform_device *pdev)
1508{
1509 struct net_device *ndev = platform_get_drvdata(pdev);
1510 struct netdata_local *pldat;
1511
1512 if (device_may_wakeup(&pdev->dev))
1513 disable_irq_wake(ndev->irq);
1514
1515 if (ndev) {
1516 if (netif_running(ndev)) {
1517 pldat = netdev_priv(ndev);
1518
1519
1520 clk_enable(pldat->clk);
1521
1522
1523 __lpc_eth_reset(pldat);
1524 __lpc_eth_init(pldat);
1525
1526 netif_device_attach(ndev);
1527 }
1528 }
1529
1530 return 0;
1531}
1532#endif
1533
#ifdef CONFIG_OF
/* Device-tree match table: binds this driver to "nxp,lpc-eth" nodes. */
static const struct of_device_id lpc_eth_match[] = {
	{ .compatible = "nxp,lpc-eth" },
	{ } /* sentinel */
};
MODULE_DEVICE_TABLE(of, lpc_eth_match);
#endif
1541
/* Platform driver glue; the legacy suspend/resume callbacks are only
 * wired up when power management support is built in. */
static struct platform_driver lpc_eth_driver = {
	.probe		= lpc_eth_drv_probe,
	.remove		= lpc_eth_drv_remove,
#ifdef CONFIG_PM
	.suspend	= lpc_eth_drv_suspend,
	.resume		= lpc_eth_drv_resume,
#endif
	.driver		= {
		.name	= MODNAME,
		.of_match_table = of_match_ptr(lpc_eth_match),
	},
};
1554
/* Generates the module init/exit boilerplate for the platform driver. */
module_platform_driver(lpc_eth_driver);

MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("LPC Ethernet Driver");
MODULE_LICENSE("GPL");
1561