1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17#include <linux/version.h>
18#include <linux/module.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ethtool.h>
22#include <linux/usb.h>
23#include <linux/crc32.h>
24#include <linux/signal.h>
25#include <linux/slab.h>
26#include <linux/if_vlan.h>
27#include <linux/uaccess.h>
28#include <linux/linkmode.h>
29#include <linux/list.h>
30#include <linux/ip.h>
31#include <linux/ipv6.h>
32#include <linux/mdio.h>
33#include <linux/phy.h>
34#include <net/ip6_checksum.h>
35#include <linux/interrupt.h>
36#include <linux/irqdomain.h>
37#include <linux/irq.h>
38#include <linux/irqchip/chained_irq.h>
39#include <linux/microchipphy.h>
40#include <linux/phy_fixed.h>
41#include <linux/of_mdio.h>
42#include <linux/of_net.h>
43#include "lan78xx.h"
44
45#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
46#define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
47#define DRIVER_NAME "lan78xx"
48
49#define TX_TIMEOUT_JIFFIES (5 * HZ)
50#define THROTTLE_JIFFIES (HZ / 8)
51#define UNLINK_TIMEOUT_MS 3
52
53#define RX_MAX_QUEUE_MEMORY (60 * 1518)
54
55#define SS_USB_PKT_SIZE (1024)
56#define HS_USB_PKT_SIZE (512)
57#define FS_USB_PKT_SIZE (64)
58
59#define MAX_RX_FIFO_SIZE (12 * 1024)
60#define MAX_TX_FIFO_SIZE (12 * 1024)
61#define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
62#define DEFAULT_BULK_IN_DELAY (0x0800)
63#define MAX_SINGLE_PACKET_SIZE (9000)
64#define DEFAULT_TX_CSUM_ENABLE (true)
65#define DEFAULT_RX_CSUM_ENABLE (true)
66#define DEFAULT_TSO_CSUM_ENABLE (true)
67#define DEFAULT_VLAN_FILTER_ENABLE (true)
68#define DEFAULT_VLAN_RX_OFFLOAD (true)
69#define TX_OVERHEAD (8)
70#define RXW_PADDING 2
71
72#define LAN78XX_USB_VENDOR_ID (0x0424)
73#define LAN7800_USB_PRODUCT_ID (0x7800)
74#define LAN7850_USB_PRODUCT_ID (0x7850)
75#define LAN7801_USB_PRODUCT_ID (0x7801)
76#define LAN78XX_EEPROM_MAGIC (0x78A5)
77#define LAN78XX_OTP_MAGIC (0x78F3)
78
79#define MII_READ 1
80#define MII_WRITE 0
81
82#define EEPROM_INDICATOR (0xA5)
83#define EEPROM_MAC_OFFSET (0x01)
84#define MAX_EEPROM_SIZE 512
85#define OTP_INDICATOR_1 (0xF3)
86#define OTP_INDICATOR_2 (0xF7)
87
88#define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
89 WAKE_MCAST | WAKE_BCAST | \
90 WAKE_ARP | WAKE_MAGIC)
91
92
93#define BULK_IN_PIPE 1
94#define BULK_OUT_PIPE 2
95
96
97#define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
98
99
100#define STAT_UPDATE_TIMER (1 * 1000)
101
102
103#define MAX_INT_EP (32)
104#define INT_EP_INTEP (31)
105#define INT_EP_OTP_WR_DONE (28)
106#define INT_EP_EEE_TX_LPI_START (26)
107#define INT_EP_EEE_TX_LPI_STOP (25)
108#define INT_EP_EEE_RX_LPI (24)
109#define INT_EP_MAC_RESET_TIMEOUT (23)
110#define INT_EP_RDFO (22)
111#define INT_EP_TXE (21)
112#define INT_EP_USB_STATUS (20)
113#define INT_EP_TX_DIS (19)
114#define INT_EP_RX_DIS (18)
115#define INT_EP_PHY (17)
116#define INT_EP_DP (16)
117#define INT_EP_MAC_ERR (15)
118#define INT_EP_TDFU (14)
119#define INT_EP_TDFO (13)
120#define INT_EP_UTX (12)
121#define INT_EP_GPIO_11 (11)
122#define INT_EP_GPIO_10 (10)
123#define INT_EP_GPIO_9 (9)
124#define INT_EP_GPIO_8 (8)
125#define INT_EP_GPIO_7 (7)
126#define INT_EP_GPIO_6 (6)
127#define INT_EP_GPIO_5 (5)
128#define INT_EP_GPIO_4 (4)
129#define INT_EP_GPIO_3 (3)
130#define INT_EP_GPIO_2 (2)
131#define INT_EP_GPIO_1 (1)
132#define INT_EP_GPIO_0 (0)
133
/* ethtool -S statistic names.
 * NOTE: the order here must stay in sync with the field order of
 * struct lan78xx_statstage / lan78xx_statstage64 below; the stats code
 * copies counters positionally, not by name.
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
183
/* Raw 32-bit hardware statistics block, in the exact layout returned by
 * the USB_VENDOR_REQUEST_GET_STATS control transfer (lan78xx_read_stats()
 * copies it word-by-word).  Field order must match lan78xx_gstrings[].
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
233
/* 64-bit accumulated statistics.  Same field order as lan78xx_statstage;
 * lan78xx_update_stats() widens each 32-bit hardware counter using the
 * per-counter rollover counts so values survive hardware wrap-around.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
283
/* Device registers dumped for ethtool get_regs (register offsets are
 * declared in lan78xx.h).
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
305
306#define PHY_REG_SIZE (32 * sizeof(u32))
307
308struct lan78xx_net;
309
/* Driver-private filtering state, reached via dev->data[0]. */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;				/* shadow of the RFE_CTL register */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN];	/* multicast hash table shadow */
	u32 pfilter_table[NUM_OF_MAF][2];	/* perfect filter shadow: [i][0]=MAF_HI, [i][1]=MAF_LO */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];	/* VLAN filter table shadow */
	struct mutex dataport_mutex;		/* serializes DP_SEL/DP_CMD data-port access */
	spinlock_t rfe_ctl_lock;		/* protects rfe_ctl and the shadow tables */
	struct work_struct set_multicast;	/* deferred write of multicast filters */
	struct work_struct set_vlan;		/* deferred write of VLAN filters */
	u32 wol;				/* Wake-on-LAN option bits */
};
322
/* Lifecycle state of an skb/URB pair as it moves through the tx/rx
 * queues (stored in the skb's control block, see struct skb_data).
 */
enum skb_state {
	illegal = 0,	/* not in any queue / state unset */
	tx_start,	/* submitted for transmit */
	tx_done,	/* transmit URB completed */
	rx_start,	/* submitted for receive */
	rx_done,	/* receive URB completed */
	rx_cleanup,	/* to be freed/recycled */
	unlink_start	/* URB unlink in progress */
};
332
/* Per-skb bookkeeping kept in the skb control block (skb->cb). */
struct skb_data {
	struct urb *urb;		/* URB carrying this skb's data */
	struct lan78xx_net *dev;
	enum skb_state state;		/* current queue/lifecycle state */
	size_t length;			/* payload length for accounting */
	int num_of_packet;		/* packets aggregated in this transfer */
};
340
/* Context passed along with asynchronous control transfers. */
struct usb_context {
	struct usb_ctrlrequest req;	/* setup packet for the control URB */
	struct lan78xx_net *dev;
};
345
346#define EVENT_TX_HALT 0
347#define EVENT_RX_HALT 1
348#define EVENT_RX_MEMORY 2
349#define EVENT_STS_SPLIT 3
350#define EVENT_LINK_RESET 4
351#define EVENT_RX_PAUSED 5
352#define EVENT_DEV_WAKING 6
353#define EVENT_DEV_ASLEEP 7
354#define EVENT_DEV_OPEN 8
355#define EVENT_STAT_UPDATE 9
356
/* Aggregated statistics state: last raw hardware snapshot, per-counter
 * rollover counts/limits, and the accumulated 64-bit values.
 */
struct statstage {
	struct mutex access_lock;	/* guards saved/rollover/curr_stat */
	struct lan78xx_statstage saved;		/* last raw snapshot read from hw */
	struct lan78xx_statstage rollover_count; /* times each counter wrapped */
	struct lan78xx_statstage rollover_max;	/* per-counter max value before wrap */
	struct lan78xx_statstage64 curr_stat;	/* widened, accumulated totals */
};
364
/* State for the virtual IRQ domain used to deliver PHY interrupts that
 * arrive via the USB interrupt endpoint.
 */
struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;		/* mapped virq handed to the PHY driver */
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;			/* software mask of enabled sources */
	struct mutex irq_lock;		/* serializes mask/unmask bus updates */
};
373
/* Per-adapter driver state (netdev_priv of the net_device). */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;		/* points to struct lan78xx_priv */

	int rx_qlen;			/* target depth of the rx URB queue */
	int tx_qlen;			/* target depth of the tx URB queue */
	struct sk_buff_head rxq;	/* receive URBs in flight */
	struct sk_buff_head txq;	/* transmit URBs in flight */
	struct sk_buff_head done;	/* completed URBs awaiting bh processing */
	struct sk_buff_head rxq_pause;	/* rx skbs held while flow is paused */
	struct sk_buff_head txq_pend;	/* tx skbs waiting for a free URB */

	struct tasklet_struct bh;	/* bottom half: completions, refill */
	struct delayed_work wq;		/* deferred kevent work (EVENT_* flags) */

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;			/* netif_msg verbosity bitmap */

	struct urb *urb_intr;		/* interrupt endpoint URB */
	struct usb_anchor deferred;	/* URBs deferred while suspended */

	struct mutex phy_mutex;		/* serializes MII register access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;			/* count any extra framing */
	size_t rx_urb_size;		/* size for rx urbs */

	unsigned long flags;		/* EVENT_* bits for deferred work */

	wait_queue_head_t *wait;	/* woken on unlink/stop completion */
	unsigned char suspend_count;

	unsigned maxpacket;		/* bulk-out max packet size */
	struct timer_list delay;	/* throttle/retry timer */
	struct timer_list stat_monitor;	/* periodic statistics refresh */

	unsigned long data[5];		/* data[0] holds struct lan78xx_priv * */

	int link_on;			/* cached link state, see lan78xx_link_reset() */
	u8 mdix_ctrl;

	u32 chipid;			/* from ID_REV, e.g. ID_REV_CHIP_ID_7800_ */
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int fc_autoneg;			/* flow control from autoneg vs forced */
	u8 fc_request_control;		/* forced pause setting when !fc_autoneg */

	int delta;			/* stat_monitor interval multiplier */
	struct statstage stats;

	struct irq_domain_data domain_data;
};
433
434
435#define PHY_LAN8835 (0x0007C130)
436#define PHY_KSZ9031RNX (0x00221620)
437
438
/* Module parameter: netif_msg verbosity bitmap; -1 keeps the default. */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
442
443static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
444{
445 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
446 int ret;
447
448 if (!buf)
449 return -ENOMEM;
450
451 ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
452 USB_VENDOR_REQUEST_READ_REGISTER,
453 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
454 0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
455 if (likely(ret >= 0)) {
456 le32_to_cpus(buf);
457 *data = *buf;
458 } else {
459 netdev_warn(dev->net,
460 "Failed to read register index 0x%08x. ret = %d",
461 index, ret);
462 }
463
464 kfree(buf);
465
466 return ret;
467}
468
469static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
470{
471 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
472 int ret;
473
474 if (!buf)
475 return -ENOMEM;
476
477 *buf = data;
478 cpu_to_le32s(buf);
479
480 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
481 USB_VENDOR_REQUEST_WRITE_REGISTER,
482 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
483 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
484 if (unlikely(ret < 0)) {
485 netdev_warn(dev->net,
486 "Failed to write register index 0x%08x. ret = %d",
487 index, ret);
488 }
489
490 kfree(buf);
491
492 return ret;
493}
494
495static int lan78xx_read_stats(struct lan78xx_net *dev,
496 struct lan78xx_statstage *data)
497{
498 int ret = 0;
499 int i;
500 struct lan78xx_statstage *stats;
501 u32 *src;
502 u32 *dst;
503
504 stats = kmalloc(sizeof(*stats), GFP_KERNEL);
505 if (!stats)
506 return -ENOMEM;
507
508 ret = usb_control_msg(dev->udev,
509 usb_rcvctrlpipe(dev->udev, 0),
510 USB_VENDOR_REQUEST_GET_STATS,
511 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
512 0,
513 0,
514 (void *)stats,
515 sizeof(*stats),
516 USB_CTRL_SET_TIMEOUT);
517 if (likely(ret >= 0)) {
518 src = (u32 *)stats;
519 dst = (u32 *)data;
520 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
521 le32_to_cpus(&src[i]);
522 dst[i] = src[i];
523 }
524 } else {
525 netdev_warn(dev->net,
526 "Failed to read stat ret = 0x%x", ret);
527 }
528
529 kfree(stats);
530
531 return ret;
532}
533
/* Detect a 32-bit hardware counter wrap: if the fresh reading dropped
 * below the previously saved value, the counter rolled over, so bump its
 * rollover count.  Wrapped in do { } while (0) so the macro expands to a
 * single statement (safe in unbraced if/else), and with arguments
 * parenthesized against unexpected expansion.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
538
/* Compare a fresh hardware statistics snapshot against the previously
 * saved one, incrementing the per-counter rollover counts for any 32-bit
 * counter that wrapped, then remember the snapshot for the next pass.
 * Caller must hold dev->stats.access_lock (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* Remember this snapshot as the baseline for the next comparison. */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
592
/* Refresh the accumulated 64-bit statistics from hardware.
 *
 * Reads a fresh snapshot, updates rollover counts, then recomputes each
 * 64-bit total as: current_raw + rollovers * (per_counter_max + 1).
 * The three struct lan78xx_statstage blocks are walked as flat u32
 * arrays in parallel with the u64 output array (field order matches).
 * Silently returns if the device cannot be resumed.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	/* Need the device awake for the control transfer. */
	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* lan78xx_read_stats() returns bytes transferred (> 0) on success. */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
620
621
622static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
623{
624 unsigned long start_time = jiffies;
625 u32 val;
626 int ret;
627
628 do {
629 ret = lan78xx_read_reg(dev, MII_ACC, &val);
630 if (unlikely(ret < 0))
631 return -EIO;
632
633 if (!(val & MII_ACC_MII_BUSY_))
634 return 0;
635 } while (!time_after(jiffies, start_time + HZ));
636
637 return -EIO;
638}
639
640static inline u32 mii_access(int id, int index, int read)
641{
642 u32 ret;
643
644 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
645 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
646 if (read)
647 ret |= MII_ACC_MII_READ_;
648 else
649 ret |= MII_ACC_MII_WRITE_;
650 ret |= MII_ACC_MII_BUSY_;
651
652 return ret;
653}
654
655static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
656{
657 unsigned long start_time = jiffies;
658 u32 val;
659 int ret;
660
661 do {
662 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
663 if (unlikely(ret < 0))
664 return -EIO;
665
666 if (!(val & E2P_CMD_EPC_BUSY_) ||
667 (val & E2P_CMD_EPC_TIMEOUT_))
668 break;
669 usleep_range(40, 100);
670 } while (!time_after(jiffies, start_time + HZ));
671
672 if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
673 netdev_warn(dev->net, "EEPROM read operation timeout");
674 return -EIO;
675 }
676
677 return 0;
678}
679
680static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
681{
682 unsigned long start_time = jiffies;
683 u32 val;
684 int ret;
685
686 do {
687 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
688 if (unlikely(ret < 0))
689 return -EIO;
690
691 if (!(val & E2P_CMD_EPC_BUSY_))
692 return 0;
693
694 usleep_range(40, 100);
695 } while (!time_after(jiffies, start_time + HZ));
696
697 netdev_warn(dev->net, "EEPROM is busy");
698 return -EIO;
699}
700
701static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
702 u32 length, u8 *data)
703{
704 u32 val;
705 u32 saved;
706 int i, ret;
707 int retval;
708
709
710
711
712 ret = lan78xx_read_reg(dev, HW_CFG, &val);
713 saved = val;
714 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
715 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
716 ret = lan78xx_write_reg(dev, HW_CFG, val);
717 }
718
719 retval = lan78xx_eeprom_confirm_not_busy(dev);
720 if (retval)
721 return retval;
722
723 for (i = 0; i < length; i++) {
724 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
725 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
726 ret = lan78xx_write_reg(dev, E2P_CMD, val);
727 if (unlikely(ret < 0)) {
728 retval = -EIO;
729 goto exit;
730 }
731
732 retval = lan78xx_wait_eeprom(dev);
733 if (retval < 0)
734 goto exit;
735
736 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
737 if (unlikely(ret < 0)) {
738 retval = -EIO;
739 goto exit;
740 }
741
742 data[i] = val & 0xFF;
743 offset++;
744 }
745
746 retval = 0;
747exit:
748 if (dev->chipid == ID_REV_CHIP_ID_7800_)
749 ret = lan78xx_write_reg(dev, HW_CFG, saved);
750
751 return retval;
752}
753
754static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
755 u32 length, u8 *data)
756{
757 u8 sig;
758 int ret;
759
760 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
761 if ((ret == 0) && (sig == EEPROM_INDICATOR))
762 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
763 else
764 ret = -EINVAL;
765
766 return ret;
767}
768
/* Write @length bytes to the external EEPROM starting at @offset.
 *
 * On LAN7800, some EEPROM pins are muxed with the LED function, so the
 * LEDs are disabled in HW_CFG during the access and restored at "exit".
 * Sequence per the E2P_CMD protocol: enable erase/write (EWEN) once,
 * then for each byte stage it in E2P_DATA and issue a WRITE command,
 * waiting for completion after every command.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command (EWEN) before any write. */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register before issuing the write command. */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command for the current offset. */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* Restore the original LED configuration on LAN7800. */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
835
/* Read @length bytes from the on-chip OTP memory starting at @offset.
 *
 * Powers the OTP block up if it is down, then reads one byte at a time:
 * program the address registers, issue a READ command via OTP_CMD_GO,
 * and poll OTP_STATUS until the controller goes idle.  Each polling
 * loop is bounded by roughly one second.  Returns 0 on success, -EIO on
 * a poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* OTP block is powered down; clear the bit and wait for
		 * the power-up to be acknowledged.
		 */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* Byte address is split across two registers: high bits in
		 * OTP_ADDR1, low bits in OTP_ADDR2.
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
889
/* Program @length bytes into the on-chip OTP memory starting at @offset.
 *
 * Powers the OTP block up if needed, selects byte program mode, then for
 * each byte: set the address, stage the data, issue a program/verify
 * command, and poll OTP_STATUS until idle (bounded by ~1 second).
 * OTP bits are one-time programmable — writes are irreversible.
 * Returns 0 on success, -EIO on a poll timeout.
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* OTP block is powered down; clear the bit and wait for
		 * the power-up to be acknowledged.
		 */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* Byte address split across OTP_ADDR1 (high) / OTP_ADDR2 (low). */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
942
943static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
944 u32 length, u8 *data)
945{
946 u8 sig;
947 int ret;
948
949 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
950
951 if (ret == 0) {
952 if (sig == OTP_INDICATOR_1)
953 offset = offset;
954 else if (sig == OTP_INDICATOR_2)
955 offset += 0x100;
956 else
957 ret = -EINVAL;
958 if (!ret)
959 ret = lan78xx_read_raw_otp(dev, offset, length, data);
960 }
961
962 return ret;
963}
964
965static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
966{
967 int i, ret;
968
969 for (i = 0; i < 100; i++) {
970 u32 dp_sel;
971
972 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
973 if (unlikely(ret < 0))
974 return -EIO;
975
976 if (dp_sel & DP_SEL_DPRDY_)
977 return 0;
978
979 usleep_range(40, 100);
980 }
981
982 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
983
984 return -EIO;
985}
986
/* Write @length words from @buf into internal device RAM via the data
 * port, starting at @addr within the RAM bank chosen by @ram_select.
 *
 * Takes the autopm reference first (sleeps/resumes the device), then
 * dataport_mutex to serialize against other data-port users.  Each word
 * is written as: DP_ADDR, DP_DATA, DP_CMD write, then wait for ready.
 * NOTE(review): returns 0 without doing anything if the device cannot
 * be resumed — callers see success; presumably intentional best-effort.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Select the target RAM bank, preserving the other DP_SEL bits. */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		/* Each write must complete before the next is issued. */
		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1027
1028static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1029 int index, u8 addr[ETH_ALEN])
1030{
1031 u32 temp;
1032
1033 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1034 temp = addr[3];
1035 temp = addr[2] | (temp << 8);
1036 temp = addr[1] | (temp << 8);
1037 temp = addr[0] | (temp << 8);
1038 pdata->pfilter_table[index][1] = temp;
1039 temp = addr[5];
1040 temp = addr[4] | (temp << 8);
1041 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1042 pdata->pfilter_table[index][0] = temp;
1043 }
1044}
1045
1046
1047static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1048{
1049 return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1050}
1051
/* Work handler: push the shadow multicast state to hardware.
 *
 * lan78xx_set_multicast() runs in atomic context and only updates the
 * shadow tables; this work item performs the actual (sleeping) USB
 * register writes: hash table via the data port, perfect filters via
 * MAF_HI/MAF_LO (HI cleared first so a half-written entry is never
 * valid), then RFE_CTL last to enable the new configuration.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	for (i = 1; i < NUM_OF_MAF; i++) {
		/* Invalidate the slot before rewriting it. */
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
1076
1077static void lan78xx_set_multicast(struct net_device *netdev)
1078{
1079 struct lan78xx_net *dev = netdev_priv(netdev);
1080 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1081 unsigned long flags;
1082 int i;
1083
1084 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1085
1086 pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1087 RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1088
1089 for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1090 pdata->mchash_table[i] = 0;
1091
1092 for (i = 1; i < NUM_OF_MAF; i++) {
1093 pdata->pfilter_table[i][0] =
1094 pdata->pfilter_table[i][1] = 0;
1095 }
1096
1097 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1098
1099 if (dev->net->flags & IFF_PROMISC) {
1100 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1101 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1102 } else {
1103 if (dev->net->flags & IFF_ALLMULTI) {
1104 netif_dbg(dev, drv, dev->net,
1105 "receive all multicast enabled");
1106 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1107 }
1108 }
1109
1110 if (netdev_mc_count(dev->net)) {
1111 struct netdev_hw_addr *ha;
1112 int i;
1113
1114 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1115
1116 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1117
1118 i = 1;
1119 netdev_for_each_mc_addr(ha, netdev) {
1120
1121 if (i < 33) {
1122 lan78xx_set_addr_filter(pdata, i, ha->addr);
1123 } else {
1124 u32 bitnum = lan78xx_hash(ha->addr);
1125
1126 pdata->mchash_table[bitnum / 32] |=
1127 (1 << (bitnum % 32));
1128 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1129 }
1130 i++;
1131 }
1132 }
1133
1134 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1135
1136
1137 schedule_work(&pdata->set_multicast);
1138}
1139
1140static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1141 u16 lcladv, u16 rmtadv)
1142{
1143 u32 flow = 0, fct_flow = 0;
1144 int ret;
1145 u8 cap;
1146
1147 if (dev->fc_autoneg)
1148 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1149 else
1150 cap = dev->fc_request_control;
1151
1152 if (cap & FLOW_CTRL_TX)
1153 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1154
1155 if (cap & FLOW_CTRL_RX)
1156 flow |= FLOW_CR_RX_FCEN_;
1157
1158 if (dev->udev->speed == USB_SPEED_SUPER)
1159 fct_flow = 0x817;
1160 else if (dev->udev->speed == USB_SPEED_HIGH)
1161 fct_flow = 0x211;
1162
1163 netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1164 (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1165 (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1166
1167 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1168
1169
1170 ret = lan78xx_write_reg(dev, FLOW, flow);
1171
1172 return 0;
1173}
1174
/* Handle a PHY link-state change (run from the deferred kevent work).
 *
 * Acknowledges the PHY interrupt, re-reads PHY status, and on a
 * down-transition resets the MAC and stops the stats timer; on an
 * up-transition tunes USB LPM (U1/U2) for SuperSpeed links, reprograms
 * flow control from the resolved advertisement, restarts the stats
 * timer, and kicks the bottom half.  Returns 0/positive on success or
 * a negative errno.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret;
	u32 buf;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		/* No link: no point refreshing hardware statistics. */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ecmd.base.speed == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);

		/* Restart periodic statistics collection now that the
		 * link is up.
		 */
		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		tasklet_schedule(&dev->bh);
	}

	return ret;
}
1252
1253
1254
1255
1256
1257
/* Defer work (identified by an EVENT_* bit set in dev->flags) to the
 * driver workqueue; safe to call from contexts that cannot sleep, such
 * as URB completion handlers.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1264
/* Interrupt-endpoint URB completion: decode the 4-byte (little-endian)
 * status word.  A PHY interrupt defers link handling to the work queue
 * and, if an irq domain is set up, forwards it to the mapped PHY irq.
 */
static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	/* device reports status little-endian; convert in place */
	memcpy(&intdata, urb->transfer_buffer, 4);
	le32_to_cpus(&intdata);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq(dev->domain_data.phyirq);
	} else
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
}
1288
/* ethtool: size in bytes of the accessible EEPROM space */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1293
/* ethtool: read ee->len raw EEPROM bytes from ee->offset into data.
 * The interface is resumed for the duration of the access.
 */
static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1312
1313static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1314 struct ethtool_eeprom *ee, u8 *data)
1315{
1316 struct lan78xx_net *dev = netdev_priv(netdev);
1317 int ret;
1318
1319 ret = usb_autopm_get_interface(dev->intf);
1320 if (ret)
1321 return ret;
1322
1323
1324
1325
1326 if (ee->magic == LAN78XX_EEPROM_MAGIC)
1327 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1328 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1329 (ee->offset == 0) &&
1330 (ee->len == 512) &&
1331 (data[0] == OTP_INDICATOR_1))
1332 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1333
1334 usb_autopm_put_interface(dev->intf);
1335
1336 return ret;
1337}
1338
1339static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1340 u8 *data)
1341{
1342 if (stringset == ETH_SS_STATS)
1343 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1344}
1345
1346static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1347{
1348 if (sset == ETH_SS_STATS)
1349 return ARRAY_SIZE(lan78xx_gstrings);
1350 else
1351 return -EOPNOTSUPP;
1352}
1353
/* ethtool: snapshot the 64-bit statistics.  Triggers a refresh of the
 * hardware counters, then copies the accumulated values under the
 * stats lock so the snapshot is consistent.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1365
/* ethtool: report Wake-on-LAN capability and current selection.
 * WoL is only advertised when the USB configuration has remote wakeup
 * enabled (USB_CFG_RMT_WKP_); otherwise nothing is supported.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		/* cannot query the device: report no WoL support */
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
1393
1394static int lan78xx_set_wol(struct net_device *netdev,
1395 struct ethtool_wolinfo *wol)
1396{
1397 struct lan78xx_net *dev = netdev_priv(netdev);
1398 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1399 int ret;
1400
1401 ret = usb_autopm_get_interface(dev->intf);
1402 if (ret < 0)
1403 return ret;
1404
1405 pdata->wol = 0;
1406 if (wol->wolopts & WAKE_UCAST)
1407 pdata->wol |= WAKE_UCAST;
1408 if (wol->wolopts & WAKE_MCAST)
1409 pdata->wol |= WAKE_MCAST;
1410 if (wol->wolopts & WAKE_BCAST)
1411 pdata->wol |= WAKE_BCAST;
1412 if (wol->wolopts & WAKE_MAGIC)
1413 pdata->wol |= WAKE_MAGIC;
1414 if (wol->wolopts & WAKE_PHY)
1415 pdata->wol |= WAKE_PHY;
1416 if (wol->wolopts & WAKE_ARP)
1417 pdata->wol |= WAKE_ARP;
1418
1419 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1420
1421 phy_ethtool_set_wol(netdev->phydev, wol);
1422
1423 usb_autopm_put_interface(dev->intf);
1424
1425 return ret;
1426}
1427
/* ethtool: report Energy-Efficient-Ethernet state.  Combines the PHY's
 * EEE advertisement with the MAC enable bit (MAC_CR_EEE_EN_) and the TX
 * LPI request delay register.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		/* EEE is active only when both link partners advertise it */
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;

		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1465
1466static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1467{
1468 struct lan78xx_net *dev = netdev_priv(net);
1469 int ret;
1470 u32 buf;
1471
1472 ret = usb_autopm_get_interface(dev->intf);
1473 if (ret < 0)
1474 return ret;
1475
1476 if (edata->eee_enabled) {
1477 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1478 buf |= MAC_CR_EEE_EN_;
1479 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1480
1481 phy_ethtool_set_eee(net->phydev, edata);
1482
1483 buf = (u32)edata->tx_lpi_timer;
1484 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1485 } else {
1486 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1487 buf &= ~MAC_CR_EEE_EN_;
1488 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1489 }
1490
1491 usb_autopm_put_interface(dev->intf);
1492
1493 return 0;
1494}
1495
1496static u32 lan78xx_get_link(struct net_device *net)
1497{
1498 phy_read_status(net->phydev);
1499
1500 return net->phydev->link;
1501}
1502
1503static void lan78xx_get_drvinfo(struct net_device *net,
1504 struct ethtool_drvinfo *info)
1505{
1506 struct lan78xx_net *dev = netdev_priv(net);
1507
1508 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1509 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1510}
1511
1512static u32 lan78xx_get_msglevel(struct net_device *net)
1513{
1514 struct lan78xx_net *dev = netdev_priv(net);
1515
1516 return dev->msg_enable;
1517}
1518
1519static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1520{
1521 struct lan78xx_net *dev = netdev_priv(net);
1522
1523 dev->msg_enable = level;
1524}
1525
/* ethtool: report current link settings from the PHY; the device is
 * resumed around the query.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1543
/* ethtool: apply new link settings via the PHY.  When autoneg is off,
 * bounce the link (briefly enter loopback) so the forced speed/duplex
 * takes effect on the link partner.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down briefly so the new forced mode is taken up */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
1571
1572static void lan78xx_get_pause(struct net_device *net,
1573 struct ethtool_pauseparam *pause)
1574{
1575 struct lan78xx_net *dev = netdev_priv(net);
1576 struct phy_device *phydev = net->phydev;
1577 struct ethtool_link_ksettings ecmd;
1578
1579 phy_ethtool_ksettings_get(phydev, &ecmd);
1580
1581 pause->autoneg = dev->fc_autoneg;
1582
1583 if (dev->fc_request_control & FLOW_CTRL_TX)
1584 pause->tx_pause = 1;
1585
1586 if (dev->fc_request_control & FLOW_CTRL_RX)
1587 pause->rx_pause = 1;
1588}
1589
/* ethtool: configure pause frames.  Records the request in driver state
 * and, when link autoneg is on, rewrites the Pause/Asym_Pause
 * advertisement bits and restarts autonegotiation.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg is only meaningful when link autoneg is enabled */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		/* rebuild the flow-control advertisement from the request */
		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
		u32 mii_adv;

		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
				   ecmd.link_modes.advertising);
		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
				   ecmd.link_modes.advertising);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		mii_adv_to_linkmode_adv_t(fc, mii_adv);
		linkmode_or(ecmd.link_modes.advertising, fc,
			    ecmd.link_modes.advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
1634
1635static int lan78xx_get_regs_len(struct net_device *netdev)
1636{
1637 if (!netdev->phydev)
1638 return (sizeof(lan78xx_regs));
1639 else
1640 return (sizeof(lan78xx_regs) + PHY_REG_SIZE);
1641}
1642
/* ethtool: dump device registers — first the MAC register list, then
 * (when a PHY is attached) MII registers 0..31.  Layout must match
 * lan78xx_get_regs_len().
 */
static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	u32 *data = buf;
	int i, j;
	struct lan78xx_net *dev = netdev_priv(netdev);

	/* Read Device/MAC registers */
	for (i = 0; i < (sizeof(lan78xx_regs) / sizeof(u32)); i++)
		lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);

	if (!netdev->phydev)
		return;

	/* Read PHY registers; i continues past the MAC block */
	for (j = 0; j < 32; i++, j++)
		data[i] = phy_read(netdev->phydev, j);
}
1662
/* ethtool operation table: link, EEPROM/OTP, statistics, WoL, EEE,
 * pause and register-dump support.
 */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
1686
1687static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1688{
1689 if (!netif_running(netdev))
1690 return -EINVAL;
1691
1692 return phy_mii_ioctl(netdev->phydev, rq, cmd);
1693}
1694
/* Establish the station MAC address at probe time.  Priority: address
 * already latched in RX_ADDRL/H (e.g. set by bootloader), then platform
 * data / device tree, then EEPROM/OTP, finally a random address.  The
 * chosen address is also programmed into perfect-filter slot 0.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address supplied by platform/DT */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC as a last resort */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
	}

	/* mirror the station address into perfect-filter slot 0 */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
1744
1745
/* MDIO bus read callback: read PHY register @idx of @phy_id through the
 * MAC's MII_ACC/MII_DATA bridge.  Returns the 16-bit register value or
 * a negative error code.  Serialized by dev->phy_mutex.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1781
1782static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1783 u16 regval)
1784{
1785 struct lan78xx_net *dev = bus->priv;
1786 u32 val, addr;
1787 int ret;
1788
1789 ret = usb_autopm_get_interface(dev->intf);
1790 if (ret < 0)
1791 return ret;
1792
1793 mutex_lock(&dev->phy_mutex);
1794
1795
1796 ret = lan78xx_phy_wait_not_busy(dev);
1797 if (ret < 0)
1798 goto done;
1799
1800 val = (u32)regval;
1801 ret = lan78xx_write_reg(dev, MII_DATA, val);
1802
1803
1804 addr = mii_access(phy_id, idx, MII_WRITE);
1805 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1806
1807 ret = lan78xx_phy_wait_not_busy(dev);
1808 if (ret < 0)
1809 goto done;
1810
1811done:
1812 mutex_unlock(&dev->phy_mutex);
1813 usb_autopm_put_interface(dev->intf);
1814 return 0;
1815}
1816
/* Allocate and register the MDIO bus backed by the chip's MII bridge.
 * The phy_mask limits probing: LAN7800/7850 have an internal PHY at
 * address 1; LAN7801 scans external PHYs at addresses 0-7.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* unique bus id from USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	if (node)
		of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
1863
/* Tear down the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
1869
/* phylib link-change callback.  Workaround: at forced 100 full/half the
 * chip may fail to latch the mode when the cable is swapped between a
 * long (~50m+) and a short run, so drop to 10 and back to 100 while the
 * PHY interrupt is masked.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt while toggling speed */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		/* force 10, then restore 100 */
		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp);
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp);

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
1901
/* irq_domain map callback: bind a virq to the lan78xx irqchip/handler. */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
1913
/* irq_domain unmap callback: undo irq_map() for a virq. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
1919
/* irq_domain operations for the device's interrupt-endpoint sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
1924
/* irqchip mask: clear the source bit in the cached enable mask; the
 * hardware register is written later in irq_bus_sync_unlock (USB access
 * cannot happen in atomic context).
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
1931
/* irqchip unmask: set the source bit in the cached enable mask; flushed
 * to hardware in irq_bus_sync_unlock.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
1938
/* irqchip bus_lock: taken around mask/unmask so the INT_EP_CTL update
 * in irq_bus_sync_unlock is atomic with respect to other callers.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
1945
/* irqchip bus_sync_unlock: the register access happens here (not in
 * mask/unmask) because USB transfers can sleep and mask/unmask run in
 * atomic context; only writes INT_EP_CTL when the mask actually changed.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
1963
/* irqchip backing the device's interrupt-endpoint sources; uses the
 * slow-bus (bus_lock/sync_unlock) pattern since writes go over USB.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
1971
/* Create an irq domain for the device's interrupt-endpoint sources and
 * map the PHY interrupt (INT_EP_PHY) so phylib can use a real irq.
 * Returns 0 on success or -EINVAL.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable mask from the current hardware state */
	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2010
/* Undo lan78xx_setup_irq_domain(): dispose the PHY irq mapping and
 * remove the domain.
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
2022
/* PHY fixup for LAN8835 on LAN7801: route the multiplexed pin to IRQ_N
 * mode and enable the MAC-side RGMII TX clock delay.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
2046
/* PHY fixup for KSZ9031RNX on LAN7801: program RGMII pad skews and use
 * RX internal delay on the MAC interface.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
2063
/* LAN7801 PHY setup: find an external PHY on the MDIO bus, or fall back
 * to a fixed 1G full-duplex PHY (e.g. direct MAC-to-switch links).  For
 * real PHYs, register fixups for the supported KSZ9031RNX and LAN8835.
 * Returns the phy_device or NULL on failure.
 */
static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_dbg(dev->net, "PHY Not Found!! Registering Fixed PHY\n");
		phydev = fixed_phy_register(PHY_POLL, &fphy_status, -1,
					    NULL);
		if (IS_ERR(phydev)) {
			netdev_err(dev->net, "No PHY/fixed_PHY found\n");
			return NULL;
		}
		netdev_dbg(dev->net, "Registered FIXED PHY\n");
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* fixed link: MAC provides RGMII delays and clocks */
		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
					MAC_RGMII_ID_TXC_DELAY_EN_);
		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		buf |= HW_CFG_CLK125_EN_;
		buf |= HW_CFG_REFCLK25_EN_;
		ret = lan78xx_write_reg(dev, HW_CFG, buf);
	} else {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return NULL;
		}
		dev->interface = PHY_INTERFACE_MODE_RGMII;
		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_KSZ9031RNX\n");
			return NULL;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "Failed to register fixup for PHY_LAN8835\n");
			return NULL;
		}

		phydev->is_internal = false;
	}
	return phydev;
}
2119
/* Locate and attach the PHY for the detected chip, wire up the PHY irq
 * (or fall back to polling), trim unsupported link modes, advertise
 * flow control, honor DT LED configuration, and kick off autoneg.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7801_:
		phydev = lan7801_phy_init(dev);
		if (!phydev) {
			netdev_err(dev->net, "lan7801: PHY Init Failed");
			return -EIO;
		}
		break;

	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		phydev = phy_find_first(dev->mdiobus);
		if (!phydev) {
			netdev_err(dev->net, "no PHY found\n");
			return -EIO;
		}
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;
		break;

	default:
		netdev_err(dev->net, "Unknown CHIP ID found\n");
		return -EIO;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* undo any LAN7801-specific registration before bailing */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
			} else {
				phy_unregister_fixup_for_uid(PHY_KSZ9031RNX,
							     0xfffffff0);
				phy_unregister_fixup_for_uid(PHY_LAN8835,
							     0xfffffff0);
			}
		}
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	if (phydev->mdio.dev.of_node) {
		u32 reg;
		int len;

		len = of_property_count_elems_of_size(phydev->mdio.dev.of_node,
						      "microchip,led-modes",
						      sizeof(u32));
		if (len >= 0) {
			/* Ensure the appropriate LEDs are enabled */
			lan78xx_read_reg(dev, HW_CFG, &reg);
			reg &= ~(HW_CFG_LED0_EN_ |
				 HW_CFG_LED1_EN_ |
				 HW_CFG_LED2_EN_ |
				 HW_CFG_LED3_EN_);
			reg |= (len > 0) * HW_CFG_LED0_EN_ |
				(len > 1) * HW_CFG_LED1_EN_ |
				(len > 2) * HW_CFG_LED2_EN_ |
				(len > 3) * HW_CFG_LED3_EN_;
			lan78xx_write_reg(dev, HW_CFG, reg);
		}
	}

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;
}
2222
2223static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2224{
2225 int ret = 0;
2226 u32 buf;
2227 bool rxenabled;
2228
2229 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2230
2231 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2232
2233 if (rxenabled) {
2234 buf &= ~MAC_RX_RXEN_;
2235 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2236 }
2237
2238
2239 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2240 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2241
2242 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2243
2244 if (rxenabled) {
2245 buf |= MAC_RX_RXEN_;
2246 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2247 }
2248
2249 return 0;
2250}
2251
/* Unlink all in-flight URBs on queue @q (rx or tx).  Returns how many
 * unlinks were issued; the completion handlers finish the skbs
 * asynchronously.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data *entry;
		struct urb *urb;
		int ret;

		/* find the first entry not already marked for unlinking */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Hold a reference on the URB: usb_unlink_urb races with
		 * the completion handler, which may free the URB, so the
		 * extra reference prevents a use-after-free inside
		 * usb_unlink_urb.
		 */
		usb_get_urb(urb);
		/* drop the queue lock — usb_unlink_urb may complete
		 * synchronously (e.g. during PM resume) and completion
		 * handlers take this lock
		 */
		spin_unlock_irqrestore(&q->lock, flags);

		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2296
/* ndo_change_mtu: update the hardware max frame length and grow the rx
 * URB size if needed.  Returns 0 or a negative error code.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			/* rx buffers grew: drop in-flight rx URBs so they
			 * get resubmitted at the new size
			 */
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
2326
2327static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2328{
2329 struct lan78xx_net *dev = netdev_priv(netdev);
2330 struct sockaddr *addr = p;
2331 u32 addr_lo, addr_hi;
2332 int ret;
2333
2334 if (netif_running(netdev))
2335 return -EBUSY;
2336
2337 if (!is_valid_ether_addr(addr->sa_data))
2338 return -EADDRNOTAVAIL;
2339
2340 ether_addr_copy(netdev->dev_addr, addr->sa_data);
2341
2342 addr_lo = netdev->dev_addr[0] |
2343 netdev->dev_addr[1] << 8 |
2344 netdev->dev_addr[2] << 16 |
2345 netdev->dev_addr[3] << 24;
2346 addr_hi = netdev->dev_addr[4] |
2347 netdev->dev_addr[5] << 8;
2348
2349 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2350 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2351
2352 return 0;
2353}
2354
2355
/* ndo_set_features: translate RX checksum offload and VLAN strip/filter
 * feature flags into the cached RFE_CTL value (under the rfe_ctl lock)
 * and write it to the device.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
2390
/* Work item: flush the shadow VLAN filter table to the device's VLAN
 * dataport.  Deferred because the USB register access can sleep while
 * ndo_vlan_rx_add/kill_vid may not.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
2400
2401static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2402 __be16 proto, u16 vid)
2403{
2404 struct lan78xx_net *dev = netdev_priv(netdev);
2405 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2406 u16 vid_bit_index;
2407 u16 vid_dword_index;
2408
2409 vid_dword_index = (vid >> 5) & 0x7F;
2410 vid_bit_index = vid & 0x1F;
2411
2412 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2413
2414
2415 schedule_work(&pdata->set_vlan);
2416
2417 return 0;
2418}
2419
2420static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2421 __be16 proto, u16 vid)
2422{
2423 struct lan78xx_net *dev = netdev_priv(netdev);
2424 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2425 u16 vid_bit_index;
2426 u16 vid_dword_index;
2427
2428 vid_dword_index = (vid >> 5) & 0x7F;
2429 vid_bit_index = vid & 0x1F;
2430
2431 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2432
2433
2434 schedule_work(&pdata->set_vlan);
2435
2436 return 0;
2437}
2438
2439static void lan78xx_init_ltm(struct lan78xx_net *dev)
2440{
2441 int ret;
2442 u32 buf;
2443 u32 regs[6] = { 0 };
2444
2445 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2446 if (buf & USB_CFG1_LTM_ENABLE_) {
2447 u8 temp[2];
2448
2449 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2450 if (temp[0] == 24) {
2451 ret = lan78xx_read_raw_eeprom(dev,
2452 temp[1] * 2,
2453 24,
2454 (u8 *)regs);
2455 if (ret < 0)
2456 return;
2457 }
2458 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2459 if (temp[0] == 24) {
2460 ret = lan78xx_read_raw_otp(dev,
2461 temp[1] * 2,
2462 24,
2463 (u8 *)regs);
2464 if (ret < 0)
2465 return;
2466 }
2467 }
2468 }
2469
2470 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2471 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2472 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2473 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2474 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2475 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2476}
2477
/* Bring the chip through a LiteReset and program MAC, FIFO and USB
 * defaults. May sleep (bounded mdelay polling loops, ~1s each).
 * Returns 0 on success or -EIO if a reset/ready poll times out.
 * NOTE(review): most lan78xx_read_reg/write_reg return codes are
 * assigned to ret but never checked - register access errors are
 * silently ignored here; confirm that is acceptable.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;
	u8 sig;

	/* trigger LiteReset and wait for the self-clearing bit */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save chip id / revision for later feature decisions */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* enable bulk-in rejection (BIR) */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* program LTM registers from EEPROM/OTP (or zeros) */
	lan78xx_init_ltm(dev);

	/* burst cap and queue depths scale with negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	/* enable multiple ethernet frames per USB transfer */
	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* enable burst cap */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes (units of 512 bytes) */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	/* clear any stale interrupts, disable flow control for now */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* cache RFE_CTL and enable broadcast / perfect DA filtering */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* re-apply offload settings and multicast filters */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset the PHY and wait for it to come out of reset + ready */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);

	/* LAN7801 uses an external PHY; don't force internal GMII */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;

	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
		if (!ret && sig != EEPROM_INDICATOR) {
			/* no EEPROM: let the MAC track PHY speed/duplex */
			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
		}
	}
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	/* enable the MAC and FIFO controller transmit paths */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev,
					      dev->net->mtu + VLAN_ETH_HLEN);

	/* enable the MAC and FIFO controller receive paths */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
2616
2617static void lan78xx_init_stats(struct lan78xx_net *dev)
2618{
2619 u32 *p;
2620 int i;
2621
2622
2623
2624
2625 p = (u32 *)&dev->stats.rollover_max;
2626 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2627 p[i] = 0xFFFFF;
2628
2629 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2630 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2631 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2632 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2633 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2634 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2635 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2636 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2637 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2638 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2639
2640 set_bit(EVENT_STAT_UPDATE, &dev->flags);
2641}
2642
/* ndo_open: wake the device, start the PHY, submit the interrupt URB
 * and kick a deferred link reset. Returns 0 or a negative errno.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	/* hold a PM reference so the device cannot autosuspend here */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* interrupt endpoint carries link/status events */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	/* link state is established asynchronously by the kevent worker */
	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	/* drop the PM reference taken above */
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
2681
/* Unlink every pending rx/tx URB and wait (uninterruptibly) for their
 * completions to drain. Called from ndo_stop context.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* publish the wait queue, then request unlink of all in-flight URBs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* NOTE(review): this condition uses &&, so the wait ends as soon
	 * as ANY of rxq/txq/done becomes empty rather than when all are
	 * drained - confirm this is the intended drain condition.
	 */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
2707
/* ndo_stop: stop the stats timer and PHY, drain in-flight URBs and
 * quiesce all deferred work. Always returns 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	/* stop the periodic statistics refresh first */
	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	if (net->phydev)
		phy_stop(net->phydev);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* clearing all flags makes the deferred workers no-ops before we
	 * cancel them, so they cannot requeue themselves
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* release the PM reference taken in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
2744
/* Thin wrapper around skb_linearize(); kept as a named helper so the
 * tx path reads at driver level.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2749
2750static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2751 struct sk_buff *skb, gfp_t flags)
2752{
2753 u32 tx_cmd_a, tx_cmd_b;
2754
2755 if (skb_cow_head(skb, TX_OVERHEAD)) {
2756 dev_kfree_skb_any(skb);
2757 return NULL;
2758 }
2759
2760 if (lan78xx_linearize(skb) < 0)
2761 return NULL;
2762
2763 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2764
2765 if (skb->ip_summed == CHECKSUM_PARTIAL)
2766 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2767
2768 tx_cmd_b = 0;
2769 if (skb_is_gso(skb)) {
2770 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2771
2772 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2773
2774 tx_cmd_a |= TX_CMD_A_LSO_;
2775 }
2776
2777 if (skb_vlan_tag_present(skb)) {
2778 tx_cmd_a |= TX_CMD_A_IVTG_;
2779 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2780 }
2781
2782 skb_push(skb, 4);
2783 cpu_to_le32s(&tx_cmd_b);
2784 memcpy(skb->data, &tx_cmd_b, 4);
2785
2786 skb_push(skb, 4);
2787 cpu_to_le32s(&tx_cmd_a);
2788 memcpy(skb->data, &tx_cmd_a, 4);
2789
2790 return skb;
2791}
2792
/* Move @skb from @list to dev->done and schedule the bh tasklet if the
 * done queue was empty. Returns the skb's previous state so callers
 * can detect a concurrent unlink.
 * Locking: list->lock is taken with irqsave, then handed off to
 * done.lock; irqs stay disabled across both critical sections and are
 * restored on the final unlock.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* first entry on done: wake the tasklet exactly once */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
2815
2816static void tx_complete(struct urb *urb)
2817{
2818 struct sk_buff *skb = (struct sk_buff *)urb->context;
2819 struct skb_data *entry = (struct skb_data *)skb->cb;
2820 struct lan78xx_net *dev = entry->dev;
2821
2822 if (urb->status == 0) {
2823 dev->net->stats.tx_packets += entry->num_of_packet;
2824 dev->net->stats.tx_bytes += entry->length;
2825 } else {
2826 dev->net->stats.tx_errors++;
2827
2828 switch (urb->status) {
2829 case -EPIPE:
2830 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2831 break;
2832
2833
2834 case -ECONNRESET:
2835 case -ESHUTDOWN:
2836 break;
2837
2838 case -EPROTO:
2839 case -ETIME:
2840 case -EILSEQ:
2841 netif_stop_queue(dev->net);
2842 break;
2843 default:
2844 netif_dbg(dev, tx_err, dev->net,
2845 "tx err %d\n", entry->urb->status);
2846 break;
2847 }
2848 }
2849
2850 usb_autopm_put_interface_async(dev->intf);
2851
2852 defer_bh(dev, skb, &dev->txq, tx_done);
2853}
2854
2855static void lan78xx_queue_skb(struct sk_buff_head *list,
2856 struct sk_buff *newsk, enum skb_state state)
2857{
2858 struct skb_data *entry = (struct skb_data *)newsk->cb;
2859
2860 __skb_queue_tail(list, newsk);
2861 entry->state = state;
2862}
2863
2864static netdev_tx_t
2865lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2866{
2867 struct lan78xx_net *dev = netdev_priv(net);
2868 struct sk_buff *skb2 = NULL;
2869
2870 if (skb) {
2871 skb_tx_timestamp(skb);
2872 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2873 }
2874
2875 if (skb2) {
2876 skb_queue_tail(&dev->txq_pend, skb2);
2877
2878
2879 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2880 (skb_queue_len(&dev->txq_pend) > 10))
2881 netif_stop_queue(net);
2882 } else {
2883 netif_dbg(dev, tx_err, dev->net,
2884 "lan78xx_tx_prep return NULL\n");
2885 dev->net->stats.tx_errors++;
2886 dev->net->stats.tx_dropped++;
2887 }
2888
2889 tasklet_schedule(&dev->bh);
2890
2891 return NETDEV_TX_OK;
2892}
2893
/* Scan the interface's altsettings for a bulk-in, bulk-out and
 * optional interrupt-in endpoint, and record the resulting pipes.
 * Returns 0 on success or -EINVAL when no usable in/out pair exists.
 */
static int
lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* fall through - INT-in endpoints share the
				 * direction matching below with bulk ones
				 */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		/* first altsetting with both directions wins */
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2952
2953static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2954{
2955 struct lan78xx_priv *pdata = NULL;
2956 int ret;
2957 int i;
2958
2959 ret = lan78xx_get_endpoints(dev, intf);
2960
2961 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2962
2963 pdata = (struct lan78xx_priv *)(dev->data[0]);
2964 if (!pdata) {
2965 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2966 return -ENOMEM;
2967 }
2968
2969 pdata->dev = dev;
2970
2971 spin_lock_init(&pdata->rfe_ctl_lock);
2972 mutex_init(&pdata->dataport_mutex);
2973
2974 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2975
2976 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2977 pdata->vlan_table[i] = 0;
2978
2979 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2980
2981 dev->net->features = 0;
2982
2983 if (DEFAULT_TX_CSUM_ENABLE)
2984 dev->net->features |= NETIF_F_HW_CSUM;
2985
2986 if (DEFAULT_RX_CSUM_ENABLE)
2987 dev->net->features |= NETIF_F_RXCSUM;
2988
2989 if (DEFAULT_TSO_CSUM_ENABLE)
2990 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2991
2992 if (DEFAULT_VLAN_RX_OFFLOAD)
2993 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2994
2995 if (DEFAULT_VLAN_FILTER_ENABLE)
2996 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2997
2998 dev->net->hw_features = dev->net->features;
2999
3000 ret = lan78xx_setup_irq_domain(dev);
3001 if (ret < 0) {
3002 netdev_warn(dev->net,
3003 "lan78xx_setup_irq_domain() failed : %d", ret);
3004 goto out1;
3005 }
3006
3007 dev->net->hard_header_len += TX_OVERHEAD;
3008 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
3009
3010
3011 ret = lan78xx_reset(dev);
3012 if (ret) {
3013 netdev_warn(dev->net, "Registers INIT FAILED....");
3014 goto out2;
3015 }
3016
3017 ret = lan78xx_mdio_init(dev);
3018 if (ret) {
3019 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3020 goto out2;
3021 }
3022
3023 dev->net->flags |= IFF_MULTICAST;
3024
3025 pdata->wol = WAKE_MAGIC;
3026
3027 return ret;
3028
3029out2:
3030 lan78xx_remove_irq_domain(dev);
3031
3032out1:
3033 netdev_warn(dev->net, "Bind routine FAILED");
3034 cancel_work_sync(&pdata->set_multicast);
3035 cancel_work_sync(&pdata->set_vlan);
3036 kfree(pdata);
3037 return ret;
3038}
3039
3040static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3041{
3042 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3043
3044 lan78xx_remove_irq_domain(dev);
3045
3046 lan78xx_remove_mdio(dev);
3047
3048 if (pdata) {
3049 cancel_work_sync(&pdata->set_multicast);
3050 cancel_work_sync(&pdata->set_vlan);
3051 netif_dbg(dev, ifdown, dev->net, "free pdata");
3052 kfree(pdata);
3053 pdata = NULL;
3054 dev->data[0] = 0;
3055 }
3056}
3057
3058static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3059 struct sk_buff *skb,
3060 u32 rx_cmd_a, u32 rx_cmd_b)
3061{
3062
3063
3064
3065 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3066 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3067 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3068 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3069 skb->ip_summed = CHECKSUM_NONE;
3070 } else {
3071 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3072 skb->ip_summed = CHECKSUM_COMPLETE;
3073 }
3074}
3075
3076static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3077 struct sk_buff *skb,
3078 u32 rx_cmd_a, u32 rx_cmd_b)
3079{
3080 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3081 (rx_cmd_a & RX_CMD_A_FVTG_))
3082 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3083 (rx_cmd_b & 0xffff));
3084}
3085
3086static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3087{
3088 int status;
3089
3090 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
3091 skb_queue_tail(&dev->rxq_pause, skb);
3092 return;
3093 }
3094
3095 dev->net->stats.rx_packets++;
3096 dev->net->stats.rx_bytes += skb->len;
3097
3098 skb->protocol = eth_type_trans(skb, dev->net);
3099
3100 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
3101 skb->len + sizeof(struct ethhdr), skb->protocol);
3102 memset(skb->cb, 0, sizeof(struct skb_data));
3103
3104 if (skb_defer_rx_timestamp(skb))
3105 return;
3106
3107 status = netif_rx(skb);
3108 if (status != NET_RX_SUCCESS)
3109 netif_dbg(dev, rx_err, dev->net,
3110 "netif_rx status %d\n", status);
3111}
3112
/* Parse one bulk-in buffer which may hold several ethernet frames.
 * Each frame is prefixed by three little-endian command words
 * (rx_cmd_a/b/c) and padded to a 4-byte boundary (RXW_PADDING covers
 * the 2-byte cmd_c). Good frames are handed to lan78xx_skb_return();
 * returns 1 on success, 0 when the buffer is too short to parse.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		/* peel off the three command words */
		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* frame length and padding to the next 4-byte boundary */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error flag set: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last (or only) frame in the buffer: deliver the
			 * original skb directly, no clone needed
			 */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);
				lan78xx_rx_vlan_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				/* drop the trailing 4 bytes - presumably
				 * the FCS; confirm against MAC_RX config
				 */
				skb_trim(skb, skb->len - 4);
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame's payload */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4);
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* skip the inter-frame alignment padding */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3187
3188static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3189{
3190 if (!lan78xx_rx(dev, skb)) {
3191 dev->net->stats.rx_errors++;
3192 goto done;
3193 }
3194
3195 if (skb->len) {
3196 lan78xx_skb_return(dev, skb);
3197 return;
3198 }
3199
3200 netif_dbg(dev, rx_err, dev->net, "drop\n");
3201 dev->net->stats.rx_errors++;
3202done:
3203 skb_queue_tail(&dev->done, skb);
3204}
3205
3206static void rx_complete(struct urb *urb);
3207
/* Allocate an rx skb for @urb and submit it on the bulk-in pipe.
 * Takes ownership of @urb: on any failure both the urb and the skb are
 * freed. Returns 0, -ENOMEM, -ENOLINK (device unusable right now) or
 * the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, running and healthy */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			/* let the tasklet retry the refill later */
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
3267
/* Completion handler for bulk-in URBs: classify the result, hand the
 * skb to the bh tasklet via defer_bh(), and resubmit the urb when the
 * device is still usable.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		/* success - but discard runt buffers */
		if (skb->len < dev->net->hard_header_len) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* fall through - treat like a shutdown for cleanup */
	case -ECONNRESET:	/* async unlink */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		/* keep the urb attached so the cleanup path frees it */
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun: count it, then clean up like any other error */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* fall through */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	/* resubmit unless the urb was handed off above or rx is halted */
	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
3337
/* Bottom-half tx path: batch pending (already tx_prep'd) frames into a
 * single bulk-out URB and submit it. A GSO frame is always sent alone;
 * otherwise frames are copied back-to-back, each padded to a 4-byte
 * boundary, up to MAX_SINGLE_PACKET_SIZE total.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	skb_totallen = 0;
	pkt_cnt = 0;
	count = 0;
	length = 0;
	spin_lock_irqsave(&tqp->lock, flags);
	/* walk the pending queue to decide how many frames to aggregate */
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			/* GSO frame goes out on its own, unaggregated */
			count = 1;
			length = skb->len - TX_OVERHEAD;
			__skb_unlink(skb, tqp);
			spin_unlock_irqrestore(&tqp->lock, flags);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame starts on a 4-byte boundary in the batch */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}
	spin_unlock_irqrestore(&tqp->lock, flags);

	/* copy the selected frames into one contiguous buffer.
	 * NOTE(review): on alloc failure the frames stay queued and only
	 * tx_dropped is bumped via the drop label - confirm intended.
	 */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			/* length counts payload only, not the cmd words */
			length += (skb2->len - TX_OVERHEAD);
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto drop;

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;
	entry->num_of_packet = count;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	/* transfers that are a multiple of the endpoint packet size need
	 * an explicit zero-length packet to terminate
	 */
	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		netif_trans_update(dev->net);
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		/* throttle once the in-flight queue is full */
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
3465
3466static void lan78xx_rx_bh(struct lan78xx_net *dev)
3467{
3468 struct urb *urb;
3469 int i;
3470
3471 if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
3472 for (i = 0; i < 10; i++) {
3473 if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
3474 break;
3475 urb = usb_alloc_urb(0, GFP_ATOMIC);
3476 if (urb)
3477 if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
3478 return;
3479 }
3480
3481 if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
3482 tasklet_schedule(&dev->bh);
3483 }
3484 if (skb_queue_len(&dev->txq) < dev->tx_qlen)
3485 netif_wake_queue(dev->net);
3486}
3487
/* Main bottom-half tasklet: reap completed skbs from dev->done, then
 * kick tx aggregation and rx refill while the interface is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: abort processing this round */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* traffic seen: reset the stats refresh back-off to 1 */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
3531
3532static void lan78xx_delayedwork(struct work_struct *work)
3533{
3534 int status;
3535 struct lan78xx_net *dev;
3536
3537 dev = container_of(work, struct lan78xx_net, wq.work);
3538
3539 if (test_bit(EVENT_TX_HALT, &dev->flags)) {
3540 unlink_urbs(dev, &dev->txq);
3541 status = usb_autopm_get_interface(dev->intf);
3542 if (status < 0)
3543 goto fail_pipe;
3544 status = usb_clear_halt(dev->udev, dev->pipe_out);
3545 usb_autopm_put_interface(dev->intf);
3546 if (status < 0 &&
3547 status != -EPIPE &&
3548 status != -ESHUTDOWN) {
3549 if (netif_msg_tx_err(dev))
3550fail_pipe:
3551 netdev_err(dev->net,
3552 "can't clear tx halt, status %d\n",
3553 status);
3554 } else {
3555 clear_bit(EVENT_TX_HALT, &dev->flags);
3556 if (status != -ESHUTDOWN)
3557 netif_wake_queue(dev->net);
3558 }
3559 }
3560 if (test_bit(EVENT_RX_HALT, &dev->flags)) {
3561 unlink_urbs(dev, &dev->rxq);
3562 status = usb_autopm_get_interface(dev->intf);
3563 if (status < 0)
3564 goto fail_halt;
3565 status = usb_clear_halt(dev->udev, dev->pipe_in);
3566 usb_autopm_put_interface(dev->intf);
3567 if (status < 0 &&
3568 status != -EPIPE &&
3569 status != -ESHUTDOWN) {
3570 if (netif_msg_rx_err(dev))
3571fail_halt:
3572 netdev_err(dev->net,
3573 "can't clear rx halt, status %d\n",
3574 status);
3575 } else {
3576 clear_bit(EVENT_RX_HALT, &dev->flags);
3577 tasklet_schedule(&dev->bh);
3578 }
3579 }
3580
3581 if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
3582 int ret = 0;
3583
3584 clear_bit(EVENT_LINK_RESET, &dev->flags);
3585 status = usb_autopm_get_interface(dev->intf);
3586 if (status < 0)
3587 goto skip_reset;
3588 if (lan78xx_link_reset(dev) < 0) {
3589 usb_autopm_put_interface(dev->intf);
3590skip_reset:
3591 netdev_info(dev->net, "link reset failed (%d)\n",
3592 ret);
3593 } else {
3594 usb_autopm_put_interface(dev->intf);
3595 }
3596 }
3597
3598 if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
3599 lan78xx_update_stats(dev);
3600
3601 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3602
3603 mod_timer(&dev->stat_monitor,
3604 jiffies + (STAT_UPDATE_TIMER * dev->delta));
3605
3606 dev->delta = min((dev->delta * 2), 50);
3607 }
3608}
3609
3610static void intr_complete(struct urb *urb)
3611{
3612 struct lan78xx_net *dev = urb->context;
3613 int status = urb->status;
3614
3615 switch (status) {
3616
3617 case 0:
3618 lan78xx_status(dev, urb);
3619 break;
3620
3621
3622 case -ENOENT:
3623 case -ESHUTDOWN:
3624 netif_dbg(dev, ifdown, dev->net,
3625 "intr shutdown, code %d\n", status);
3626 return;
3627
3628
3629
3630
3631 default:
3632 netdev_dbg(dev->net, "intr status %d\n", status);
3633 break;
3634 }
3635
3636 if (!netif_running(dev->net))
3637 return;
3638
3639 memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
3640 status = usb_submit_urb(urb, GFP_ATOMIC);
3641 if (status != 0)
3642 netif_err(dev, timer, dev->net,
3643 "intr resubmit --> %d\n", status);
3644}
3645
/* USB disconnect callback: unregister the netdev, tear down the PHY
 * and release all driver resources. The teardown order matters:
 * deferred work is cancelled only after unregister_netdev() so no new
 * work can be queued behind it.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;
	phydev = net->phydev;

	/* remove the PHY fixups registered at probe time */
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_disconnect(net->phydev);

	/* a fixed-link PHY was registered by this driver; drop it too */
	if (phy_is_pseudo_fixed_link(phydev))
		fixed_phy_unregister(phydev);

	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* free any tx URBs parked while the device was suspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
3684
3685static void lan78xx_tx_timeout(struct net_device *net)
3686{
3687 struct lan78xx_net *dev = netdev_priv(net);
3688
3689 unlink_urbs(dev, &dev->txq);
3690 tasklet_schedule(&dev->bh);
3691}
3692
/* net_device callbacks for the LAN78xx ethernet interface */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open = lan78xx_open,
	.ndo_stop = lan78xx_stop,
	.ndo_start_xmit = lan78xx_start_xmit,
	.ndo_tx_timeout = lan78xx_tx_timeout,
	.ndo_change_mtu = lan78xx_change_mtu,
	.ndo_set_mac_address = lan78xx_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = lan78xx_ioctl,
	.ndo_set_rx_mode = lan78xx_set_multicast,
	.ndo_set_features = lan78xx_set_features,
	.ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid,
};
3707
3708static void lan78xx_stat_monitor(struct timer_list *t)
3709{
3710 struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);
3711
3712 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3713}
3714
3715static int lan78xx_probe(struct usb_interface *intf,
3716 const struct usb_device_id *id)
3717{
3718 struct lan78xx_net *dev;
3719 struct net_device *netdev;
3720 struct usb_device *udev;
3721 int ret;
3722 unsigned maxp;
3723 unsigned period;
3724 u8 *buf = NULL;
3725
3726 udev = interface_to_usbdev(intf);
3727 udev = usb_get_dev(udev);
3728
3729 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3730 if (!netdev) {
3731 dev_err(&intf->dev, "Error: OOM\n");
3732 ret = -ENOMEM;
3733 goto out1;
3734 }
3735
3736
3737 SET_NETDEV_DEV(netdev, &intf->dev);
3738
3739 dev = netdev_priv(netdev);
3740 dev->udev = udev;
3741 dev->intf = intf;
3742 dev->net = netdev;
3743 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3744 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3745
3746 skb_queue_head_init(&dev->rxq);
3747 skb_queue_head_init(&dev->txq);
3748 skb_queue_head_init(&dev->done);
3749 skb_queue_head_init(&dev->rxq_pause);
3750 skb_queue_head_init(&dev->txq_pend);
3751 mutex_init(&dev->phy_mutex);
3752
3753 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3754 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3755 init_usb_anchor(&dev->deferred);
3756
3757 netdev->netdev_ops = &lan78xx_netdev_ops;
3758 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3759 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3760
3761 dev->delta = 1;
3762 timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);
3763
3764 mutex_init(&dev->stats.access_lock);
3765
3766 ret = lan78xx_bind(dev, intf);
3767 if (ret < 0)
3768 goto out2;
3769 strcpy(netdev->name, "eth%d");
3770
3771 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3772 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3773
3774
3775 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
3776
3777 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3778 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3779 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3780
3781 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3782 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3783
3784 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3785 dev->ep_intr->desc.bEndpointAddress &
3786 USB_ENDPOINT_NUMBER_MASK);
3787 period = dev->ep_intr->desc.bInterval;
3788
3789 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3790 buf = kmalloc(maxp, GFP_KERNEL);
3791 if (buf) {
3792 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3793 if (!dev->urb_intr) {
3794 ret = -ENOMEM;
3795 kfree(buf);
3796 goto out3;
3797 } else {
3798 usb_fill_int_urb(dev->urb_intr, dev->udev,
3799 dev->pipe_intr, buf, maxp,
3800 intr_complete, dev, period);
3801 }
3802 }
3803
3804 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3805
3806
3807 intf->needs_remote_wakeup = 1;
3808
3809 ret = register_netdev(netdev);
3810 if (ret != 0) {
3811 netif_err(dev, probe, netdev, "couldn't register the device\n");
3812 goto out3;
3813 }
3814
3815 usb_set_intfdata(intf, dev);
3816
3817 ret = device_set_wakeup_enable(&udev->dev, true);
3818
3819
3820
3821
3822 pm_runtime_set_autosuspend_delay(&udev->dev,
3823 DEFAULT_AUTOSUSPEND_DELAY);
3824
3825 ret = lan78xx_phy_init(dev);
3826 if (ret < 0)
3827 goto out4;
3828
3829 return 0;
3830
3831out4:
3832 unregister_netdev(netdev);
3833out3:
3834 lan78xx_unbind(dev, intf);
3835out2:
3836 free_netdev(netdev);
3837out1:
3838 usb_put_dev(udev);
3839
3840 return ret;
3841}
3842
/* Compute the 16-bit CRC (polynomial 0x8005, initial value 0xFFFF,
 * data consumed LSB first) over @len bytes of @buf.  Used to fill the
 * CRC field of the chip's wake-up frame filters (WUF_CFGx).
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 crc16poly = 0x8005;
	u16 crc = 0xFFFF;
	u16 feedback;
	u8 byte;
	int idx, bit;

	for (idx = 0; idx < len; idx++) {
		byte = buf[idx];

		for (bit = 0; bit < 8; bit++) {
			/* feedback is the (pre-shift) MSB xor'd with the
			 * next data bit
			 */
			feedback = (u16)(crc >> 15) ^ (u16)(byte & 1);
			crc <<= 1;
			if (feedback) {
				crc ^= crc16poly;
				crc |= (u16)0x0001U;
			}
			byte >>= 1;
		}
	}

	return crc;
}
3867
/* Program the chip's wake-on-LAN filters and suspend power state from
 * the ethtool WAKE_* bitmask in @wol, then re-enable the receiver so
 * wake frames can be seen.  Always returns 0; register access return
 * codes are written to @ret but never checked (NOTE(review): consider
 * propagating failures).
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* stop TX & RX before reprogramming the wake-up machinery */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	/* clear wake settings and any latched wake-up sources */
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* disable every wake-up frame filter before installing new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable the receiver so the chip can see wake-up frames */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
4010
/* USB suspend handler.  On the first (outermost) suspend, stop the MAC
 * and tear down in-flight URBs, refusing autosuspend with -EBUSY while
 * TX traffic is pending.  Then program either selective-suspend
 * wake-up (autosuspend path) or the user's wake-on-LAN configuration.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			/* clear wake settings and latched wake sources */
			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* clear WUPS */
			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* re-enable RX so wake-up frames reach the filters */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			/* system suspend: apply the user's WoL settings */
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
4106
/* USB resume handler: restart the interrupt URB, resubmit TX URBs that
 * were anchored on dev->deferred while asleep, reset the wake-up
 * registers and re-enable the transmitter.  Always returns 0;
 * NOTE(review): register access and usb_submit_urb() return codes are
 * not propagated.
 */
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart the stats timer if suspend cancelled it */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URB */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		spin_lock_irq(&dev->txq.lock);
		/* resubmit the TX URBs deferred while the device slept */
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the frame and release the PM
				 * reference taken when it was deferred
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				netif_trans_update(dev->net);
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* reset wake-up control and clear latched wake sources */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* turn the transmitter back on */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
4173
/* Resume after a USB bus reset: re-run the full chip reset, restart
 * the PHY state machine, then perform the normal resume path.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);

	phy_start(dev->net->phydev);

	return lan78xx_resume(intf);
}
4184
/* USB IDs of the supported Microchip LAN78xx devices */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
4201
/* USB driver glue: probe/disconnect and power-management callbacks */
static struct usb_driver lan78xx_driver = {
	.name = DRIVER_NAME,
	.id_table = products,
	.probe = lan78xx_probe,
	.disconnect = lan78xx_disconnect,
	.suspend = lan78xx_suspend,
	.resume = lan78xx_resume,
	.reset_resume = lan78xx_reset_resume,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
4219