// SPDX-License-Identifier: GPL-2.0-only
/* b44.c: Broadcom 44xx/47xx 10/100 PCI ethernet driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 *
 * Distribute under GPL.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>
#include <linux/phy.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"

#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

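/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */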
#define B44_TX_TIMEOUT			(5 * HZ)

#define B44_MIN_MTU			ETH_ZLEN
#define B44_MAX_MTU			ETH_DATA_LEN

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

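/* minimum number of free TX descriptors required to wake up TX process */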
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

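/* b44 internal pattern match filter info */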
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");

static int b44_debug = -1;
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	{},
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");

		return -ENODEV;
	}
	return 0;
}

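/* Read one 48-bit MAC entry from the receive filter CAM at @index;
 * CAM_DATA_HI holds the first two bytes, CAM_DATA_LO the last four.
 */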
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

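	/* Flush posted writes */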
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

static int b44_mdio_read_mii(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mdio_write_mii(struct net_device *dev, int phy_id, int location,
			       int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}

static int b44_mdio_read_phylib(struct mii_bus *bus, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = bus->priv;
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static int b44_mdio_write_phylib(struct mii_bus *bus, int phy_id, int location,
				 u16 val)
{
	struct b44 *bp = bus->priv;
	return __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

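	/* Only rx pause is enabled here: allow it when we advertise
	 * asymmetric pause and the link partner advertises asymmetric
	 * but not symmetric pause.
	 */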
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef CONFIG_BCM47XX
#include <linux/bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
	char buf[20];
	u32 val;
	int err;

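	/* Workaround for bad hardware design on the Linksys WAP54G v1.0:
	 * "boardnum 2" units may come up with the PHY isolated, so clear
	 * the MII isolate bit if it is set.
	 */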
	if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
		return;
	if (simple_strtoul(buf, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warn("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

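		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely.
		 */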
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}

static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u64 *val;

	val = &bp->hw_stats.tx_good_octets;
	u64_stats_update_begin(&bp->hw_stats.syncp);

	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	u64_stats_update_end(&bp->hw_stats.syncp);
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}

static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}

static void b44_timer(struct timer_list *t)
{
	struct b44 *bp = from_timer(bp, t, timer);

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;
	unsigned bytes_compl = 0, pkts_compl = 0;

	cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

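	/* Reclaim buffers for descriptors the hardware has finished with. */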
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;

		bytes_compl += skb->len;
		pkts_compl++;

		dev_consume_skb_irq(skb);
	}

	netdev_completed_queue(bp->dev, pkts_compl, bytes_compl);
	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}

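/* The chip DMAs a 'struct rx_header' in front of each received frame;
 * rh->len stays zero until the hardware has finished writing the
 * packet, and rh->flags carries the receive error bits.
 */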
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

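	/* Hardware bug work-around, the chip is unable to do PCI DMA
	 * to/from anything above 1GB :-(
	 */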
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}

static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

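		/* Omit CRC. */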
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);

			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = napi_alloc_skb(&bp->napi, len);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_put(copy_skb, len);

			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}

static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		b44_tx(bp);
	}
	if (bp->istat & ISTAT_RFO) {
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		b44_enable_ints(bp);
	}

	return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

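	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by
	 * hw/firmware, but doesn't mask off the bits.
	 */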
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
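			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */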
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}

static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

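	/* This is a hard error, log it. */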
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

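		/* Chip can't handle DMA to/from >1GB, use bounce buffer */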
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_consume_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	netdev_sent_queue(dev, skb->len);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev)) {
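		/* We'll just catch it later when the device is up'd. */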
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

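/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */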
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

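/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */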
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}

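/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */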
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}

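/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */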
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
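		/* Allocation may have failed due to dma_alloc_coherent
		 * insisting on use of GFP_DMA, which is more restrictive
		 * than necessary; fall back to a manually mapped buffer.
		 */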
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
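		/* Allocation may have failed due to dma_alloc_coherent
		 * insisting on use of GFP_DMA, which is more restrictive
		 * than necessary; fall back to a manually mapped buffer.
		 */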
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}

static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}

static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

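	/* Don't enable the PHY for a partial reset; we are probably
	 * going to power down.
	 */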
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

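	/* Make PHY accessible. */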
	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1);
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags |= B44_FLAG_EXTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags &= ~B44_FLAG_EXTERNAL_PHY;
	}
}

static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);

	b44_phy_reset(bp);

	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	else
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

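/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */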
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

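	/* Enable CRC32, set proper LED modes and power on PHY */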
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

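	/* This sets the MAC address too. */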
	__b44_set_rx_mode(bp->dev);

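	/* MTU + eth header + possible VLAN tag + struct rx_header */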
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56);
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	netdev_reset_queue(bp->dev);
}

static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	timer_setup(&bp->timer, b44_timer, 0);
	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_start(dev->phydev);

	netif_start_queue(dev);
out:
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
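/* Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */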
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}

static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++) {
		pmask[len >> 3] |= BIT(len & 7);
		len++;
	}

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				 (j * ETH_ALEN) + k] = macaddr[k];
			pmask[len >> 3] |= BIT(len & 7);
			len++;
		}
	}
	return len - 1;
}

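/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */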
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern)
		return;

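	/* Ipv4 magic packet pattern - pattern 0 */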
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

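	/* Raw ethernet II magic packet pattern - pattern 1 */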
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

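	/* Ipv6 magic packet pattern - pattern 2 */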
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

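	/* set these pattern's lengths: one less than each real length */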
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

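	/* enable WOL */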
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
		      bp->dev->dev_addr[3] << 16 |
		      bp->dev->dev_addr[4] << 8 |
		      bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
		      bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		phy_stop(dev->phydev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}

static void b44_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *nstat)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);

		nstat->rx_packets = hwstat->rx_pkts;
		nstat->tx_packets = hwstat->tx_pkts;
		nstat->rx_bytes = hwstat->rx_octets;
		nstat->tx_bytes = hwstat->tx_octets;
		nstat->tx_errors = (hwstat->tx_jabber_pkts +
				    hwstat->tx_oversize_pkts +
				    hwstat->tx_underruns +
				    hwstat->tx_excessive_cols +
				    hwstat->tx_late_cols);
		nstat->multicast = hwstat->rx_multicast_pkts;
		nstat->collisions = hwstat->tx_total_cols;

		nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
					   hwstat->rx_undersize);
		nstat->rx_over_errors = hwstat->rx_missed_pkts;
		nstat->rx_frame_errors = hwstat->rx_align_errs;
		nstat->rx_crc_errors = hwstat->rx_crc_errs;
		nstat->rx_errors = (hwstat->rx_jabber_pkts +
				    hwstat->rx_oversize_pkts +
				    hwstat->rx_missed_pkts +
				    hwstat->rx_crc_align_errs +
				    hwstat->rx_undersize +
				    hwstat->rx_crc_errs +
				    hwstat->rx_align_errs +
				    hwstat->rx_symbol_errs);

		nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
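		/* Carrier lost counter seems to be broken for some devices */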
		nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));

}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1);
		break;
	}
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 supported, advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		phy_ethtool_ksettings_get(dev->phydev, cmd);

		return 0;
	}

	supported = (SUPPORTED_Autoneg);
	supported |= (SUPPORTED_100baseT_Half |
		      SUPPORTED_100baseT_Full |
		      SUPPORTED_10baseT_Half |
		      SUPPORTED_10baseT_Full |
		      SUPPORTED_MII);

	advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		advertising |= ADVERTISED_100baseT_Full;
	advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->base.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->base.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->base.port = 0;
	cmd->base.phy_address = bp->phy_addr;
	cmd->base.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->base.autoneg == AUTONEG_ENABLE)
		advertising |= ADVERTISED_Autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	if (!netif_running(dev)) {
		cmd->base.speed = 0;
		cmd->base.duplex = 0xff;
	}

	return 0;
}

static int b44_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 speed;
	int ret;
	u32 advertising;

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		spin_lock_irq(&bp->lock);
		if (netif_running(dev))
			b44_setup_phy(bp);

		ret = phy_ethtool_ksettings_set(dev->phydev, cmd);

		spin_unlock_irq(&bp->lock);

		return ret;
	}

	speed = cmd->base.speed;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		if (advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((speed != SPEED_100 &&
		    speed != SPEED_10) ||
		   (cmd->base.duplex != DUPLEX_HALF &&
		    cmd->base.duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->base.duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	struct b44_hw_stats *hwstat = &bp->hw_stats;
	u64 *data_src, *data_dst;
	unsigned int start;
	u32 i;

	spin_lock_irq(&bp->lock);
	b44_stats_update(bp);
	spin_unlock_irq(&bp->lock);

	do {
		data_src = &hwstat->tx_good_octets;
		data_dst = data;
		start = u64_stats_fetch_begin_irq(&hwstat->syncp);

		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
			*data_dst++ = *data_src++;

	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	device_set_wakeup_enable(bp->sdev->dev, wol->wolopts & WAKE_MAGIC);
	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
	.get_link_ksettings	= b44_get_link_ksettings,
	.set_link_ksettings	= b44_set_link_ksettings,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		BUG_ON(!dev->phydev);
		err = phy_mii_ioctl(dev->phydev, ifr, cmd);
	} else {
		err = generic_mii_ioctl(&bp->mii_if, if_mii(ifr), cmd, NULL);
	}
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

static int b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}

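	/* Some SPROMs carry a bogus PHY address; only the low
	 * five bits are valid, so mask off the rest.
	 */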
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, ETH_ALEN);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	bp->imask = IMASK_DEF;

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats64	= b44_get_stats64,
	.ndo_set_rx_mode	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

static void b44_adjust_link(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool status_changed = false;

	BUG_ON(!phydev);

	if (bp->old_link != phydev->link) {
		status_changed = true;
		bp->old_link = phydev->link;
	}

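	/* reflect duplex change */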
	if (phydev->link) {
		if ((phydev->duplex == DUPLEX_HALF) &&
		    (bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
		} else if ((phydev->duplex == DUPLEX_FULL) &&
			   !(bp->flags & B44_FLAG_FULL_DUPLEX)) {
			status_changed = true;
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		}
	}

	if (status_changed) {
		u32 val = br32(bp, B44_TX_CTRL);
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			val |= TX_CTRL_DUPLEX;
		else
			val &= ~TX_CTRL_DUPLEX;
		bw32(bp, B44_TX_CTRL, val);
		phy_print_status(phydev);
	}
}

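/* Register an MDIO bus for the external PHY and connect to it; boards with
 * an on-board switch (ROBO/ADM) fall back to the fixed PHY emulation when
 * no PHY responds at the SPROM-provided address.
 */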
static int b44_register_phy_one(struct b44 *bp)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct mii_bus *mii_bus;
	struct ssb_device *sdev = bp->sdev;
	struct phy_device *phydev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	struct ssb_sprom *sprom = &sdev->bus->sprom;
	int err;

	mii_bus = mdiobus_alloc();
	if (!mii_bus) {
		dev_err(sdev->dev, "mdiobus_alloc() failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	mii_bus->priv = bp;
	mii_bus->read = b44_mdio_read_phylib;
	mii_bus->write = b44_mdio_write_phylib;
	mii_bus->name = "b44_eth_mii";
	mii_bus->parent = sdev->dev;
	mii_bus->phy_mask = ~(1 << bp->phy_addr);
	snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%x", instance);

	bp->mii_bus = mii_bus;

	err = mdiobus_register(mii_bus);
	if (err) {
		dev_err(sdev->dev, "failed to register MII bus\n");
		goto err_out_mdiobus;
	}

	if (!mdiobus_is_registered_device(bp->mii_bus, bp->phy_addr) &&
	    (sprom->boardflags_lo & (B44_BOARDFLAG_ROBO | B44_BOARDFLAG_ADM))) {
		dev_info(sdev->dev,
			 "could not find PHY at %i, use fixed one\n",
			 bp->phy_addr);

		bp->phy_addr = 0;
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, "fixed-0",
			 bp->phy_addr);
	} else {
		snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
			 bp->phy_addr);
	}

	phydev = phy_connect(bp->dev, bus_id, &b44_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(sdev->dev, "could not attach PHY at %i\n",
			bp->phy_addr);
		err = PTR_ERR(phydev);
		goto err_out_mdiobus_unregister;
	}

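	/* mask with MAC supported features */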
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	linkmode_and(phydev->supported, phydev->supported, mask);
	linkmode_copy(phydev->advertising, phydev->supported);

	bp->old_link = 0;
	bp->phy_addr = phydev->mdio.addr;

	phy_attached_info(phydev);

	return 0;

err_out_mdiobus_unregister:
	mdiobus_unregister(mii_bus);

err_out_mdiobus:
	mdiobus_free(mii_bus);

err_out:
	return err;
}

static void b44_unregister_phy_one(struct b44 *bp)
{
	struct net_device *dev = bp->dev;
	struct mii_bus *mii_bus = bp->mii_bus;

	phy_disconnect(dev->phydev);
	mdiobus_unregister(mii_bus);
	mdiobus_free(mii_bus);
}

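/* Probe one SSB Ethernet core: allocate the netdev, power up the bus,
 * read the invariants from the SPROM and register with the networking core.
 */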
static int b44_init_one(struct ssb_device *sdev,
			const struct ssb_device_id *ent)
{
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

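	/* No interesting netdevice features in this card... */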
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);
	u64_stats_init(&bp->hw_stats.syncp);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->min_mtu = B44_MIN_MTU;
	dev->max_mtu = B44_MAX_MTU;
	dev->irq = sdev->irq;
	dev->ethtool_ops = &b44_ethtool_ops;

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

	err = dma_set_mask_and_coherent(sdev->dma_dev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		dev_err(sdev->dev, "No PHY present on this MAC, aborting\n");
		err = -ENODEV;
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mdio_read_mii;
	bp->mii_if.mdio_write = b44_mdio_write_mii;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

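	/* By default, advertise all speed/duplex settings. */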
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

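	/* By default, auto-negotiate PAUSE. */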
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

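	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */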
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

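	/* do a phy reset to test if there is an active phy */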
	err = b44_phy_reset(bp);
	if (err < 0) {
		dev_err(sdev->dev, "phy reset failed\n");
		goto err_out_unregister_netdev;
	}

	if (bp->flags & B44_FLAG_EXTERNAL_PHY) {
		err = b44_register_phy_one(bp);
		if (err) {
			dev_err(sdev->dev, "Cannot register PHY, aborting\n");
			goto err_out_unregister_netdev;
		}
	}

	device_set_wakeup_capable(sdev->dev, true);
	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	netif_napi_del(&bp->napi);
	free_netdev(dev);

out:
	return err;
}

static void b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	unregister_netdev(dev);
	if (bp->flags & B44_FLAG_EXTERNAL_PHY)
		b44_unregister_phy_one(bp);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	netif_napi_del(&bp->napi);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

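/* Suspend: halt the chip and free the rings; if Wake-on-LAN is enabled,
 * partially re-initialize the hardware and arm the magic-packet logic.
 */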
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

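/* Resume: power the bus back up, rebuild the rings, re-initialize the
 * hardware and re-request the interrupt line.
 */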
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

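	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */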
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= b44_remove_one,
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

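/* On PCI cards the SSB core sits behind the pcihost wrapper; register and
 * unregister it only when PCI support is compiled in.
 */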
static inline int __init b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

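	/* Set up parameters for syncing RX/TX DMA descriptors */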
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);