1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/types.h>
17#include <linux/netdevice.h>
18#include <linux/ethtool.h>
19#include <linux/mii.h>
20#include <linux/if_ether.h>
21#include <linux/if_vlan.h>
22#include <linux/etherdevice.h>
23#include <linux/pci.h>
24#include <linux/delay.h>
25#include <linux/init.h>
26#include <linux/dma-mapping.h>
27#include <linux/ssb/ssb.h>
28
29#include <asm/uaccess.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32
33
34#include "b44.h"
35
36#define DRV_MODULE_NAME "b44"
37#define PFX DRV_MODULE_NAME ": "
38#define DRV_MODULE_VERSION "2.0"
39
40#define B44_DEF_MSG_ENABLE \
41 (NETIF_MSG_DRV | \
42 NETIF_MSG_PROBE | \
43 NETIF_MSG_LINK | \
44 NETIF_MSG_TIMER | \
45 NETIF_MSG_IFDOWN | \
46 NETIF_MSG_IFUP | \
47 NETIF_MSG_RX_ERR | \
48 NETIF_MSG_TX_ERR)
49
50
51
52
53#define B44_TX_TIMEOUT (5 * HZ)
54
55
56#define B44_MIN_MTU 60
57#define B44_MAX_MTU 1500
58
59#define B44_RX_RING_SIZE 512
60#define B44_DEF_RX_RING_PENDING 200
61#define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
62 B44_RX_RING_SIZE)
63#define B44_TX_RING_SIZE 512
64#define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
65#define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
66 B44_TX_RING_SIZE)
67
68#define TX_RING_GAP(BP) \
69 (B44_TX_RING_SIZE - (BP)->tx_pending)
70#define TX_BUFFS_AVAIL(BP) \
71 (((BP)->tx_cons <= (BP)->tx_prod) ? \
72 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
73 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
74#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
75
76#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
77#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
78
79
80#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
81
82
83#define B44_PATTERN_BASE 0x400
84#define B44_PATTERN_SIZE 0x80
85#define B44_PMASK_BASE 0x600
86#define B44_PMASK_SIZE 0x10
87#define B44_MAX_PATTERNS 16
88#define B44_ETHIPV6UDP_HLEN 62
89#define B44_ETHIPV4UDP_HLEN 42
90
91static char version[] __devinitdata =
92 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";
93
94MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
95MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
96MODULE_LICENSE("GPL");
97MODULE_VERSION(DRV_MODULE_VERSION);
98
99static int b44_debug = -1;
100module_param(b44_debug, int, 0);
101MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
102
103
104#ifdef CONFIG_B44_PCI
105static const struct pci_device_id b44_pci_tbl[] = {
106 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
107 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
108 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
109 { 0 }
110};
111MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
112
113static struct pci_driver b44_pci_driver = {
114 .name = DRV_MODULE_NAME,
115 .id_table = b44_pci_tbl,
116};
117#endif
118
119static const struct ssb_device_id b44_ssb_tbl[] = {
120 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
121 SSB_DEVTABLE_END
122};
123MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
124
125static void b44_halt(struct b44 *);
126static void b44_init_rings(struct b44 *);
127
128#define B44_FULL_RESET 1
129#define B44_FULL_RESET_SKIP_PHY 2
130#define B44_PARTIAL_RESET 3
131#define B44_CHIP_RESET_FULL 4
132#define B44_CHIP_RESET_PARTIAL 5
133
134static void b44_init_hw(struct b44 *, int);
135
136static int dma_desc_align_mask;
137static int dma_desc_sync_size;
138static int instance;
139
140static const char b44_gstrings[][ETH_GSTRING_LEN] = {
141#define _B44(x...) # x,
142B44_STAT_REG_DECLARE
143#undef _B44
144};
145
/* Flush one DMA descriptor of a driver-owned ("ring hack") ring out to
 * the device.  @offset is masked down to the descriptor alignment so a
 * dma_desc_sync_size-byte window covering the descriptor is synced.
 */
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_device(sdev, dma_base,
					     offset & dma_desc_align_mask,
					     dma_desc_sync_size, dir);
}
155
/* Counterpart to b44_sync_dma_desc_for_device(): make one descriptor of
 * a streaming-mapped ring visible to the CPU before reading it.
 */
static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
					  offset & dma_desc_align_mask,
					  dma_desc_sync_size, dir);
}
165
/* Read the 32-bit core register at offset @reg via the SSB bus. */
static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}
170
/* Write @val to the 32-bit core register at offset @reg.  Note: SSB
 * writes may be posted; read the register back when ordering matters.
 */
static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}
176
177static int b44_wait_bit(struct b44 *bp, unsigned long reg,
178 u32 bit, unsigned long timeout, const int clear)
179{
180 unsigned long i;
181
182 for (i = 0; i < timeout; i++) {
183 u32 val = br32(bp, reg);
184
185 if (clear && !(val & bit))
186 break;
187 if (!clear && (val & bit))
188 break;
189 udelay(10);
190 }
191 if (i == timeout) {
192 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
193 "%lx to %s.\n",
194 bp->dev->name,
195 bit, reg,
196 (clear ? "clear" : "set"));
197 return -ENODEV;
198 }
199 return 0;
200}
201
/* Read the 6-byte station address in CAM slot @index into @data.
 * Triggers a CAM read, waits for CAM_CTRL_BUSY to clear, then unpacks
 * DATA_LO (address bytes 2..5) and DATA_HI (bytes 0..1).
 * NOTE(review): presumably requires bp->lock held like the other CAM
 * accessors -- confirm against callers.
 */
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	/* DATA_LO carries the low four address bytes, MSB first. */
	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	/* DATA_HI carries the top two address bytes. */
	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}
223
/* Write the 6-byte station address @data into CAM slot @index.
 * Packs bytes 2..5 into DATA_LO and bytes 0..1 (plus the VALID flag)
 * into DATA_HI, kicks off the write, then waits for it to complete.
 */
static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
241
/* Mask all interrupt sources.  The write may be posted; callers that
 * need it to have taken effect must flush (see b44_disable_ints).
 */
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}
246
/* Mask all interrupts and flush the posted write by reading the mask
 * register back, so no new interrupt can fire once we return.
 */
static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted write so the mask is in effect on return. */
	br32(bp, B44_IMASK);
}
254
/* Re-enable the interrupt sources recorded in bp->imask. */
static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
259
/* Raw MDIO read of @reg on @phy_addr into *@val.
 * Clears the MII completion flag, issues the read frame, then waits
 * for EMAC_INT_MII.  The data register is read unconditionally, but
 * the wait's error code (-ENODEV on timeout) is what gets returned,
 * so callers can tell a failed transaction apart.
 */
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
275
/* Raw MDIO write of @val to @reg on @phy_addr.
 * Same handshake as __b44_readphy(): clear the MII completion flag,
 * issue the write frame, wait for EMAC_INT_MII.
 * Returns 0 on success, -ENODEV on timeout.
 */
static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
287
288static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
289{
290 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
291 return 0;
292
293 return __b44_readphy(bp, bp->phy_addr, reg, val);
294}
295
296static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
297{
298 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
299 return 0;
300
301 return __b44_writephy(bp, bp->phy_addr, reg, val);
302}
303
304
305static int b44_mii_read(struct net_device *dev, int phy_id, int location)
306{
307 u32 val;
308 struct b44 *bp = netdev_priv(dev);
309 int rc = __b44_readphy(bp, phy_id, location, &val);
310 if (rc)
311 return 0xffffffff;
312 return val;
313}
314
/* mii_if_info write hook; any MDIO error is silently discarded, as
 * the MII library interface has no way to report it.
 */
static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			 int val)
{
	__b44_writephy(netdev_priv(dev), phy_id, location, val);
}
321
322static int b44_phy_reset(struct b44 *bp)
323{
324 u32 val;
325 int err;
326
327 if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
328 return 0;
329 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
330 if (err)
331 return err;
332 udelay(100);
333 err = b44_readphy(bp, MII_BMCR, &val);
334 if (!err) {
335 if (val & BMCR_RESET) {
336 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
337 bp->dev->name);
338 err = -ENODEV;
339 }
340 }
341
342 return 0;
343}
344
345static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
346{
347 u32 val;
348
349 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
350 bp->flags |= pause_flags;
351
352 val = br32(bp, B44_RXCONFIG);
353 if (pause_flags & B44_FLAG_RX_PAUSE)
354 val |= RXCONFIG_FLOW;
355 else
356 val &= ~RXCONFIG_FLOW;
357 bw32(bp, B44_RXCONFIG, val);
358
359 val = br32(bp, B44_MAC_FLOW);
360 if (pause_flags & B44_FLAG_TX_PAUSE)
361 val |= (MAC_FLOW_PAUSE_ENAB |
362 (0xc0 & MAC_FLOW_RX_HI_WATER));
363 else
364 val &= ~MAC_FLOW_PAUSE_ENAB;
365 bw32(bp, B44_MAC_FLOW, val);
366}
367
368static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
369{
370 u32 pause_enab = 0;
371
372
373
374
375
376
377 if ((local & ADVERTISE_PAUSE_CAP) &&
378 (local & ADVERTISE_PAUSE_ASYM)){
379 if ((remote & LPA_PAUSE_ASYM) &&
380 !(remote & LPA_PAUSE_CAP))
381 pause_enab |= B44_FLAG_RX_PAUSE;
382 }
383
384 __b44_set_flow_ctrl(bp, pause_enab);
385}
386
387#ifdef SSB_DRIVER_MIPS
388extern char *nvram_get(char *name);
/* Board workaround: on boards whose NVRAM reports boardnum 2
 * (presumably the Linksys WAP54G v1.0 -- TODO confirm), the boot
 * firmware can leave PHY 0 with BMCR_ISOLATE set; clear it so the
 * transceiver is usable.
 */
static void b44_wap54g10_workaround(struct b44 *bp)
{
	const char *str;
	u32 val;
	int err;

	str = nvram_get("boardnum");
	if (!str)
		return;
	if (simple_strtoul(str, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		/* Nothing to do if the PHY is not isolated. */
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
}
418#else
/* Without MIPS/SSB NVRAM support the workaround is not applicable. */
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
422#endif
423
/* Bring the PHY to a usable state: apply board workarounds, set up the
 * activity/transmit LED control registers, then either start
 * autonegotiation with the modes advertised in bp->flags or force
 * speed/duplex when B44_FLAG_FORCE_LINK is set.
 *
 * Returns 0 on success or the negative errno of the first failing
 * MDIO access.  PHY-less boards succeed trivially.
 */
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	/* LED setup: mask activity LED bits, enable the TX LED. */
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		/* Autonegotiate: advertise whatever bp->flags allows. */
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		/* Forced link: program BMCR directly from bp->flags. */
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* With a forced link the partner's pause ability is
		 * unknown, so flow control is disabled entirely.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
487
/* Accumulate the hardware MIB counters into bp->hw_stats.  The MIB
 * block is configured clear-on-read (see b44_clear_stats), so each
 * read both fetches and resets a counter.
 * Assumes the u32 fields of bp->hw_stats starting at tx_good_octets
 * are laid out contiguously in the same order as the TX then RX
 * counter registers -- TODO confirm against b44.h.
 */
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* NOTE(review): dead statement -- reg is immediately reassigned
	 * by the loop below; presumably it once documented the register
	 * gap between the TX and RX counter blocks.
	 */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}
505
506static void b44_link_report(struct b44 *bp)
507{
508 if (!netif_carrier_ok(bp->dev)) {
509 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
510 } else {
511 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
512 bp->dev->name,
513 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
514 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
515
516 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
517 "%s for RX.\n",
518 bp->dev->name,
519 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
520 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
521 }
522}
523
/* Poll the PHY and update link state.  Handles carrier transitions
 * (including duplex programming of the MAC via B44_TX_CTRL), resolves
 * flow control from the negotiated abilities, and reports remote
 * fault / jabber conditions.  Called with bp->lock held from the
 * periodic timer.
 */
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		/* No PHY: pretend a permanent 100/full link. */
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	/* bmsr == 0xffff indicates a dead/absent PHY; skip everything. */
	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			/* Link came up: program MAC duplex to match. */
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			/* Autoneg only: derive pause from both sides. */
			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link went down. */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
586
/* Periodic (~1 Hz) housekeeping timer: poll PHY/link state and fold
 * the clear-on-read MIB counters into software stats, then re-arm.
 * @__opaque is the struct b44 pointer the timer was armed with.
 */
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	/* round_jiffies() batches wakeups on the next whole jiffy tick. */
	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}
601
/* Reap completed TX descriptors.  The hardware's current-descriptor
 * offset (DMATX_STAT_CDMASK) marks how far transmission has advanced;
 * every entry from tx_cons up to it is unmapped and its skb freed.
 * Called from the NAPI poll path with bp->lock held.
 */
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	/* Convert the hardware byte offset into a descriptor index. */
	cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		ssb_dma_unmap_single(bp->sdev,
				     rp->mapping,
				     skb->len,
				     DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	/* Restart the queue once enough descriptors are free again. */
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	/* Presumably cancels the general-purpose timer interrupt armed
	 * for TX work -- TODO confirm against the GPTIMER users.
	 */
	bw32(bp, B44_GPTIMER, 0);
}
631
632
633
634
635
636
/* Allocate and DMA-map a fresh RX buffer into ring slot
 * @dest_idx_unmasked (masked to the ring size) and write the matching
 * descriptor.  If @src_idx >= 0, that slot's skb pointer is cleared
 * because its buffer is being replaced.
 *
 * The chip can only address the low 1GB (DMA_BIT_MASK(30)): if the
 * first mapping fails or lands above that limit, retry once from
 * GFP_DMA memory and set force_copybreak so future receives use the
 * copy path instead of flipping buffers.
 *
 * Returns RX_PKT_BUF_SZ on success, -ENOMEM on allocation/mapping
 * failure.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = ssb_dma_map_single(bp->sdev, skb->data,
				     RX_PKT_BUF_SZ,
				     DMA_FROM_DEVICE);

	/* Mapping failed or is outside the chip's 30-bit DMA window:
	 * retry from GFP_DMA memory.
	 */
	if (ssb_dma_mapping_error(bp->sdev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Only unmap if the mapping itself succeeded. */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping,
					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = ssb_dma_map_single(bp->sdev, skb->data,
					     RX_PKT_BUF_SZ,
					     DMA_FROM_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	/* The hardware writes its RX status header at the buffer start;
	 * clear it so b44_rx() can detect completion.
	 */
	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;	/* last descriptor wraps the ring */

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
711
/* Hand the buffer in RX ring slot @src_idx back to the hardware at
 * slot @dest_idx_unmasked (masked) without reallocating: move the
 * skb/mapping bookkeeping, reset the status header, and copy the
 * descriptor with the EOT bit adjusted for the destination position.
 */
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	/* Re-arm the hardware status header for the next receive. */
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	/* Driver-owned ring: pull the source descriptor to the CPU
	 * before reading its fields.
	 */
	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	/* EOT must be set only on the last slot of the ring. */
	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	/* Give the data buffer back to the device as well. */
	ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
				       RX_PKT_BUF_SZ,
				       DMA_FROM_DEVICE);
}
757
/* Receive up to @budget packets.  For each completed descriptor the
 * hardware-written rx_header is validated; large frames (above
 * RX_COPY_THRESHOLD, unless force_copybreak) are passed up directly
 * after a replacement buffer is allocated, small frames are copied
 * into a fresh skb and the ring buffer recycled.
 * Returns the number of packets handed to the stack.
 */
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	/* Hardware's current descriptor offset -> producer index. */
	prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		ssb_dma_sync_single_for_cpu(bp->sdev, map,
					    RX_PKT_BUF_SZ,
					    DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		/* Oversized or error-flagged frames are dropped. */
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			/* Presumably the header DMA can lag the status
			 * update -- poll the length briefly before
			 * giving up.  TODO confirm hardware erratum.
			 */
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Strip the trailing 4 bytes (presumably the FCS). */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			/* Replace the ring buffer, keep this skb. */
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			ssb_dma_unmap_single(bp->sdev, map,
					     skb_size, DMA_FROM_DEVICE);
			/* Trim off the hardware header in front. */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			/* Copybreak: recycle the buffer, copy payload. */
			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);	/* align IP header */
			skb_put(copy_skb, len);

			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	/* Tell the hardware how far we have consumed. */
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
845
/* NAPI poll handler.  Works off the interrupt status cached in
 * bp->istat by b44_interrupt(): reaps TX under the lock, receives up
 * to @budget packets, and on error interrupts performs a full
 * halt/reinit.  Re-enables interrupts once all work is done.
 */
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		b44_tx(bp);
	}
	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		/* Fatal DMA/bus error: reset and reinitialize. */
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		/* Report zero so NAPI completes below. */
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}
882
/* Interrupt handler.  Reads and masks the status with the current
 * interrupt mask (the line may be shared), caches it in bp->istat for
 * b44_poll(), disables further interrupts and schedules NAPI.  The
 * status is always acknowledged and the ack write flushed.
 */
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* Only consider sources we actually have enabled. */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			printk(KERN_INFO "%s: late interrupt.\n", dev->name);
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* Cache the status for the poll handler and keep
			 * interrupts off until NAPI work is done.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);	/* flush the posted ack */
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}
924
/* netdev watchdog callback: the TX queue stalled past B44_TX_TIMEOUT.
 * Recover with a full halt, ring reinit and hardware reinit, then
 * re-enable interrupts and restart the queue.
 */
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
944
/* Transmit entry point.  Maps the (linear) skb for DMA, falls back to
 * a GFP_DMA bounce buffer when the mapping fails or lies above the
 * chip's 30-bit DMA limit, fills in the next TX descriptor and kicks
 * the DMA engine.
 *
 * NOTE(review): on mapping/allocation failure this returns
 * NETDEV_TX_BUSY without stopping the queue, so the stack will keep
 * retrying the same skb -- verify whether dropping the packet
 * (NETDEV_TX_OK + free) is the intended policy here.
 */
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This should not happen: the queue is stopped before the ring
	 * can fill (see below).
	 */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		goto err_out;
	}

	mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
	if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't reach this buffer: bounce via GFP_DMA. */
		if (!ssb_dma_mapping_error(bp->sdev, mapping))
			ssb_dma_unmap_single(bp->sdev, mapping, len,
					     DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
					     len, DMA_TO_DEVICE);
		if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!ssb_dma_mapping_error(bp->sdev, mapping))
				ssb_dma_unmap_single(bp->sdev, mapping,
						     len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	/* One descriptor per packet: start+end of frame, irq on done. */
	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	/* Descriptor must be visible before the doorbell write. */
	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	/* Chip-revision workarounds: double-write / flush the doorbell. */
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
1035
1036static int b44_change_mtu(struct net_device *dev, int new_mtu)
1037{
1038 struct b44 *bp = netdev_priv(dev);
1039
1040 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1041 return -EINVAL;
1042
1043 if (!netif_running(dev)) {
1044
1045
1046
1047 dev->mtu = new_mtu;
1048 return 0;
1049 }
1050
1051 spin_lock_irq(&bp->lock);
1052 b44_halt(bp);
1053 dev->mtu = new_mtu;
1054 b44_init_rings(bp);
1055 b44_init_hw(bp, B44_FULL_RESET);
1056 spin_unlock_irq(&bp->lock);
1057
1058 b44_enable_ints(bp);
1059
1060 return 0;
1061}
1062
1063
1064
1065
1066
1067
1068
1069
1070static void b44_free_rings(struct b44 *bp)
1071{
1072 struct ring_info *rp;
1073 int i;
1074
1075 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1076 rp = &bp->rx_buffers[i];
1077
1078 if (rp->skb == NULL)
1079 continue;
1080 ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
1081 DMA_FROM_DEVICE);
1082 dev_kfree_skb_any(rp->skb);
1083 rp->skb = NULL;
1084 }
1085
1086
1087 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1088 rp = &bp->tx_buffers[i];
1089
1090 if (rp->skb == NULL)
1091 continue;
1092 ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
1093 DMA_TO_DEVICE);
1094 dev_kfree_skb_any(rp->skb);
1095 rp->skb = NULL;
1096 }
1097}
1098
1099
1100
1101
1102
1103
1104
/* Reset both rings to a clean state: free any attached skbs, zero the
 * descriptor memory (flushing it to the device for streaming-mapped
 * "ring hack" rings), then pre-fill rx_pending RX buffers.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
					       DMA_TABLE_BYTES,
					       DMA_TO_DEVICE);

	/* Stop early on allocation failure; the ring simply runs with
	 * fewer buffers.
	 */
	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
1129
1130
1131
1132
1133
/* Release all ring memory.  Each descriptor ring was obtained either
 * from the consistent-DMA allocator or -- when the RING_HACK flag is
 * set -- from kzalloc plus a streaming mapping, and must be torn down
 * the same way it was built (see b44_alloc_consistent).
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->rx_ring, bp->rx_ring_dma,
						GFP_KERNEL);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
						bp->tx_ring, bp->tx_ring_dma,
						GFP_KERNEL);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}
1167
1168
1169
1170
1171
/* Allocate the ring bookkeeping arrays and the descriptor rings.
 * Rings are preferably obtained from the consistent-DMA allocator; if
 * that fails (presumably because the returned memory cannot satisfy
 * the chip's addressing constraints -- TODO confirm) we fall back to
 * kzalloc plus a streaming mapping and mark the ring with the
 * *_RING_HACK flag so every descriptor access is synced explicitly.
 * The streaming mapping must also land inside the 30-bit DMA window.
 *
 * Returns 0 on success, -ENOMEM on failure (everything already
 * allocated is released via b44_free_consistent).
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Fallback: kzalloc + streaming map ("ring hack"). */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
						 DMA_TABLE_BYTES,
						 DMA_BIDIRECTIONAL);

		if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Same fallback for the TX ring (device only reads it,
		 * hence DMA_TO_DEVICE).
		 */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
						 DMA_TABLE_BYTES,
						 DMA_TO_DEVICE);

		if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}
1247
1248
/* Zero the hardware MIB counters: switch the MIB block to
 * clear-on-read mode, then read every TX and RX counter once,
 * discarding the values.
 */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}
1259
1260
/* Reset the ethernet core.  If it was already enabled, the MAC and
 * both DMA engines are quiesced first so no in-flight DMA survives
 * the reset.  For a full reset (@reset_kind != B44_CHIP_RESET_PARTIAL)
 * the MDIO clock is reprogrammed for the bus type and the
 * internal/external PHY configuration is (re)detected via DEVCTRL.
 */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		/* Stop the MAC receiver/transmitter... */
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		/* ...and both DMA engines, resetting the ring indices. */
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/* A partial reset keeps the PHY/MDIO configuration untouched. */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		/* Derive the MDIO clock divider from the bus clock. */
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* driver is not expected to run on these */
		break;
	}

	br32(bp, B44_MDIO_CTRL);	/* flush the posted write */

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		/* External PHY: select the external PHY interface. */
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		/* Take the internal PHY out of reset if needed. */
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
1328
1329
/* Stop all device activity: disable interrupts, reset and power down
 * the PHY, then partially reset the chip.  Callers (b44_close,
 * b44_suspend, b44_set_ringparam, ...) hold bp->lock around this.
 */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);

	b44_phy_reset(bp);

	printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

	/* Partial reset: quiesce MAC/DMA but keep WOL-capable state. */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
1342
1343
/* Program the device's unicast address into CAM entry 0 and enable
 * the CAM, unless the interface is in promiscuous mode (in which case
 * the CAM stays disabled so everything is received).  Caller holds
 * bp->lock.
 */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1355
/* ndo_set_mac_address: change the interface MAC address.
 *
 * Only allowed while the interface is down (-EBUSY otherwise), and the
 * address must be a valid unicast ethernet address (-EINVAL otherwise).
 * The CAM is reprogrammed only when the chip actually has one.
 */
static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
1380
1381
1382
1383
1384static void __b44_set_rx_mode(struct net_device *);
static void __b44_set_rx_mode(struct net_device *);
/* Bring the hardware up after a reset.
 *
 * @reset_kind: B44_FULL_RESET also resets and reconfigures the PHY
 * and programs the DMA rings; B44_PARTIAL_RESET only re-enables RX
 * DMA (used on the wake-on-lan shutdown path where the rings are
 * already in place).  Caller holds bp->lock.
 */
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC generation and PHY-controlled LEDs. */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* Program the RX filter (promisc/allmulti/CAM). */
	__b44_set_rx_mode(bp->dev);

	/* MTU + ethernet header + VLAN + descriptor header overhead. */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56);
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		/* Full reset: point both DMA engines at the rings and
		 * prime the RX producer index. */
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
1426
/* ndo_open: allocate DMA rings, bring the hardware up, request the
 * IRQ, start the periodic link timer, and enable the TX queue.
 * On IRQ failure everything is unwound and the error is returned.
 */
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		/* Undo everything done above. */
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	/* Periodic link-state poll, once a second. */
	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}
1463
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by netconsole and other diagnostic tools
 * to allow network I/O with interrupts disabled.  Masks the IRQ and
 * invokes the interrupt handler directly.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
1476
1477static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1478{
1479 u32 i;
1480 u32 *pattern = (u32 *) pp;
1481
1482 for (i = 0; i < bytes; i += sizeof(u32)) {
1483 bw32(bp, B44_FILT_ADDR, table_offset + i);
1484 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
1485 }
1486}
1487
1488static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1489{
1490 int magicsync = 6;
1491 int k, j, len = offset;
1492 int ethaddr_bytes = ETH_ALEN;
1493
1494 memset(ppattern + offset, 0xff, magicsync);
1495 for (j = 0; j < magicsync; j++)
1496 set_bit(len++, (unsigned long *) pmask);
1497
1498 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1499 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1500 ethaddr_bytes = ETH_ALEN;
1501 else
1502 ethaddr_bytes = B44_PATTERN_SIZE - len;
1503 if (ethaddr_bytes <=0)
1504 break;
1505 for (k = 0; k< ethaddr_bytes; k++) {
1506 ppattern[offset + magicsync +
1507 (j * ETH_ALEN) + k] = macaddr[k];
1508 len++;
1509 set_bit(len, (unsigned long *) pmask);
1510 }
1511 }
1512 return len - 1;
1513}
1514
1515
1516
1517
/* Program the chip's three pattern-match slots with pseudo magic-packet
 * patterns so a magic packet arriving as raw ethernet, IPv4/UDP or
 * IPv6/UDP wakes the machine.  Used on chip revisions that lack the
 * dedicated magic-packet mode (see b44_setup_wol).
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{

	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		printk(KERN_ERR PFX "Memory not available for WOL\n");
		return;
	}

	/* Slot 0: magic packet inside an IPv4/UDP frame. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Slot 1: magic packet directly after the ethernet header. */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Slot 2: magic packet inside an IPv6/UDP frame. */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* Pack the three pattern lengths into WKUP_LEN and enable all
	 * three match slots. */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* Enable the pattern-match wakeup feature. */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);

}
1573
#ifdef CONFIG_B44_PCI
/* PCI-specific wake-on-lan plumbing: assert the SSB power-enable bit
 * and set the PME enable bit in the host PCI power-management
 * capability so the card can wake the system.  No-op on pure SSB.
 */
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
/* Stub when PCI support is compiled out. */
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif
1588
/* Arm the chip for wake-on-lan before power-down.
 *
 * B0-and-later chips have a dedicated magic-packet mode: program the
 * station address into ADDR_LO/ADDR_HI and enable MPM.  Older chips
 * emulate it with the pattern matcher (b44_setup_pseudo_magicp).
 */
static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {

		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		/* Lower 4 bytes of the MAC address. */
		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		/* Upper 2 bytes of the MAC address. */
		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		/* Enable magic-packet mode and the pattern filter. */
		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);

	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}
1617
/* ndo_stop: stop the queue, NAPI and link timer, halt the chip, free
 * the rings and IRQ, optionally arm wake-on-lan, then release the DMA
 * memory.  Always returns 0.
 */
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		/* Re-init just enough of the chip to receive and match
		 * wakeup frames while the interface is down. */
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}
1647
1648static struct net_device_stats *b44_get_stats(struct net_device *dev)
1649{
1650 struct b44 *bp = netdev_priv(dev);
1651 struct net_device_stats *nstat = &dev->stats;
1652 struct b44_hw_stats *hwstat = &bp->hw_stats;
1653
1654
1655 nstat->rx_packets = hwstat->rx_pkts;
1656 nstat->tx_packets = hwstat->tx_pkts;
1657 nstat->rx_bytes = hwstat->rx_octets;
1658 nstat->tx_bytes = hwstat->tx_octets;
1659 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1660 hwstat->tx_oversize_pkts +
1661 hwstat->tx_underruns +
1662 hwstat->tx_excessive_cols +
1663 hwstat->tx_late_cols);
1664 nstat->multicast = hwstat->tx_multicast_pkts;
1665 nstat->collisions = hwstat->tx_total_cols;
1666
1667 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1668 hwstat->rx_undersize);
1669 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1670 nstat->rx_frame_errors = hwstat->rx_align_errs;
1671 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1672 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1673 hwstat->rx_oversize_pkts +
1674 hwstat->rx_missed_pkts +
1675 hwstat->rx_crc_align_errs +
1676 hwstat->rx_undersize +
1677 hwstat->rx_crc_errs +
1678 hwstat->rx_align_errs +
1679 hwstat->rx_symbol_errs);
1680
1681 nstat->tx_aborted_errors = hwstat->tx_underruns;
1682#if 0
1683
1684 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1685#endif
1686
1687 return nstat;
1688}
1689
1690static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1691{
1692 struct dev_mc_list *mclist;
1693 int i, num_ents;
1694
1695 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1696 mclist = dev->mc_list;
1697 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1698 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1699 }
1700 return i+1;
1701}
1702
/* Program the RX filter from dev->flags and the multicast list.
 *
 * Promiscuous mode (or a chip without a CAM) simply enables
 * RXCONFIG_PROMISC.  Otherwise the unicast address goes into CAM
 * entry 0, multicast addresses into the following entries (or
 * ALLMULTI is enabled if there are too many), unused entries are
 * zeroed, and the CAM is enabled.  Caller holds bp->lock.
 */
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		/* Clear the remaining CAM entries. */
		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
1733
1734static void b44_set_rx_mode(struct net_device *dev)
1735{
1736 struct b44 *bp = netdev_priv(dev);
1737
1738 spin_lock_irq(&bp->lock);
1739 __b44_set_rx_mode(dev);
1740 spin_unlock_irq(&bp->lock);
1741}
1742
1743static u32 b44_get_msglevel(struct net_device *dev)
1744{
1745 struct b44 *bp = netdev_priv(dev);
1746 return bp->msg_enable;
1747}
1748
1749static void b44_set_msglevel(struct net_device *dev, u32 value)
1750{
1751 struct b44 *bp = netdev_priv(dev);
1752 bp->msg_enable = value;
1753}
1754
/* ethtool: report driver name, version and bus location.  The bus
 * string depends on whether the core sits on a host PCI bus or a
 * native SSB bus.
 */
static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* these buses are not supported by this driver */
		break;
	}
}
1775
/* ethtool: restart autonegotiation.  Returns -EINVAL if autoneg is
 * not currently enabled on the PHY.
 */
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	/* BMCR is read twice; presumably the first read flushes a stale
	 * or latched value — TODO confirm against the PHY datasheet. */
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
1795
/* ethtool get_settings: report supported/advertised modes, current
 * speed/duplex/transceiver and autoneg state derived from bp->flags.
 * When the interface is down, speed/duplex are reported as unknown.
 */
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)){
		/* Interface down: speed/duplex are meaningless. */
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
1837
/* ethtool set_settings: validate and store the requested link
 * configuration in bp->flags, then reprogram the PHY if the interface
 * is running.  Gigabit modes are rejected (10/100 hardware only);
 * with autoneg off, only 10/100 half/full are accepted.
 */
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* Validate before taking the lock. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			/* Nothing specified: advertise everything. */
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		/* Forced speed/duplex. */
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
1896
/* ethtool: report RX ring sizing.  Only the RX side is reported;
 * the TX fields are intentionally left untouched here.
 */
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* TX ring parameters are not reported. */
}
1907
/* ethtool: resize the RX/TX pending counts.  Mini/jumbo rings are not
 * supported.  The chip is halted, the rings rebuilt with the new
 * sizes, and the hardware fully re-initialized.
 */
static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	/* Restart the chip with the new ring sizes. */
	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
1934
1935static void b44_get_pauseparam(struct net_device *dev,
1936 struct ethtool_pauseparam *epause)
1937{
1938 struct b44 *bp = netdev_priv(dev);
1939
1940 epause->autoneg =
1941 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1942 epause->rx_pause =
1943 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1944 epause->tx_pause =
1945 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1946}
1947
/* ethtool: update pause-frame configuration.  With pause autoneg the
 * chip is fully restarted so the new advertisement takes effect;
 * otherwise the flow-control registers are written directly.
 */
static int b44_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		/* Renegotiate: restart the whole device. */
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
1979
1980static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1981{
1982 switch(stringset) {
1983 case ETH_SS_STATS:
1984 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1985 break;
1986 }
1987}
1988
1989static int b44_get_sset_count(struct net_device *dev, int sset)
1990{
1991 switch (sset) {
1992 case ETH_SS_STATS:
1993 return ARRAY_SIZE(b44_gstrings);
1994 default:
1995 return -EOPNOTSUPP;
1996 }
1997}
1998
/* ethtool: dump the hardware statistics counters.
 *
 * Walks bp->hw_stats as an array of consecutive u32 words starting at
 * tx_good_octets — this relies on the b44_hw_stats layout matching the
 * order of b44_gstrings exactly.
 */
static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	/* Pull fresh counters from the chip first. */
	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
2015
2016static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2017{
2018 struct b44 *bp = netdev_priv(dev);
2019
2020 wol->supported = WAKE_MAGIC;
2021 if (bp->flags & B44_FLAG_WOL_ENABLE)
2022 wol->wolopts = WAKE_MAGIC;
2023 else
2024 wol->wolopts = 0;
2025 memset(&wol->sopass, 0, sizeof(wol->sopass));
2026}
2027
2028static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2029{
2030 struct b44 *bp = netdev_priv(dev);
2031
2032 spin_lock_irq(&bp->lock);
2033 if (wol->wolopts & WAKE_MAGIC)
2034 bp->flags |= B44_FLAG_WOL_ENABLE;
2035 else
2036 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2037 spin_unlock_irq(&bp->lock);
2038
2039 return 0;
2040}
2041
/* ethtool operations exposed by this driver. */
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};
2060
/* ndo_do_ioctl: forward MII ioctls (SIOCGMIIPHY etc.) to the generic
 * MII layer under the device lock.  Fails with -EINVAL when the
 * interface is down.
 */
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}
2076
2077static int __devinit b44_get_invariants(struct b44 *bp)
2078{
2079 struct ssb_device *sdev = bp->sdev;
2080 int err = 0;
2081 u8 *addr;
2082
2083 bp->dma_offset = ssb_dma_translation(sdev);
2084
2085 if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
2086 instance > 1) {
2087 addr = sdev->bus->sprom.et1mac;
2088 bp->phy_addr = sdev->bus->sprom.et1phyaddr;
2089 } else {
2090 addr = sdev->bus->sprom.et0mac;
2091 bp->phy_addr = sdev->bus->sprom.et0phyaddr;
2092 }
2093
2094
2095
2096 bp->phy_addr &= 0x1F;
2097
2098 memcpy(bp->dev->dev_addr, addr, 6);
2099
2100 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2101 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2102 return -EINVAL;
2103 }
2104
2105 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2106
2107 bp->imask = IMASK_DEF;
2108
2109
2110
2111
2112
2113 if (bp->sdev->id.revision >= 7)
2114 bp->flags |= B44_FLAG_B0_ANDLATER;
2115
2116 return err;
2117}
2118
/* Network device operations exposed by this driver. */
static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_multicast_list	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};
2134
/* SSB probe: allocate and register a net_device for one ethernet core.
 *
 * Powers up the bus, verifies 30-bit DMA addressing (a hardware
 * limitation of this core), reads the invariants, registers the
 * netdev, and leaves the chip in a known reset state.  Returns 0 or a
 * negative errno; on failure all acquired resources are released.
 */
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	static int b44_version_printed = 0;
	struct net_device *dev;
	struct b44 *bp;
	int err;

	/* Global core counter; used to pick et0/et1 SPROM variables. */
	instance++;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No extra netdev feature flags; kept as an explicit no-op. */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}
	/* The core can only address 30 bits of DMA space. */
	err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
	if (err) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system.\n");
		goto err_out_powerdown;
	}
	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting.\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* Default: advertise every supported 10/100 mode ... */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* ... and autonegotiate flow control. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_powerdown;
	}

	ssb_set_drvdata(sdev, dev);

	/* Full reset once so the chip is in a known state. */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
	       dev->name, dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}
2241
/* SSB remove: unregister the netdev, disable and power down the core,
 * free the device and clear the driver data pointer.
 */
static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}
2253
/* SSB suspend: stop the link timer, halt the chip, detach the netdev,
 * free the rings and IRQ, optionally arm wake-on-lan, then drop the
 * device to D3hot.  A non-running interface needs no work.
 */
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		/* Re-init just enough of the chip to match wakeup frames. */
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
2282
/* SSB resume: power the bus back up and, if the interface was
 * running, reacquire the IRQ, rebuild the rings, re-init the hardware
 * and restart the TX queue and link timer.
 */
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	/* IRQ was released in b44_suspend(); get it back. */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	/* Restart the link-state timer immediately. */
	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
2319
/* SSB driver registration for the ethernet core. */
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
2328
/* Register the PCI host glue when built with PCI support; otherwise
 * succeed trivially.
 */
static inline int b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}
2337
/* Unregister the PCI host glue; no-op when PCI support is compiled out. */
static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}
2344
/* Module init: derive the DMA descriptor alignment/sync parameters
 * from the CPU cache line size, then register the PCI glue and the
 * SSB driver.  The PCI glue is unregistered again if the SSB
 * registration fails.
 */
static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Descriptors must be cache-line aligned and synced in units of
	 * at least one descriptor. */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
2362
/* Module exit: unregister the SSB driver, then the PCI glue. */
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}
2368
2369module_init(b44_init);
2370module_exit(b44_cleanup);
2371
2372