1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/* Receive packets no larger than this many bytes are copied into a fresh
 * skb so the original DMA buffer can be handed straight back to the NIC
 * (see MODULE_PARM_DESC below).  Module parameter; default 200.
 */
static int rx_copybreak = 200;

/* How to reach the NIC's registers: 0 = PIO, 1 = MMIO, 2 = try MMIO and
 * fall back to PIO (see MODULE_PARM_DESC below).  Module parameter.
 */
static unsigned int use_mmio = 2;

/* Beyond this many multicast groups, typhoon_set_rx_mode() stops hashing
 * and simply accepts all multicast frames.
 */
static const int multicast_filter_limit = 32;
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/* Descriptor counts for each of the rings shared with the NIC.  These
 * sizes fix the layout of struct typhoon_shared below.
 */
#define TXHI_ENTRIES 2
#define TXLO_ENTRIES 128
#define RX_ENTRIES 32
#define COMMAND_ENTRIES 16
#define RESPONSE_ENTRIES 32

#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* One rx-free slot is kept unused so a full ring is distinguishable from
 * an empty one (same convention as typhoon_num_free()), hence the usable
 * entry count is RXFREE_ENTRIES - 1.
 */
#define RXFREE_ENTRIES 128
#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)

/* Transmit watchdog timeout. */
#define TX_TIMEOUT (2*HZ)

/* Size of a receive DMA buffer. */
#define PKT_BUF_SZ 1536
/* Runtime firmware image loaded via request_firmware(). */
#define FIRMWARE_NAME "3com/typhoon.bin"
98
99#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
100
101#include <linux/module.h>
102#include <linux/kernel.h>
103#include <linux/sched.h>
104#include <linux/string.h>
105#include <linux/timer.h>
106#include <linux/errno.h>
107#include <linux/ioport.h>
108#include <linux/interrupt.h>
109#include <linux/pci.h>
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/skbuff.h>
113#include <linux/mm.h>
114#include <linux/init.h>
115#include <linux/delay.h>
116#include <linux/ethtool.h>
117#include <linux/if_vlan.h>
118#include <linux/crc32.h>
119#include <linux/bitops.h>
120#include <asm/processor.h>
121#include <asm/io.h>
122#include <linux/uaccess.h>
123#include <linux/in6.h>
124#include <linux/dma-mapping.h>
125#include <linux/firmware.h>
126
127#include "typhoon.h"
128
129MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
130MODULE_LICENSE("GPL");
131MODULE_FIRMWARE(FIRMWARE_NAME);
132MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
133MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
134 "the buffer given back to the NIC. Default "
135 "is 200.");
136MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
137 "Default is to try MMIO and fallback to PIO.");
138module_param(rx_copybreak, int, 0);
139module_param(use_mmio, int, 0);
140
141#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
142#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
143#undef NETIF_F_TSO
144#endif
145
146#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
147#error TX ring too small!
148#endif
149
/* Static per-model description: marketing name plus TYPHOON_* capability
 * flag bits (crypto level, fiber, wakeup quirk).
 */
struct typhoon_card_info {
	const char *name;	/* model name printed at probe time */
	const int capabilities;	/* bitwise OR of the TYPHOON_* flags below */
};
154
/* Capability flag bits for typhoon_card_info.capabilities. */
#define TYPHOON_CRYPTO_NONE 0x00
#define TYPHOON_CRYPTO_DES 0x01
#define TYPHOON_CRYPTO_3DES 0x02
#define TYPHOON_CRYPTO_VARIABLE 0x04
#define TYPHOON_FIBER 0x08
#define TYPHOON_WAKEUP_NEEDS_RESET 0x10

/* Indexes into typhoon_card_info[]; also used as driver_data in the PCI
 * device table, so the order here must match the array below.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
168
169
/* Per-model info, indexed by enum typhoon_cards — keep the two in sync. */
static struct typhoon_card_info typhoon_card_info[] = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
198
199
200
201
202
203
204
/* PCI IDs we bind to.  Some device IDs are shared between models, so the
 * subsystem device ID disambiguates where needed; the driver_data field
 * is an enum typhoon_cards index into typhoon_card_info[].
 */
static const struct pci_device_id typhoon_pci_tbl[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
235
236
237
238
239
#define __3xp_aligned ____cacheline_aligned
/* Everything shared with the NIC via DMA lives in one allocation.  The
 * absolute address of each member is handed to the firmware through the
 * typhoon_interface block (see typhoon_init_interface()), so the struct
 * is __packed; rings are cacheline aligned to avoid host-side false
 * sharing between rings.
 */
struct typhoon_shared {
	struct typhoon_interface iface;
	struct typhoon_indexes indexes __3xp_aligned;	/* ring head/tail offsets */
	struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
	struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
	struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
	struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
	struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
	struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
	/* always-zero word; its DMA address is given to the card as
	 * zeroAddr (purpose on the NIC side not visible here) */
	u32 zeroWord;
	struct tx_desc txHi[TXHI_ENTRIES];
} __packed;
253
/* Host-side bookkeeping for one receive buffer handed to the NIC: the
 * skb owning the data and the DMA address it was mapped to.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
258
/* Per-adapter state.  Members are deliberately grouped by cacheline (see
 * the ____cacheline_aligned markers): TX-path fields first, then the
 * general/RX fields, then the command-path fields.
 */
struct typhoon {
	/* TX path */
	struct transmit_ring txLoRing ____cacheline_aligned;
	struct pci_dev * tx_pdev;
	void __iomem *tx_ioaddr;
	u32 txlo_dma_addr;	/* DMA address of the txLo ring */

	/* general / RX path */
	void __iomem *ioaddr ____cacheline_aligned;
	struct typhoon_indexes *indexes;	/* -> shared->indexes */
	u8 awaiting_resp;	/* a command response is being waited on */
	u8 duplex;		/* cached link duplex (0xff = unknown) */
	u8 speed;		/* cached link speed (0xff = unknown) */
	u8 card_state;		/* enum state_values: Sleeping/Running */
	struct basic_ring rxLoRing;
	struct pci_dev * pdev;
	struct net_device * dev;
	struct napi_struct napi;
	struct basic_ring rxHiRing;
	struct basic_ring rxBuffRing;
	struct rxbuff_ent rxbuffers[RXENT_ENTRIES];

	/* command path */
	spinlock_t command_lock ____cacheline_aligned;	/* guards cmd/resp rings */
	struct basic_ring cmdRing;
	struct basic_ring respRing;
	/* counters accumulated before the last sleep/reset; added back in
	 * typhoon_do_get_stats() since the NIC's counters restart */
	struct net_device_stats stats_saved;
	struct typhoon_shared * shared;
	dma_addr_t shared_dma;
	__le16 xcvr_select;	/* TYPHOON_XCVR_* currently selected */
	__le16 wol_events;	/* TYPHOON_WAKE_* bits armed for wake-up */
	__le32 offload;		/* TYPHOON_OFFLOAD_* bits enabled */

	/* rarely touched */
	int capabilities;
	struct transmit_ring txHiRing;
};
296
/* How callers of typhoon_reset() may wait for completion: not at all,
 * busy-waiting (atomic context), or sleeping.
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* tp->card_state: whether the 3XP is running the full runtime image or
 * the low-power sleep image (see typhoon_get_drvinfo()/typhoon_get_stats()).
 */
enum state_values {
	Sleeping = 0, Running,
};
308
309
310
311
/* Flush posted PCI writes by reading a harmless register.  Only
 * meaningful (and only done) when register access is via MMIO.
 */
#define typhoon_post_pci_writes(x) \
	do { if (likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while (0)

/* Polling granularity and derived timeouts for reset and command
 * completion loops.
 */
#define TYPHOON_UDELAY 50
#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)

/* TSO helpers; when the kernel has no NETIF_F_TSO these collapse to
 * no-ops so the rest of the code stays free of #ifdefs.
 */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS 2
#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 0
#define skb_tso_size(x) 0
#define TSO_NUM_DESCRIPTORS 0
#define TSO_OFFLOAD_ON 0
#endif
332
333static inline void
334typhoon_inc_index(u32 *index, const int count, const int num_entries)
335{
336
337
338
339
340 *index += count * sizeof(struct cmd_desc);
341 *index %= num_entries * sizeof(struct cmd_desc);
342}
343
/* Advance a command-ring offset by count descriptors. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}

/* Advance a response-ring offset by count descriptors. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}

/* Advance an rx-free-ring offset by count descriptors. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}

/* Advance a TX-ring offset by count descriptors.  NOTE(review): uses
 * TXLO_ENTRIES unconditionally; fine while only txLo is driven this way,
 * but would need revisiting if txHi (TXHI_ENTRIES) ever used it.
 */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{

	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
368
369static inline void
370typhoon_inc_rx_index(u32 *index, const int count)
371{
372
373 *index += count * sizeof(struct rx_desc);
374 *index %= RX_ENTRIES * sizeof(struct rx_desc);
375}
376
/* Soft-reset the 3XP and optionally wait for it to come back.
 *
 * @ioaddr: mapped register base
 * @wait_type: NoWait, WaitNoSleep (busy-wait) or WaitSleep (may sleep)
 *
 * Returns 0 on success, -ETIMEDOUT if the card never reported
 * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if (wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* Mask and acknowledge everything before pulling reset. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* Assert reset, flush the posted write, hold briefly, release. */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if (wait_type != NoWait) {
		/* Poll until the card signals it is waiting for the host. */
		for (i = 0; i < timeout; i++) {
			if (ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if (wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* Re-mask and re-ack: reset may have latched new interrupt bits. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* Extra settle time before callers touch the card again.
	 * NOTE(review): empirical delay inherited with this code; exact
	 * hardware requirement not documented here.
	 */
	if (wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
431
432static int
433typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
434{
435 int i, err = 0;
436
437 for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
438 if (ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
439 goto out;
440 udelay(TYPHOON_UDELAY);
441 }
442
443 err = -ETIMEDOUT;
444
445out:
446 return err;
447}
448
449static inline void
450typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
451{
452 if (resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
453 netif_carrier_off(dev);
454 else
455 netif_carrier_on(dev);
456}
457
/* Answer the card's HELLO keep-alive by posting a HELLO_RESP command.
 *
 * Called from the response-processing path (see
 * typhoon_process_response()), so only a trylock is used: if the command
 * lock is contended, another command is already being issued and the
 * reply is deliberately skipped as best-effort rather than spinning here.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	if (spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* Descriptor must be visible before the doorbell write. */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
478
/* Drain the response ring, from the offset we last cleared up to the
 * offset the NIC marked ready.
 *
 * @resp_save: if non-NULL, the first response carrying a seqNo is copied
 *	here (up to @resp_size descriptors); unsolicited responses (media
 *	status, hello) are handled inline, anything else is logged.
 *
 * Returns nonzero once the awaited response has been captured (i.e.
 * resp_save was consumed), zero otherwise.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
			struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while (cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		/* numDesc counts the extra descriptors following this one */
		count = resp->numDesc + 1;
		if (resp_save && resp->seqNo) {
			if (count > resp_size) {
				/* caller's buffer too small; flag and drop */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* The response may wrap past the end of the ring;
			 * copy it out in at most two pieces. */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if (unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if (unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited response as delivered */
			resp_save = NULL;
		} else if (resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if (resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* Publish our new cleared offset to the NIC before returning. */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return resp_save == NULL;
}
537
538static inline int
539typhoon_num_free(int lastWrite, int lastRead, int ringSize)
540{
541
542
543
544 lastWrite /= sizeof(struct cmd_desc);
545 lastRead /= sizeof(struct cmd_desc);
546 return (ringSize + lastRead - lastWrite - 1) % ringSize;
547}
548
549static inline int
550typhoon_num_free_cmd(struct typhoon *tp)
551{
552 int lastWrite = tp->cmdRing.lastWrite;
553 int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
554
555 return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
556}
557
558static inline int
559typhoon_num_free_resp(struct typhoon *tp)
560{
561 int respReady = le32_to_cpu(tp->indexes->respReady);
562 int respCleared = le32_to_cpu(tp->indexes->respCleared);
563
564 return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
565}
566
567static inline int
568typhoon_num_free_tx(struct transmit_ring *ring)
569{
570
571 return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
572}
573
/* Post num_cmd command descriptors to the NIC and, if the command asks
 * for a response (TYPHOON_CMD_RESPOND), busy-wait for it.
 *
 * @resp/@num_resp: buffer for the response descriptors; may be NULL even
 *	for responding commands, in which case a local buffer is used.
 *
 * Returns 0 on success, -ENOMEM if either ring lacks space, -ETIMEDOUT
 * if no response arrived, -EIO if the response carries an error flag.
 * Runs entirely under tp->command_lock.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if (freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if (cmd->flags & TYPHOON_CMD_RESPOND) {
		/* The command will generate a response; make sure we wait
		 * for it below even when the caller doesn't care about the
		 * contents, so the ring stays drained.
		 */
		tp->awaiting_resp = 1;
		if (resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* Copy the command(s) into the ring, in two pieces if they wrap
	 * past the end.
	 */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if (unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if (unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* Descriptors must be visible in memory before the doorbell write
	 * tells the card to look at them.
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if ((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Busy-poll the response ring rather than sleeping: we hold the
	 * command lock, and responses normally arrive quickly.  Each pass
	 * also processes any unsolicited responses that show up first.
	 */
	got_resp = 0;
	for (i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if (indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
							    resp);
		udelay(TYPHOON_UDELAY);
	}

	if (!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Only the flags of the first response descriptor are checked
	 * here; callers inspect the payload themselves.
	 */
	if (resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if (tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Responses that arrived after we stopped polling would
		 * otherwise sit unprocessed (the interrupt path presumably
		 * skips the ring while awaiting_resp is set — its code is
		 * not in this chunk).  Kick a self-interrupt so they get
		 * picked up.
		 */
		if (indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
688
/* Write a TSO option descriptor into the TX ring ahead of the packet's
 * fragment descriptors.
 *
 * @ring_dma: DMA base address of the TX ring; combined with the current
 *	write offset to make respAddrLo point back at this descriptor's
 *	own bytesTx/status words (presumably where the NIC reports
 *	segmentation progress — not verifiable from this file).
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
		 u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* Whole skb is described by this one option descriptor. */
	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
709
/* ndo_start_xmit: queue one skb on the low-priority TX ring.
 *
 * Builds a first (header) descriptor carrying per-packet offload flags,
 * an optional TSO option descriptor, then one fragment descriptor per
 * DMA-mapped piece of the packet.  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* Only the low-priority ring is used for normal transmit; txHi
	 * exists but is not driven from this path.
	 */
	txRing = &tp->txLoRing;

	/* Worst-case descriptor count for THIS packet: one header
	 * descriptor, one per fragment plus one for the linear head, and
	 * one extra for the TSO option descriptor if segmenting.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* Under normal operation the queue-stop logic at the bottom of
	 * this function guarantees room; if we race and find the ring
	 * momentarily full, spin until the completion path frees slots.
	 * NOTE(review): relies on lastRead being advanced elsewhere (tx
	 * clean path, not in this chunk) — confirm before changing.
	 */
	while (unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* Stash the skb pointer in the 64-bit addr field so the
	 * completion path can find and free it. */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* TCP, UDP and IP checksum flags are all set together —
		 * presumably the NIC cannot enable them selectively per
		 * packet; TODO confirm against firmware docs.
		 */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		/* Tag is stored big-endian inside the little-endian flags
		 * word, hence the htons() before the shift. */
		first_txd->processFlags |=
		    cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* Map the packet data and emit one fragment descriptor per piece:
	 * just the linear buffer for unfragmented skbs, otherwise the
	 * head followed by each page fragment.
	 */
	if (skb_shinfo(skb)->nr_frags == 0) {
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = skb_frag_size(frag);
			frag_addr = skb_frag_address(frag);
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Descriptors must be visible before the doorbell write that
	 * hands them to the NIC.
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* Stop the queue if a worst-case future packet (max frags + TSO
	 * descriptors + header) might not fit.  Re-check after stopping:
	 * the completion path may have freed slots in between, in which
	 * case wake immediately to avoid a stalled queue.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if (typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		if (typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
856
857static void
858typhoon_set_rx_mode(struct net_device *dev)
859{
860 struct typhoon *tp = netdev_priv(dev);
861 struct cmd_desc xp_cmd;
862 u32 mc_filter[2];
863 __le16 filter;
864
865 filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
866 if (dev->flags & IFF_PROMISC) {
867 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
868 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
869 (dev->flags & IFF_ALLMULTI)) {
870
871 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
872 } else if (!netdev_mc_empty(dev)) {
873 struct netdev_hw_addr *ha;
874
875 memset(mc_filter, 0, sizeof(mc_filter));
876 netdev_for_each_mc_addr(ha, dev) {
877 int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
878 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
879 }
880
881 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
882 TYPHOON_CMD_SET_MULTICAST_HASH);
883 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
884 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
885 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
886 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
887
888 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
889 }
890
891 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
892 xp_cmd.parm1 = filter;
893 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
894}
895
/* Fetch the NIC's statistics block (READ_STATS, 7 response descriptors)
 * and fold it into dev->stats.
 *
 * The card's counters restart whenever it is reset/slept, so each field
 * is offset by the snapshot in tp->stats_saved taken before the last
 * such event.  Also refreshes the cached tp->speed/tp->duplex from the
 * reported link status.  Returns 0 or the command's error code.
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->dev->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if (err < 0)
		return err;

	/* Mapping is lossy in places: tx_errors only counts carrier
	 * losses, and rx_errors aggregates FIFO overruns, bad SSDs and
	 * CRC errors — these are the counters the hardware exposes.
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
947
948static struct net_device_stats *
949typhoon_get_stats(struct net_device *dev)
950{
951 struct typhoon *tp = netdev_priv(dev);
952 struct net_device_stats *stats = &tp->dev->stats;
953 struct net_device_stats *saved = &tp->stats_saved;
954
955 smp_rmb();
956 if (tp->card_state == Sleeping)
957 return saved;
958
959 if (typhoon_do_get_stats(tp) < 0) {
960 netdev_err(dev, "error getting stats\n");
961 return saved;
962 }
963
964 return stats;
965}
966
967static void
968typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
969{
970 struct typhoon *tp = netdev_priv(dev);
971 struct pci_dev *pci_dev = tp->pdev;
972 struct cmd_desc xp_cmd;
973 struct resp_desc xp_resp[3];
974
975 smp_rmb();
976 if (tp->card_state == Sleeping) {
977 strlcpy(info->fw_version, "Sleep image",
978 sizeof(info->fw_version));
979 } else {
980 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
981 if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
982 strlcpy(info->fw_version, "Unknown runtime",
983 sizeof(info->fw_version));
984 } else {
985 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
986 snprintf(info->fw_version, sizeof(info->fw_version),
987 "%02x.%03x.%03x", sleep_ver >> 24,
988 (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
989 }
990 }
991
992 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
993 strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
994}
995
996static int
997typhoon_get_link_ksettings(struct net_device *dev,
998 struct ethtool_link_ksettings *cmd)
999{
1000 struct typhoon *tp = netdev_priv(dev);
1001 u32 supported, advertising = 0;
1002
1003 supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1004 SUPPORTED_Autoneg;
1005
1006 switch (tp->xcvr_select) {
1007 case TYPHOON_XCVR_10HALF:
1008 advertising = ADVERTISED_10baseT_Half;
1009 break;
1010 case TYPHOON_XCVR_10FULL:
1011 advertising = ADVERTISED_10baseT_Full;
1012 break;
1013 case TYPHOON_XCVR_100HALF:
1014 advertising = ADVERTISED_100baseT_Half;
1015 break;
1016 case TYPHOON_XCVR_100FULL:
1017 advertising = ADVERTISED_100baseT_Full;
1018 break;
1019 case TYPHOON_XCVR_AUTONEG:
1020 advertising = ADVERTISED_10baseT_Half |
1021 ADVERTISED_10baseT_Full |
1022 ADVERTISED_100baseT_Half |
1023 ADVERTISED_100baseT_Full |
1024 ADVERTISED_Autoneg;
1025 break;
1026 }
1027
1028 if (tp->capabilities & TYPHOON_FIBER) {
1029 supported |= SUPPORTED_FIBRE;
1030 advertising |= ADVERTISED_FIBRE;
1031 cmd->base.port = PORT_FIBRE;
1032 } else {
1033 supported |= SUPPORTED_10baseT_Half |
1034 SUPPORTED_10baseT_Full |
1035 SUPPORTED_TP;
1036 advertising |= ADVERTISED_TP;
1037 cmd->base.port = PORT_TP;
1038 }
1039
1040
1041 typhoon_do_get_stats(tp);
1042 cmd->base.speed = tp->speed;
1043 cmd->base.duplex = tp->duplex;
1044 cmd->base.phy_address = 0;
1045 if (tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1046 cmd->base.autoneg = AUTONEG_ENABLE;
1047 else
1048 cmd->base.autoneg = AUTONEG_DISABLE;
1049
1050 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1051 supported);
1052 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1053 advertising);
1054
1055 return 0;
1056}
1057
1058static int
1059typhoon_set_link_ksettings(struct net_device *dev,
1060 const struct ethtool_link_ksettings *cmd)
1061{
1062 struct typhoon *tp = netdev_priv(dev);
1063 u32 speed = cmd->base.speed;
1064 struct cmd_desc xp_cmd;
1065 __le16 xcvr;
1066 int err;
1067
1068 err = -EINVAL;
1069 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1070 xcvr = TYPHOON_XCVR_AUTONEG;
1071 } else {
1072 if (cmd->base.duplex == DUPLEX_HALF) {
1073 if (speed == SPEED_10)
1074 xcvr = TYPHOON_XCVR_10HALF;
1075 else if (speed == SPEED_100)
1076 xcvr = TYPHOON_XCVR_100HALF;
1077 else
1078 goto out;
1079 } else if (cmd->base.duplex == DUPLEX_FULL) {
1080 if (speed == SPEED_10)
1081 xcvr = TYPHOON_XCVR_10FULL;
1082 else if (speed == SPEED_100)
1083 xcvr = TYPHOON_XCVR_100FULL;
1084 else
1085 goto out;
1086 } else
1087 goto out;
1088 }
1089
1090 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1091 xp_cmd.parm1 = xcvr;
1092 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1093 if (err < 0)
1094 goto out;
1095
1096 tp->xcvr_select = xcvr;
1097 if (cmd->base.autoneg == AUTONEG_ENABLE) {
1098 tp->speed = 0xff;
1099 tp->duplex = 0xff;
1100 } else {
1101 tp->speed = speed;
1102 tp->duplex = cmd->base.duplex;
1103 }
1104
1105out:
1106 return err;
1107}
1108
1109static void
1110typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1111{
1112 struct typhoon *tp = netdev_priv(dev);
1113
1114 wol->supported = WAKE_PHY | WAKE_MAGIC;
1115 wol->wolopts = 0;
1116 if (tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1117 wol->wolopts |= WAKE_PHY;
1118 if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1119 wol->wolopts |= WAKE_MAGIC;
1120 memset(&wol->sopass, 0, sizeof(wol->sopass));
1121}
1122
1123static int
1124typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1125{
1126 struct typhoon *tp = netdev_priv(dev);
1127
1128 if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1129 return -EINVAL;
1130
1131 tp->wol_events = 0;
1132 if (wol->wolopts & WAKE_PHY)
1133 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1134 if (wol->wolopts & WAKE_MAGIC)
1135 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1136
1137 return 0;
1138}
1139
1140static void
1141typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1142{
1143 ering->rx_max_pending = RXENT_ENTRIES;
1144 ering->tx_max_pending = TXLO_ENTRIES - 1;
1145
1146 ering->rx_pending = RXENT_ENTRIES;
1147 ering->tx_pending = TXLO_ENTRIES - 1;
1148}
1149
/* Ethtool entry points.  Ring sizes are fixed, so no set_ringparam. */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_drvinfo = typhoon_get_drvinfo,
	.get_wol = typhoon_get_wol,
	.set_wol = typhoon_set_wol,
	.get_link = ethtool_op_get_link,
	.get_ringparam = typhoon_get_ringparam,
	.get_link_ksettings = typhoon_get_link_ksettings,
	.set_link_ksettings = typhoon_set_link_ksettings,
};
1159
1160static int
1161typhoon_wait_interrupt(void __iomem *ioaddr)
1162{
1163 int i, err = 0;
1164
1165 for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1166 if (ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1167 TYPHOON_INTR_BOOTCMD)
1168 goto out;
1169 udelay(TYPHOON_UDELAY);
1170 }
1171
1172 err = -ETIMEDOUT;
1173
1174out:
1175 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1176 return err;
1177}
1178
/* DMA offset of a member within the shared block. */
#define shared_offset(x) offsetof(struct typhoon_shared, x)

/* One-time setup of the host/NIC shared area: zero it, fill the
 * typhoon_interface block with the DMA address and size of every ring,
 * and point the host-side ring structures at their buffers.  Also
 * initializes the software defaults (offload bits, card state, lock).
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* Each ring's bus address = shared block base + member offset. */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side views of the same rings. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
	tp->offload |= TYPHOON_OFFLOAD_VLAN;

	spin_lock_init(&tp->command_lock);

	/* Make the shared area globally visible before the card can be
	 * told about it. */
	wmb();
}
1250
1251static void
1252typhoon_init_rings(struct typhoon *tp)
1253{
1254 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1255
1256 tp->txLoRing.lastWrite = 0;
1257 tp->txHiRing.lastWrite = 0;
1258 tp->rxLoRing.lastWrite = 0;
1259 tp->rxHiRing.lastWrite = 0;
1260 tp->rxBuffRing.lastWrite = 0;
1261 tp->cmdRing.lastWrite = 0;
1262 tp->respRing.lastWrite = 0;
1263
1264 tp->txLoRing.lastRead = 0;
1265 tp->txHiRing.lastRead = 0;
1266}
1267
/* Firmware image cached across all typhoon devices; loaded at most once
 * (typhoon_request_firmware() returns early if already set).
 */
static const struct firmware *typhoon_fw;
1269
1270static int
1271typhoon_request_firmware(struct typhoon *tp)
1272{
1273 const struct typhoon_file_header *fHdr;
1274 const struct typhoon_section_header *sHdr;
1275 const u8 *image_data;
1276 u32 numSections;
1277 u32 section_len;
1278 u32 remaining;
1279 int err;
1280
1281 if (typhoon_fw)
1282 return 0;
1283
1284 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1285 if (err) {
1286 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1287 FIRMWARE_NAME);
1288 return err;
1289 }
1290
1291 image_data = typhoon_fw->data;
1292 remaining = typhoon_fw->size;
1293 if (remaining < sizeof(struct typhoon_file_header))
1294 goto invalid_fw;
1295
1296 fHdr = (struct typhoon_file_header *) image_data;
1297 if (memcmp(fHdr->tag, "TYPHOON", 8))
1298 goto invalid_fw;
1299
1300 numSections = le32_to_cpu(fHdr->numSections);
1301 image_data += sizeof(struct typhoon_file_header);
1302 remaining -= sizeof(struct typhoon_file_header);
1303
1304 while (numSections--) {
1305 if (remaining < sizeof(struct typhoon_section_header))
1306 goto invalid_fw;
1307
1308 sHdr = (struct typhoon_section_header *) image_data;
1309 image_data += sizeof(struct typhoon_section_header);
1310 section_len = le32_to_cpu(sHdr->len);
1311
1312 if (remaining < section_len)
1313 goto invalid_fw;
1314
1315 image_data += section_len;
1316 remaining -= section_len;
1317 }
1318
1319 return 0;
1320
1321invalid_fw:
1322 netdev_err(tp->dev, "Invalid firmware image\n");
1323 release_firmware(typhoon_fw);
1324 typhoon_fw = NULL;
1325 return -EINVAL;
1326}
1327
/* Download the runtime firmware image to the 3XP.
 *
 * Each section's payload is bounced through a single DMA-coherent page
 * (dpage): the host copies up to PAGE_SIZE at a time into dpage while
 * folding an Internet checksum over the copy, writes the length,
 * checksum and destination address to the card's boot registers, and
 * issues SEG_AVAILABLE.  The card signals via the BOOTCMD interrupt
 * when it is ready for the next chunk.
 *
 * Returns 0 on success, -ENOMEM if the bounce page cannot be allocated,
 * or -ETIMEDOUT if the card stops responding.  The interrupt enable and
 * mask registers are restored to their entry values on all exit paths
 * through err_out_irq.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* DMA-coherent bounce page used to stage each chunk for the card. */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if (!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* Temporarily enable (and mask) the BOOTCMD interrupt so we can
	 * poll for it; both registers are restored at err_out_irq.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Ack any stale BOOTCMD, program the boot address and the five
	 * HMAC digest words from the file header, then start the download.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* Copy each section to the card, one bounce-page-sized chunk at
	 * a time, waiting for the card to ask for each segment.
	 */
	for (i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while (section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if (typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Copy the chunk into the bounce page and compute its
			 * checksum in one pass; the card verifies it against
			 * the value written to BOOT_CHECKSUM below.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			/* Only 32-bit DMA addresses are used here (HI = 0). */
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	if (typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the caller's interrupt mask/enable state. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1468
/* Hand the boot record (the shared area's DMA address) to the 3XP and
 * tell it to boot.  @initial_status is the status value the card must
 * be showing before we proceed.  Returns 0 on success, -ETIMEDOUT if
 * the card never reaches the expected status.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if (typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* Register the shared area with the card (32-bit DMA; HI = 0). */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers before issuing
	 * the final boot command.
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1504
/* Walk @txRing from its lastRead position up to the card-reported
 * *index, releasing host resources for each completed descriptor:
 * the skb pointer stashed in a TYPHOON_TX_DESC, or the DMA mapping
 * recorded in a TYPHOON_FRAG_DESC.  Returns the new read offset;
 * the caller is responsible for storing it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
		volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while (lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if (type == TYPHOON_TX_DESC) {
			/* The header descriptor carries the skb pointer,
			 * stored as an integer in tx_addr.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if (type == TYPHOON_FRAG_DESC) {
			/* Fragment descriptors carry a DMA mapping that must
			 * be torn down once the card is done with it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1540
/* Reap completed transmit descriptors and, if the queue was stopped,
 * wake it once there is room for a worst-case packet (one descriptor
 * per fragment plus the header, with a little slack).
 */
static void
typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead;
	int numDesc = MAX_SKB_FRAGS + 1;

	lastRead = typhoon_clean_tx(tp, txRing, index);
	if (netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
				lastRead, TXLO_ENTRIES) > (numDesc + 2))
		netif_wake_queue(tp->dev);

	txRing->lastRead = lastRead;
	/* Publish the updated read index before other CPUs look at it. */
	smp_wmb();
}
1557
/* Return buffer @idx to the receive free ring so the card can reuse it.
 * If the free ring is full (next write position would collide with the
 * card's cleared index), the skb is dropped instead and the slot left
 * empty; typhoon_fill_free_ring() will replenish it later.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* No room on the free ring -- drop the buffer. */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Make sure the descriptor is fully written before telling the
	 * card about it via rxBuffReady.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1584
1585static int
1586typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1587{
1588 struct typhoon_indexes *indexes = tp->indexes;
1589 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1590 struct basic_ring *ring = &tp->rxBuffRing;
1591 struct rx_free *r;
1592 struct sk_buff *skb;
1593 dma_addr_t dma_addr;
1594
1595 rxb->skb = NULL;
1596
1597 if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1598 le32_to_cpu(indexes->rxBuffCleared))
1599 return -ENOMEM;
1600
1601 skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
1602 if (!skb)
1603 return -ENOMEM;
1604
1605#if 0
1606
1607
1608
1609 skb_reserve(skb, 2);
1610#endif
1611
1612 dma_addr = pci_map_single(tp->pdev, skb->data,
1613 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1614
1615
1616
1617
1618 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1619 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1620 r->virtAddr = idx;
1621 r->physAddr = cpu_to_le32(dma_addr);
1622 rxb->skb = skb;
1623 rxb->dma_addr = dma_addr;
1624
1625
1626 wmb();
1627 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1628 return 0;
1629}
1630
/* Receive completion processing for one ring.
 *
 * Walks descriptors from *cleared to *ready, delivering at most @budget
 * packets.  Frames shorter than rx_copybreak are copied into a fresh
 * skb and the original buffer is recycled to the free ring; larger
 * frames are passed up directly and a replacement buffer is allocated.
 * Checksum and VLAN results from the descriptor are propagated to the
 * skb.  Writes the final position back to *cleared and returns the
 * number of packets delivered.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while (rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		/* Errored frames are silently dropped; the buffer goes
		 * straight back on the free ring.
		 */
		if (rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if (pkt_len < rx_copybreak &&
		   (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
			/* Copybreak path: 2-byte reserve aligns the IP
			 * header; sync the buffer to the CPU, copy, and hand
			 * it back to the device unchanged.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Pass the buffer itself up and try to replace it. */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Only claim a good checksum when the IP check passed along
		 * with exactly one of the TCP/UDP checks.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if (csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		if (rx->rxStatus & TYPHOON_RX_VLAN)
			__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
					       ntohl(rx->vlanTag) & 0xffff);
		netif_receive_skb(new_skb);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1707
1708static void
1709typhoon_fill_free_ring(struct typhoon *tp)
1710{
1711 u32 i;
1712
1713 for (i = 0; i < RXENT_ENTRIES; i++) {
1714 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1715 if (rxb->skb)
1716 continue;
1717 if (typhoon_alloc_rx_skb(tp, i) < 0)
1718 break;
1719 }
1720}
1721
/* NAPI poll handler.
 *
 * Drains the response ring, reaps TxLo completions, processes both
 * receive rings within @budget, refills the buffer free ring if the
 * card has consumed it entirely, and -- when under budget -- completes
 * NAPI and unmasks the card's interrupts.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Order our reads of the index page against the card's writes. */
	rmb();
	if (!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if (le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	/* High-priority ring first, then the low ring with what's left. */
	if (indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if (indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if (le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* The card has used up all the posted buffers; replenish. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1762
/* Shared interrupt handler: acknowledge the card, mask its interrupts,
 * and hand the work off to NAPI.  Returns IRQ_NONE if the card did not
 * raise this interrupt.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if (!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* Ack exactly the bits we saw. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		/* Mask everything until typhoon_poll() unmasks again. */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1786
1787static void
1788typhoon_free_rx_rings(struct typhoon *tp)
1789{
1790 u32 i;
1791
1792 for (i = 0; i < RXENT_ENTRIES; i++) {
1793 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1794 if (rxb->skb) {
1795 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1796 PCI_DMA_FROMDEVICE);
1797 dev_kfree_skb(rxb->skb);
1798 rxb->skb = NULL;
1799 }
1800 }
1801}
1802
/* Put the 3XP into its sleep image after arming the given wake @events,
 * then move the PCI device into power state @state with wake enabled.
 * Returns 0 on success or a negative errno; the commands and the wait
 * for SLEEPING status can each fail independently.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* The link cannot be monitored while the card sleeps, so report
	 * the carrier as down.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1839
/* Wake the card from sleep: restore PCI state, issue the WAKEUP boot
 * command, and fall back to a full reset (using @wait_type) if the card
 * does not respond or its capabilities flag says a reset is required
 * after wakeup.  Returns 0 or a negative errno from typhoon_reset().
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
	   (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1860
/* Bring the card to full operation: initialize and fill the rings, load
 * and boot the runtime firmware, configure it via the command ring (max
 * packet size, MAC address, interrupt coalescing, transceiver, VLAN
 * ethertype, offloads, rx filter), enable Tx/Rx, and finally unmask
 * host interrupts.  On any failure, resets the card and reinitializes
 * the rings before returning a negative errno.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if (err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* The MAC address is split across parm1 (first 2 bytes) and
	 * parm2 (last 4 bytes).
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* parm1 == 0 -- presumably disables IRQ coalescing; confirm
	 * against the firmware command documentation.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1952
/* Quiesce the running firmware: mask interrupts, disable Rx, wait for
 * outstanding Tx to drain, disable Tx, snapshot the statistics, halt
 * the card, and finally reset it (with @wait_type semantics).  Any Tx
 * still pending after the reset is force-cleaned.  Returns 0, or
 * -ETIMEDOUT if the reset itself fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Stop further interrupts from the card. */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Poll for the card to finish all posted TxLo descriptors. */
	for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if (indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if (i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Save the statistics before the firmware (and its counters)
	 * goes away.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if (typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* Reclaim any Tx the card never got to before the reset. */
	if (indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2013
/* ndo_tx_timeout: attempt a full recovery -- reset the card, reclaim
 * all pending Tx, free the receive buffers, and restart the runtime.
 * If either the reset or the restart fails, the card is reset one last
 * time and the carrier dropped.
 */
static void
typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct typhoon *tp = netdev_priv(dev);

	if (typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
		netdev_warn(dev, "could not reset in tx timeout\n");
		goto truly_dead;
	}

	/* Force all pending Tx to be treated as completed. */
	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
	typhoon_free_rx_rings(tp);

	if (typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "could not start runtime in tx timeout\n");
		goto truly_dead;
	}

	netif_wake_queue(dev);
	return;

truly_dead:
	/* Recovery failed; leave the card reset and the link down. */
	typhoon_reset(tp->ioaddr, NoWait);
	netif_carrier_off(dev);
}
2041
/* ndo_open: load the firmware, wake the card, attach the (shared) IRQ,
 * enable NAPI and start the runtime.  On failure the error path tries
 * to reboot the card's sleep image and put it back to sleep.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if (err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if (err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Best-effort: get back into the sleep image, then to sleep. */
	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2090
/* ndo_stop: stop the queue and NAPI, halt the runtime, release the IRQ
 * and receive buffers, then reboot the sleep image and put the card to
 * sleep.  Always returns 0; individual failures are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if (typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* The runtime is halted, so no more interrupts will arrive. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2116
2117#ifdef CONFIG_PM
/* PM resume: wake the card and restart the runtime for an interface
 * that was running at suspend time.  On failure the card is left reset
 * and -EBUSY returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* Nothing to restore if the interface was down. */
	if (!netif_running(dev))
		return 0;

	if (typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if (typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2146
/* PM suspend: stop the runtime, reboot the sleep image, reprogram the
 * MAC address and a minimal rx filter (directed + broadcast) so wake
 * events can still match, and put the card to sleep with the configured
 * wake events.  On any failure, typhoon_resume() is invoked to bring
 * the device back and -EBUSY is returned.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* Nothing to do if the interface was down. */
	if (!netif_running(dev))
		return 0;

	if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");

	netif_device_detach(dev);

	if (typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* MAC address split across parm1 (2 bytes) and parm2 (4 bytes). */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if (typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2204#endif
2205
/* Probe whether memory-mapped IO (BAR 1) works on this board by asking
 * the card to raise a self-interrupt and checking that the bit appears
 * in the interrupt status register.  Returns 1 if MMIO is usable, 0 to
 * fall back to port IO.
 */
static int
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if (!ioaddr)
		goto out;

	/* Only probe when the card is in its expected fresh state. */
	if (ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Trigger a self-interrupt and see if it shows up via MMIO. */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if ((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if (val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Clean up: mask, ack, and disable the interrupts we enabled. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if (!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2251
/* Net device operations for all Typhoon variants. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open = typhoon_open,
	.ndo_stop = typhoon_close,
	.ndo_start_xmit = typhoon_start_tx,
	.ndo_set_rx_mode = typhoon_set_rx_mode,
	.ndo_tx_timeout = typhoon_tx_timeout,
	.ndo_get_stats = typhoon_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};
2262
2263static int
2264typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2265{
2266 struct net_device *dev;
2267 struct typhoon *tp;
2268 int card_id = (int) ent->driver_data;
2269 void __iomem *ioaddr;
2270 void *shared;
2271 dma_addr_t shared_dma;
2272 struct cmd_desc xp_cmd;
2273 struct resp_desc xp_resp[3];
2274 int err = 0;
2275 const char *err_msg;
2276
2277 dev = alloc_etherdev(sizeof(*tp));
2278 if (dev == NULL) {
2279 err_msg = "unable to alloc new net device";
2280 err = -ENOMEM;
2281 goto error_out;
2282 }
2283 SET_NETDEV_DEV(dev, &pdev->dev);
2284
2285 err = pci_enable_device(pdev);
2286 if (err < 0) {
2287 err_msg = "unable to enable device";
2288 goto error_out_dev;
2289 }
2290
2291 err = pci_set_mwi(pdev);
2292 if (err < 0) {
2293 err_msg = "unable to set MWI";
2294 goto error_out_disable;
2295 }
2296
2297 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2298 if (err < 0) {
2299 err_msg = "No usable DMA configuration";
2300 goto error_out_mwi;
2301 }
2302
2303
2304
2305 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2306 err_msg = "region #1 not a PCI IO resource, aborting";
2307 err = -ENODEV;
2308 goto error_out_mwi;
2309 }
2310 if (pci_resource_len(pdev, 0) < 128) {
2311 err_msg = "Invalid PCI IO region size, aborting";
2312 err = -ENODEV;
2313 goto error_out_mwi;
2314 }
2315 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2316 err_msg = "region #1 not a PCI MMIO resource, aborting";
2317 err = -ENODEV;
2318 goto error_out_mwi;
2319 }
2320 if (pci_resource_len(pdev, 1) < 128) {
2321 err_msg = "Invalid PCI MMIO region size, aborting";
2322 err = -ENODEV;
2323 goto error_out_mwi;
2324 }
2325
2326 err = pci_request_regions(pdev, KBUILD_MODNAME);
2327 if (err < 0) {
2328 err_msg = "could not request regions";
2329 goto error_out_mwi;
2330 }
2331
2332
2333
2334 if (use_mmio != 0 && use_mmio != 1)
2335 use_mmio = typhoon_test_mmio(pdev);
2336
2337 ioaddr = pci_iomap(pdev, use_mmio, 128);
2338 if (!ioaddr) {
2339 err_msg = "cannot remap registers, aborting";
2340 err = -EIO;
2341 goto error_out_regions;
2342 }
2343
2344
2345
2346 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2347 &shared_dma);
2348 if (!shared) {
2349 err_msg = "could not allocate DMA memory";
2350 err = -ENOMEM;
2351 goto error_out_remap;
2352 }
2353
2354 dev->irq = pdev->irq;
2355 tp = netdev_priv(dev);
2356 tp->shared = shared;
2357 tp->shared_dma = shared_dma;
2358 tp->pdev = pdev;
2359 tp->tx_pdev = pdev;
2360 tp->ioaddr = ioaddr;
2361 tp->tx_ioaddr = ioaddr;
2362 tp->dev = dev;
2363
2364
2365
2366
2367
2368
2369
2370
2371 err = typhoon_reset(ioaddr, WaitSleep);
2372 if (err < 0) {
2373 err_msg = "could not reset 3XP";
2374 goto error_out_dma;
2375 }
2376
2377
2378
2379
2380
2381 pci_set_master(pdev);
2382 pci_save_state(pdev);
2383
2384 typhoon_init_interface(tp);
2385 typhoon_init_rings(tp);
2386
2387 err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
2388 if (err < 0) {
2389 err_msg = "cannot boot 3XP sleep image";
2390 goto error_out_reset;
2391 }
2392
2393 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2394 err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
2395 if (err < 0) {
2396 err_msg = "cannot read MAC address";
2397 goto error_out_reset;
2398 }
2399
2400 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2401 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2402
2403 if (!is_valid_ether_addr(dev->dev_addr)) {
2404 err_msg = "Could not obtain valid ethernet address, aborting";
2405 err = -EIO;
2406 goto error_out_reset;
2407 }
2408
2409
2410
2411
2412 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2413 err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
2414 if (err < 0) {
2415 err_msg = "Could not get Sleep Image version";
2416 goto error_out_reset;
2417 }
2418
2419 tp->capabilities = typhoon_card_info[card_id].capabilities;
2420 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2421
2422
2423
2424
2425
2426
2427
2428 if (xp_resp[0].numDesc != 0)
2429 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2430
2431 err = typhoon_sleep(tp, PCI_D3hot, 0);
2432 if (err < 0) {
2433 err_msg = "cannot put adapter to sleep";
2434 goto error_out_reset;
2435 }
2436
2437
2438 dev->netdev_ops = &typhoon_netdev_ops;
2439 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2440 dev->watchdog_timeo = TX_TIMEOUT;
2441
2442 dev->ethtool_ops = &typhoon_ethtool_ops;
2443
2444
2445
2446
2447
2448
2449
2450
2451 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2452 NETIF_F_HW_VLAN_CTAG_TX;
2453 dev->features = dev->hw_features |
2454 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2455
2456 err = register_netdev(dev);
2457 if (err < 0) {
2458 err_msg = "unable to register netdev";
2459 goto error_out_reset;
2460 }
2461
2462 pci_set_drvdata(pdev, dev);
2463
2464 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2465 typhoon_card_info[card_id].name,
2466 use_mmio ? "MMIO" : "IO",
2467 (unsigned long long)pci_resource_start(pdev, use_mmio),
2468 dev->dev_addr);
2469
2470
2471
2472
2473 if (xp_resp[0].numDesc == 0) {
2474
2475
2476
2477 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2478 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2479 monthday >> 8, monthday & 0xff);
2480 } else if (xp_resp[0].numDesc == 2) {
2481
2482
2483 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2484 u8 *ver_string = (u8 *) &xp_resp[1];
2485 ver_string[25] = 0;
2486 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2487 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2488 sleep_ver & 0xfff, ver_string);
2489 } else {
2490 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2491 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2492 }
2493
2494 return 0;
2495
2496error_out_reset:
2497 typhoon_reset(ioaddr, NoWait);
2498
2499error_out_dma:
2500 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2501 shared, shared_dma);
2502error_out_remap:
2503 pci_iounmap(pdev, ioaddr);
2504error_out_regions:
2505 pci_release_regions(pdev);
2506error_out_mwi:
2507 pci_clear_mwi(pdev);
2508error_out_disable:
2509 pci_disable_device(pdev);
2510error_out_dev:
2511 free_netdev(dev);
2512error_out:
2513 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2514 return err;
2515}
2516
2517static void
2518typhoon_remove_one(struct pci_dev *pdev)
2519{
2520 struct net_device *dev = pci_get_drvdata(pdev);
2521 struct typhoon *tp = netdev_priv(dev);
2522
2523 unregister_netdev(dev);
2524 pci_set_power_state(pdev, PCI_D0);
2525 pci_restore_state(pdev);
2526 typhoon_reset(tp->ioaddr, NoWait);
2527 pci_iounmap(pdev, tp->ioaddr);
2528 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2529 tp->shared, tp->shared_dma);
2530 pci_release_regions(pdev);
2531 pci_clear_mwi(pdev);
2532 pci_disable_device(pdev);
2533 free_netdev(dev);
2534}
2535
/* PCI driver descriptor: binds typhoon_init_one()/typhoon_remove_one() to
 * the device IDs listed in typhoon_pci_tbl. The legacy suspend/resume
 * callbacks are compiled in only when power management is enabled. */
static struct pci_driver typhoon_driver = {
	.name = KBUILD_MODNAME,
	.id_table = typhoon_pci_tbl,
	.probe = typhoon_init_one,
	.remove = typhoon_remove_one,
#ifdef CONFIG_PM
	.suspend = typhoon_suspend,
	.resume = typhoon_resume,
#endif
};
2546
2547static int __init
2548typhoon_init(void)
2549{
2550 return pci_register_driver(&typhoon_driver);
2551}
2552
/* Module exit: drop the cached firmware image, then unregister from the
 * PCI core (which runs typhoon_remove_one() for any bound devices).
 * release_firmware() is a no-op on NULL, so this is safe even if the
 * image was never loaded. */
static void __exit
typhoon_cleanup(void)
{
	release_firmware(typhoon_fw);
	pci_unregister_driver(&typhoon_driver);
}
2559
2560module_init(typhoon_init);
2561module_exit(typhoon_cleanup);
2562