1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/* Module tunables -- exposed via module_param() below. */

/* Received packets smaller than this are copied into a freshly allocated
 * skb and the original DMA buffer is handed straight back to the NIC.
 * Default is 200 bytes (see MODULE_PARM_DESC below).
 */
static int rx_copybreak = 200;

/* Register access method: 1 = MMIO, 0 = PIO, 2 (default) = try MMIO and
 * fall back to PIO (see MODULE_PARM_DESC below).
 */
static unsigned int use_mmio = 2;

/* Beyond this many multicast addresses, give up on hash filtering and
 * just accept all multicast traffic (see typhoon_set_rx_mode()).
 */
static const int multicast_filter_limit = 32;
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/* Descriptor ring entry counts. */
#define TXHI_ENTRIES 2
#define TXLO_ENTRIES 128
#define RX_ENTRIES 32
#define COMMAND_ENTRIES 16
#define RESPONSE_ENTRIES 32

/* Command/response ring sizes in bytes. */
#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* Number of rx buffers we actually post is one less than the free ring
 * size -- presumably the ring keeps one slot unused to distinguish full
 * from empty (same convention as typhoon_num_free()); confirm against
 * the NIC documentation.
 */
#define RXFREE_ENTRIES 128
#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)

/* Transmit watchdog timeout. */
#define TX_TIMEOUT (2*HZ)

/* Receive buffer size and firmware image path. */
#define PKT_BUF_SZ 1536
#define FIRMWARE_NAME "3com/typhoon.bin"

/* Must be defined before the kernel includes so pr_*() output is tagged. */
#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
100
101#include <linux/module.h>
102#include <linux/kernel.h>
103#include <linux/sched.h>
104#include <linux/string.h>
105#include <linux/timer.h>
106#include <linux/errno.h>
107#include <linux/ioport.h>
108#include <linux/interrupt.h>
109#include <linux/pci.h>
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/skbuff.h>
113#include <linux/mm.h>
114#include <linux/init.h>
115#include <linux/delay.h>
116#include <linux/ethtool.h>
117#include <linux/if_vlan.h>
118#include <linux/crc32.h>
119#include <linux/bitops.h>
120#include <asm/processor.h>
121#include <asm/io.h>
122#include <linux/uaccess.h>
123#include <linux/in6.h>
124#include <linux/dma-mapping.h>
125#include <linux/firmware.h>
126
127#include "typhoon.h"
128
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
	       "the buffer given back to the NIC. Default "
	       "is 200.");
MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
	       "Default is to try MMIO and fallback to PIO.");
module_param(rx_copybreak, int, 0);
module_param(use_mmio, int, 0);

/* The NIC only supports 32 scatter/gather entries for TSO; if the kernel
 * allows more fragments per skb than that, disable TSO entirely.
 */
#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
#undef NETIF_F_TSO
#endif

/* The low-priority tx ring must have room for a maximally fragmented skb. */
#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
#error TX ring too small!
#endif
149
/* Per-model identification: marketing name plus capability flags. */
struct typhoon_card_info {
	const char *name;
	const int capabilities;
};

/* Capability flag bits for typhoon_card_info.capabilities. */
#define TYPHOON_CRYPTO_NONE 0x00
#define TYPHOON_CRYPTO_DES 0x01
#define TYPHOON_CRYPTO_3DES 0x02
#define TYPHOON_CRYPTO_VARIABLE 0x04
#define TYPHOON_FIBER 0x08
#define TYPHOON_WAKEUP_NEEDS_RESET 0x10

/* Indices into typhoon_card_info[]; also used as driver_data in the
 * PCI device table, so the two must stay in the same order.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
168
169
/* Card name/capability table, indexed by enum typhoon_cards -- the entry
 * order here must match the enum exactly.
 */
static struct typhoon_card_info typhoon_card_info[] = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
198
199
200
201
202
203
204
205static const struct pci_device_id typhoon_pci_tbl[] = {
206 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
207 PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
208 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
209 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
210 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
211 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
212 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
213 PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
214 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
215 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
216 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
217 PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
218 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
219 PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
220 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
221 PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
222 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
223 PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
224 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
225 PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
226 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
227 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
228 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
229 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
230 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
231 PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
232 { 0, }
233};
234MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
235
236
237
238
239
#define __3xp_aligned ____cacheline_aligned
/* All rings and indexes shared with the NIC live in one DMA-coherent
 * allocation; each region starts on its own cache line. The struct is
 * __packed so member offsets (used via shared_offset()) are stable.
 */
struct typhoon_shared {
	struct typhoon_interface iface;
	struct typhoon_indexes indexes __3xp_aligned;
	struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
	struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
	struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
	struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
	struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
	struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
	u32 zeroWord;
	struct tx_desc txHi[TXHI_ENTRIES];
} __packed;

/* Host-side bookkeeping for one posted rx buffer: the skb and its
 * DMA mapping, so the completion path can unmap and hand it up.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
258
/* Per-NIC driver state. Members are grouped by ____cacheline_aligned
 * markers -- presumably to separate tx, rx/irq, and command-path data
 * onto distinct cache lines; preserve the ordering.
 */
struct typhoon {
	/* Transmit path. */
	struct transmit_ring txLoRing ____cacheline_aligned;
	struct pci_dev * tx_pdev;
	void __iomem *tx_ioaddr;
	u32 txlo_dma_addr;

	/* Receive / interrupt path. */
	void __iomem *ioaddr ____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8 awaiting_resp;
	u8 duplex;
	u8 speed;
	u8 card_state;
	struct basic_ring rxLoRing;
	struct pci_dev * pdev;
	struct net_device * dev;
	struct napi_struct napi;
	struct basic_ring rxHiRing;
	struct basic_ring rxBuffRing;
	struct rxbuff_ent rxbuffers[RXENT_ENTRIES];

	/* Command/response path, serialized by command_lock. */
	spinlock_t command_lock ____cacheline_aligned;
	struct basic_ring cmdRing;
	struct basic_ring respRing;
	struct net_device_stats stats_saved;
	struct typhoon_shared * shared;
	dma_addr_t shared_dma;
	__le16 xcvr_select;
	__le16 wol_events;
	__le32 offload;

	/* Infrequently used. */
	int capabilities;
	struct transmit_ring txHiRing;
};

/* Wait policy for typhoon_reset(): don't wait, busy-wait, or sleep. */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
300
301
302
303
304
/* Values for typhoon.card_state. */
enum state_values {
	Sleeping = 0, Running,
};

/* Flush posted PCI writes by reading a register back; only meaningful
 * (and only done) when using MMIO.
 */
#define typhoon_post_pci_writes(x) \
	do { if (likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while (0)

/* Polling intervals/timeouts for register waits. */
#define TYPHOON_UDELAY 50
#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)

/* TSO support: real definitions when NETIF_F_TSO exists, no-op stubs
 * otherwise so the rest of the driver needs no conditionals.
 */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS 2
#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 0
#define skb_tso_size(x) 0
#define TSO_NUM_DESCRIPTORS 0
#define TSO_OFFLOAD_ON 0
#endif
332
/* Advance a byte-offset ring index by @count entries, wrapping at
 * @num_entries. The stride is sizeof(struct cmd_desc) -- this assumes
 * every descriptor type sharing this helper has that same size (TODO:
 * confirm against typhoon.h; rx descriptors use their own helper below).
 */
static inline void
typhoon_inc_index(u32 *index, const int count, const int num_entries)
{
	*index += count * sizeof(struct cmd_desc);
	*index %= num_entries * sizeof(struct cmd_desc);
}
343
/* Advance a command-ring index by @count entries, with wrap. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
349
/* Advance a response-ring index by @count entries, with wrap. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
355
/* Advance an rx-free-ring index by @count entries, with wrap. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
361
/* Advance a tx-ring index by @count entries, with wrap.
 * NOTE(review): hard-codes TXLO_ENTRIES, so this is only correct for the
 * low-priority tx ring (TXHI_ENTRIES differs) -- confirm no caller passes
 * a txHi index.
 */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
368
/* Advance an rx-ring index by @count entries, with wrap. Rx descriptors
 * have their own size, hence the separate stride from typhoon_inc_index().
 */
static inline void
typhoon_inc_rx_index(u32 *index, const int count)
{
	*index += count * sizeof(struct rx_desc);
	*index %= RX_ENTRIES * sizeof(struct rx_desc);
}
376
/* Soft-reset the NIC and (optionally) wait for it to come back.
 *
 * @ioaddr: mapped register base
 * @wait_type: NoWait (fire and forget), WaitNoSleep (busy-wait), or
 *             WaitSleep (may schedule)
 *
 * Returns 0 on success, -ETIMEDOUT if the NIC never reported
 * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if (wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* Mask and ack all interrupts before pulling the reset line. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* Assert full reset, flush the posted write, then deassert. */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if (wait_type != NoWait) {
		for (i = 0; i < timeout; i++) {
			if (ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if (wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* Re-mask and ack again; reset may have raised spurious interrupts. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* Extra settle time after reset before the caller touches the NIC
	 * again -- presumably required by the hardware; confirm against the
	 * 3Com documentation before shortening.
	 */
	if (wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
431
432static int
433typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
434{
435 int i, err = 0;
436
437 for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
438 if (ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
439 goto out;
440 udelay(TYPHOON_UDELAY);
441 }
442
443 err = -ETIMEDOUT;
444
445out:
446 return err;
447}
448
449static inline void
450typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
451{
452 if (resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
453 netif_carrier_off(dev);
454 else
455 netif_carrier_on(dev);
456}
457
/* Answer the NIC's HELLO heartbeat with a no-response command.
 *
 * Uses spin_trylock rather than spin_lock: this runs from the response
 * processing path, and if the command lock is already held the holder
 * will issue commands anyway -- so it is safe (and deadlock-avoiding)
 * to simply skip the reply.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	if (spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* Descriptor must be visible in memory before the doorbell. */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
478
/* Drain the response ring, from respCleared up to respReady.
 *
 * @resp_size: capacity (in descriptors) of @resp_save
 * @resp_save: if non-NULL, the first sequenced response (seqNo != 0) is
 *             copied here for the command issuer; may be NULL when only
 *             unsolicited responses are expected.
 *
 * Unsolicited responses are handled inline (media status, hello).
 * Returns nonzero if the awaited response was captured (i.e. resp_save
 * was consumed or was NULL to begin with).
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
		struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while (cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if (resp_save && resp->seqNo) {
			/* Response too big for the caller's buffer: flag
			 * the error but still consume the descriptors. */
			if (count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* Copy out, splitting in two if the response wraps
			 * past the end of the ring. */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if (unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if (unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* Mark the awaited response as captured. */
			resp_save = NULL;
		} else if (resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if (resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* Publish the new cleared index before anything else proceeds. */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return resp_save == NULL;
}
537
/* Number of free entries in a ring, given byte-offset write/read indexes.
 * Indexes are scaled down by sizeof(struct cmd_desc) (the common
 * descriptor stride -- see typhoon_inc_index()); one slot is always kept
 * empty so a full ring is distinguishable from an empty one.
 */
static inline int
typhoon_num_free(int lastWrite, int lastRead, int ringSize)
{
	lastWrite /= sizeof(struct cmd_desc);
	lastRead /= sizeof(struct cmd_desc);
	return (ringSize + lastRead - lastWrite - 1) % ringSize;
}
548
/* Free entries in the command ring (NIC consumes up to cmdCleared). */
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
	int lastWrite = tp->cmdRing.lastWrite;
	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}
557
/* Free entries in the response ring (NIC writes, host clears). */
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
	int respReady = le32_to_cpu(tp->indexes->respReady);
	int respCleared = le32_to_cpu(tp->indexes->respCleared);

	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}
566
/* Free entries in a transmit ring.
 * NOTE(review): hard-codes TXLO_ENTRIES -- only valid for the low-priority
 * ring, like typhoon_inc_tx_index().
 */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
573
/* Post @num_cmd command descriptors and optionally busy-wait for the
 * response.
 *
 * @cmd:      array of command descriptors to copy into the ring
 * @num_resp: capacity of @resp in descriptors
 * @resp:     response buffer; may be NULL even for responding commands,
 *            in which case a local scratch descriptor is used
 *
 * Serialized by tp->command_lock. Returns 0 on success, -ENOMEM if the
 * rings lack space, -ETIMEDOUT if no response arrives, or -EIO if the
 * response carries TYPHOON_RESP_ERROR.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		      int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if (freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if (cmd->flags & TYPHOON_CMD_RESPOND) {
		/* Flag that we'll poll the response ring ourselves, so the
		 * irq path leaves sequenced responses to us. */
		tp->awaiting_resp = 1;
		if (resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* Copy the commands into the ring, splitting at the wrap point. */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if (unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if (unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* Descriptors must be visible before ringing the doorbell. */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if ((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Busy-poll the response ring; holding command_lock the whole time
	 * keeps command/response pairs strictly ordered. */
	got_resp = 0;
	for (i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if (indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if (!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* The NIC signals command failure via a flag in the response. */
	if (resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if (tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* A response may have landed between our last poll and the
		 * flag clear; kick a self-interrupt so the irq path picks
		 * it up rather than leaving it stranded. */
		if (indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
688
/* Write a TSO option descriptor into the tx ring for @skb.
 *
 * @ring_dma: bus address of the ring base, used to compute the address
 *            the NIC writes its completion (bytesTx) back to.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
		 u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	/* Point the response address at this descriptor's bytesTx field. */
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
709
/* ndo_start_xmit: queue @skb on the low-priority tx ring.
 *
 * Builds one header descriptor, an optional TSO option descriptor, and
 * one fragment descriptor per data segment, then rings the doorbell.
 * Always returns NETDEV_TX_OK; flow control is done by stopping the
 * queue when the ring gets low.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* All traffic goes out the low-priority ring. */
	txRing = &tp->txLoRing;

	/* Worst-case descriptors for this skb: header + one per fragment,
	 * plus the TSO option descriptor if segmenting. */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* Spin until the ring has room (+2 slack). NOTE(review): this
	 * busy-waits in the xmit path and relies on the completion side
	 * advancing lastRead; the queue-stop logic below should normally
	 * prevent us from ever getting here without space. */
	while (unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* Header descriptor; tx_addr stashes the skb pointer so the
	 * completion path can free it. */
	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Offload all checksum types the NIC knows about. */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* Map the data and emit one fragment descriptor per segment.
	 * NOTE(review): dma_map_single() results are not checked with
	 * dma_mapping_error() -- worth confirming/fixing separately. */
	if (skb_shinfo(skb)->nr_frags == 0) {
		skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		len = skb_headlen(skb);
		skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data, len,
					 DMA_TO_DEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = skb_frag_size(frag);
			frag_addr = skb_frag_address(frag);
			skb_dma = dma_map_single(&tp->tx_pdev->dev, frag_addr,
						 len, DMA_TO_DEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* All descriptors must be in memory before ringing the doorbell. */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* Stop the queue if a worst-case skb would no longer fit; re-check
	 * afterwards to close the race with a concurrent tx completion. */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if (typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		if (typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
856
/* ndo_set_rx_mode: program the NIC's receive filter.
 *
 * Promiscuous mode takes everything; too many multicast addresses (or
 * IFF_ALLMULTI) falls back to all-multicast; otherwise a 64-bit CRC hash
 * filter is programmed from the multicast list.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if (dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* Build the 64-bit hash: top 6 CRC bits select the bit. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
895
/* Read the NIC's hardware statistics and fold them into dev->stats.
 *
 * The NIC's counters are relative to its last reset, so stats_saved
 * (snapshotted before sleep/reset elsewhere) is added to each field.
 * Also refreshes tp->speed/tp->duplex from the link status.
 *
 * Returns 0 on success or the error from typhoon_issue_command().
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->dev->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if (err < 0)
		return err;

	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	/* Carrier loss is the only tx error source folded in here. */
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
947
948static struct net_device_stats *
949typhoon_get_stats(struct net_device *dev)
950{
951 struct typhoon *tp = netdev_priv(dev);
952 struct net_device_stats *stats = &tp->dev->stats;
953 struct net_device_stats *saved = &tp->stats_saved;
954
955 smp_rmb();
956 if (tp->card_state == Sleeping)
957 return saved;
958
959 if (typhoon_do_get_stats(tp) < 0) {
960 netdev_err(dev, "error getting stats\n");
961 return saved;
962 }
963
964 return stats;
965}
966
/* ethtool get_drvinfo: report driver name, bus info, and the firmware
 * version -- queried from the running image when the card is awake,
 * otherwise reported as the sleep image.
 */
static void
typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct typhoon *tp = netdev_priv(dev);
	struct pci_dev *pci_dev = tp->pdev;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[3];

	/* Pair with the barrier on the card_state writer side. */
	smp_rmb();
	if (tp->card_state == Sleeping) {
		strlcpy(info->fw_version, "Sleep image",
			sizeof(info->fw_version));
	} else {
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
		if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
			strlcpy(info->fw_version, "Unknown runtime",
				sizeof(info->fw_version));
		} else {
			/* Version is packed 8.12.12 bits into parm2. */
			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
			snprintf(info->fw_version, sizeof(info->fw_version),
				"%02x.%03x.%03x", sleep_ver >> 24,
				(sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
		}
	}

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
995
/* ethtool get_link_ksettings: report supported/advertised modes derived
 * from the current transceiver selection, plus live speed/duplex read
 * back from the hardware via typhoon_do_get_stats().
 */
static int
typhoon_get_link_ksettings(struct net_device *dev,
			   struct ethtool_link_ksettings *cmd)
{
	struct typhoon *tp = netdev_priv(dev);
	u32 supported, advertising = 0;

	supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		advertising = ADVERTISED_10baseT_Half |
				    ADVERTISED_10baseT_Full |
				    ADVERTISED_100baseT_Half |
				    ADVERTISED_100baseT_Full |
				    ADVERTISED_Autoneg;
		break;
	}

	if (tp->capabilities & TYPHOON_FIBER) {
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	} else {
		supported |= SUPPORTED_10baseT_Half |
					SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
	}

	/* Refresh tp->speed / tp->duplex from the hardware. */
	typhoon_do_get_stats(tp);
	cmd->base.speed = tp->speed;
	cmd->base.duplex = tp->duplex;
	cmd->base.phy_address = 0;
	if (tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
1057
/* ethtool set_link_ksettings: map the requested autoneg/speed/duplex to
 * a TYPHOON_XCVR_* selection and program it via XCVR_SELECT.
 *
 * Returns -EINVAL for unsupported combinations, otherwise the result of
 * typhoon_issue_command(). On success, cached speed/duplex are updated
 * (0xff = unknown while autonegotiating).
 */
static int
typhoon_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *cmd)
{
	struct typhoon *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	struct cmd_desc xp_cmd;
	__le16 xcvr;
	int err;

	err = -EINVAL;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		xcvr = TYPHOON_XCVR_AUTONEG;
	} else {
		if (cmd->base.duplex == DUPLEX_HALF) {
			if (speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10HALF;
			else if (speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100HALF;
			else
				goto out;
		} else if (cmd->base.duplex == DUPLEX_FULL) {
			if (speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10FULL;
			else if (speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100FULL;
			else
				goto out;
		} else
			goto out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = xcvr;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto out;

	tp->xcvr_select = xcvr;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->speed = 0xff;	/* unknown until negotiation finishes */
		tp->duplex = 0xff;
	} else {
		tp->speed = speed;
		tp->duplex = cmd->base.duplex;
	}

out:
	return err;
}
1108
1109static void
1110typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1111{
1112 struct typhoon *tp = netdev_priv(dev);
1113
1114 wol->supported = WAKE_PHY | WAKE_MAGIC;
1115 wol->wolopts = 0;
1116 if (tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1117 wol->wolopts |= WAKE_PHY;
1118 if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1119 wol->wolopts |= WAKE_MAGIC;
1120 memset(&wol->sopass, 0, sizeof(wol->sopass));
1121}
1122
1123static int
1124typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1125{
1126 struct typhoon *tp = netdev_priv(dev);
1127
1128 if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1129 return -EINVAL;
1130
1131 tp->wol_events = 0;
1132 if (wol->wolopts & WAKE_PHY)
1133 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1134 if (wol->wolopts & WAKE_MAGIC)
1135 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1136
1137 return 0;
1138}
1139
1140static void
1141typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1142{
1143 ering->rx_max_pending = RXENT_ENTRIES;
1144 ering->tx_max_pending = TXLO_ENTRIES - 1;
1145
1146 ering->rx_pending = RXENT_ENTRIES;
1147 ering->tx_pending = TXLO_ENTRIES - 1;
1148}
1149
/* ethtool entry points supported by this driver. */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= typhoon_get_ringparam,
	.get_link_ksettings	= typhoon_get_link_ksettings,
	.set_link_ksettings	= typhoon_set_link_ksettings,
};
1159
/* Busy-wait for the boot-command interrupt bit, then acknowledge it
 * (the ack happens even on timeout). Returns 0 or -ETIMEDOUT.
 */
static int
typhoon_wait_interrupt(void __iomem *ioaddr)
{
	int i, err = 0;

	for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if (ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
		   TYPHOON_INTR_BOOTCMD)
			goto out;
		udelay(TYPHOON_UDELAY);
	}

	err = -ETIMEDOUT;

out:
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	return err;
}
1178
/* Byte offset of member @x within the shared DMA block. */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)

/* Fill in the interface block the NIC reads at boot: bus addresses and
 * sizes of every ring inside the shared DMA area, plus the host-side
 * ring pointers and default offload configuration.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* Tell the NIC where each ring lives (bus addresses). */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* Host-side ring base pointers into the same shared block. */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	/* Default offload set; TSO_OFFLOAD_ON is 0 when TSO is disabled. */
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
	tp->offload |= TYPHOON_OFFLOAD_VLAN;

	spin_lock_init(&tp->command_lock);

	/* Make the interface block visible before the NIC is pointed at it. */
	wmb();
}
1250
1251static void
1252typhoon_init_rings(struct typhoon *tp)
1253{
1254 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1255
1256 tp->txLoRing.lastWrite = 0;
1257 tp->txHiRing.lastWrite = 0;
1258 tp->rxLoRing.lastWrite = 0;
1259 tp->rxHiRing.lastWrite = 0;
1260 tp->rxBuffRing.lastWrite = 0;
1261 tp->cmdRing.lastWrite = 0;
1262 tp->respRing.lastWrite = 0;
1263
1264 tp->txLoRing.lastRead = 0;
1265 tp->txHiRing.lastRead = 0;
1266}
1267
/* Firmware image shared by all typhoon NICs; loaded on first use in
 * typhoon_request_firmware() and cached for the life of the module.
 */
static const struct firmware *typhoon_fw;
1269
1270static int
1271typhoon_request_firmware(struct typhoon *tp)
1272{
1273 const struct typhoon_file_header *fHdr;
1274 const struct typhoon_section_header *sHdr;
1275 const u8 *image_data;
1276 u32 numSections;
1277 u32 section_len;
1278 u32 remaining;
1279 int err;
1280
1281 if (typhoon_fw)
1282 return 0;
1283
1284 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1285 if (err) {
1286 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1287 FIRMWARE_NAME);
1288 return err;
1289 }
1290
1291 image_data = typhoon_fw->data;
1292 remaining = typhoon_fw->size;
1293 if (remaining < sizeof(struct typhoon_file_header))
1294 goto invalid_fw;
1295
1296 fHdr = (struct typhoon_file_header *) image_data;
1297 if (memcmp(fHdr->tag, "TYPHOON", 8))
1298 goto invalid_fw;
1299
1300 numSections = le32_to_cpu(fHdr->numSections);
1301 image_data += sizeof(struct typhoon_file_header);
1302 remaining -= sizeof(struct typhoon_file_header);
1303
1304 while (numSections--) {
1305 if (remaining < sizeof(struct typhoon_section_header))
1306 goto invalid_fw;
1307
1308 sHdr = (struct typhoon_section_header *) image_data;
1309 image_data += sizeof(struct typhoon_section_header);
1310 section_len = le32_to_cpu(sHdr->len);
1311
1312 if (remaining < section_len)
1313 goto invalid_fw;
1314
1315 image_data += section_len;
1316 remaining -= section_len;
1317 }
1318
1319 return 0;
1320
1321invalid_fw:
1322 netdev_err(tp->dev, "Invalid firmware image\n");
1323 release_firmware(typhoon_fw);
1324 typhoon_fw = NULL;
1325 return -EINVAL;
1326}
1327
/* Push the runtime firmware image down to the 3XP, one chunk (at most
 * PAGE_SIZE) at a time, via a coherent DMA bounce buffer and the card's
 * boot-command register protocol.
 *
 * typhoon_request_firmware() has already validated the image layout, so
 * the header/section walk here trusts the lengths it reads.
 *
 * Returns 0 on success, -ENOMEM if the bounce page cannot be allocated,
 * or -ETIMEDOUT if the card stops responding.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* The firmware data cannot be DMA-mapped directly (it may not be
	 * physically contiguous), so copy each chunk into a coherent
	 * bounce page and let the card pull it from there.
	 */
	err = -ENOMEM;
	dpage = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dpage_dma, GFP_ATOMIC);
	if (!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* Temporarily enable + unmask the boot-command interrupt status
	 * bit so typhoon_wait_interrupt() can poll for segment-ready
	 * notifications; both registers are restored at err_out_irq.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Ack any stale boot-command interrupt, then hand the card the
	 * image start address and the five HMAC digest words before
	 * announcing the runtime-image download.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* For each section, feed the payload to the card in bounce-page
	 * sized chunks, waiting for WAITING_FOR_SEGMENT before each one.
	 */
	for (i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while (section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if (typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Copy the chunk into the bounce page while folding
			 * an Internet checksum over it; the card verifies the
			 * checksum written to BOOT_CHECKSUM below.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
				  ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
				  ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			/* only 32-bit DMA addresses are used for the bounce page */
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
				  ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the last segment to be consumed, then tell the card
	 * the download is complete and wait for it to enter the
	 * waiting-for-boot state.
	 */
	if (typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the interrupt mask/enable state we saved above */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	dma_free_coherent(&pdev->dev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1467
/* Boot the 3XP image that is already loaded (sleep or runtime).
 *
 * Waits for the card to reach initial_status, registers the shared
 * boot-record DMA address, then issues the BOOT command once the card
 * reports RUNNING.  Returns 0 on success or -ETIMEDOUT.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if (typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* Hand the card the (32-bit) DMA address of the shared area so it
	 * can read the boot record / ring layout.
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
		  ioaddr + TYPHOON_REG_COMMAND);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the ring-ready doorbell registers before starting the
	 * image, so no stale producer offsets are seen.
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1503
/* Reclaim completed transmit descriptors on txRing, from our last
 * cleaned position up to *index (the offset the card reports as
 * cleared).  Frees skbs and undoes DMA mappings as it goes.
 *
 * Returns the new read offset; the caller decides when to publish it
 * back into txRing->lastRead.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
		volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while (lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if (type == TYPHOON_TX_DESC) {
			/* Head descriptor of a packet; the skb pointer was
			 * stashed in tx_addr when it was queued.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if (type == TYPHOON_FRAG_DESC) {
			/* Fragment descriptor: undo its DMA mapping. */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			dma_unmap_single(&tp->pdev->dev, skb_dma, dma_len,
					 DMA_TO_DEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1539
1540static void
1541typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1542 volatile __le32 * index)
1543{
1544 u32 lastRead;
1545 int numDesc = MAX_SKB_FRAGS + 1;
1546
1547
1548 lastRead = typhoon_clean_tx(tp, txRing, index);
1549 if (netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1550 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1551 netif_wake_queue(tp->dev);
1552
1553 txRing->lastRead = lastRead;
1554 smp_wmb();
1555}
1556
/* Give the buffer at rxbuffers[idx] back to the NIC by appending a
 * rx_free entry to the free-buffer ring, reusing the buffer's existing
 * DMA mapping.  If the free ring has no room, the skb is dropped.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* Ring-full test: advancing lastWrite by one entry would collide
	 * with the card's cleared index.
	 */
	if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* No room on the free ring -- just toss the buffer. */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Descriptor contents must be visible before the card sees the
	 * updated ready index.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1583
1584static int
1585typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1586{
1587 struct typhoon_indexes *indexes = tp->indexes;
1588 struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
1589 struct basic_ring *ring = &tp->rxBuffRing;
1590 struct rx_free *r;
1591 struct sk_buff *skb;
1592 dma_addr_t dma_addr;
1593
1594 rxb->skb = NULL;
1595
1596 if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
1597 le32_to_cpu(indexes->rxBuffCleared))
1598 return -ENOMEM;
1599
1600 skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
1601 if (!skb)
1602 return -ENOMEM;
1603
1604#if 0
1605
1606
1607
1608 skb_reserve(skb, 2);
1609#endif
1610
1611 dma_addr = dma_map_single(&tp->pdev->dev, skb->data, PKT_BUF_SZ,
1612 DMA_FROM_DEVICE);
1613
1614
1615
1616
1617 r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
1618 typhoon_inc_rxfree_index(&ring->lastWrite, 1);
1619 r->virtAddr = idx;
1620 r->physAddr = cpu_to_le32(dma_addr);
1621 rxb->skb = skb;
1622 rxb->dma_addr = dma_addr;
1623
1624
1625 wmb();
1626 indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
1627 return 0;
1628}
1629
/* Process up to `budget` received packets on rxRing, between the card's
 * *cleared offset and its *ready offset.  Small packets (under
 * rx_copybreak) are copied into a fresh skb so the original buffer can
 * be recycled with its DMA mapping intact; larger packets are passed up
 * directly and a replacement buffer is allocated.
 *
 * Writes the new cleared offset back through *cleared and returns the
 * number of packets delivered.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while (rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr holds the rxbuffers[] slot index we stored when
		 * the buffer was posted.
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if (rx->flags & TYPHOON_RX_ERROR) {
			/* Bad frame: return the buffer to the free ring. */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if (pkt_len < rx_copybreak &&
		   (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
			/* Copybreak path: copy into a small skb (offset 2 to
			 * align the IP header) and recycle the original.
			 */
			skb_reserve(new_skb, 2);
			dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr,
						PKT_BUF_SZ, DMA_FROM_DEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
						   PKT_BUF_SZ,
						   DMA_FROM_DEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Hand the buffer itself up the stack and try to
			 * post a replacement in its slot.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			dma_unmap_single(&tp->pdev->dev, dma_addr, PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Accept the hardware checksum only for IP+TCP or IP+UDP
		 * where both layers checked out good.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if (csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		if (rx->rxStatus & TYPHOON_RX_VLAN)
			__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
					       ntohl(rx->vlanTag) & 0xffff);
		netif_receive_skb(new_skb);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1705
1706static void
1707typhoon_fill_free_ring(struct typhoon *tp)
1708{
1709 u32 i;
1710
1711 for (i = 0; i < RXENT_ENTRIES; i++) {
1712 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1713 if (rxb->skb)
1714 continue;
1715 if (typhoon_alloc_rx_skb(tp, i) < 0)
1716 break;
1717 }
1718}
1719
/* NAPI poll handler: drain command responses, reap completed TX-lo
 * descriptors, process both receive rings (high priority first), and
 * refill the free-buffer ring if the card has drained it.  Re-enables
 * interrupts (unmasks) only when the budget was not exhausted.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Order reads of the card-written index block after the interrupt
	 * that scheduled us.
	 */
	rmb();
	if (!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if (le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	/* High-priority receive ring gets first claim on the budget. */
	if (indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if (indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if (le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* Card has consumed every posted buffer -- replenish. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* All caught up: complete NAPI and unmask interrupts. */
		napi_complete_done(napi, work_done);
		iowrite32(TYPHOON_INTR_NONE,
			  tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1760
/* Shared interrupt handler: acknowledge the card's interrupt, mask
 * further interrupts, and hand the real work to NAPI polling.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if (!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;	/* shared line; not ours */

	/* Ack everything we saw before scheduling the poll. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		/* Mask until typhoon_poll() finishes and unmasks. */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		/* Should not happen: interrupts are masked while polling. */
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1784
1785static void
1786typhoon_free_rx_rings(struct typhoon *tp)
1787{
1788 u32 i;
1789
1790 for (i = 0; i < RXENT_ENTRIES; i++) {
1791 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1792 if (rxb->skb) {
1793 dma_unmap_single(&tp->pdev->dev, rxb->dma_addr,
1794 PKT_BUF_SZ, DMA_FROM_DEVICE);
1795 dev_kfree_skb(rxb->skb);
1796 rxb->skb = NULL;
1797 }
1798 }
1799}
1800
1801static int
1802typhoon_sleep_early(struct typhoon *tp, __le16 events)
1803{
1804 void __iomem *ioaddr = tp->ioaddr;
1805 struct cmd_desc xp_cmd;
1806 int err;
1807
1808 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1809 xp_cmd.parm1 = events;
1810 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1811 if (err < 0) {
1812 netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
1813 err);
1814 return err;
1815 }
1816
1817 INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1818 err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1819 if (err < 0) {
1820 netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
1821 return err;
1822 }
1823
1824 if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1825 return -ETIMEDOUT;
1826
1827
1828
1829
1830 netif_carrier_off(tp->dev);
1831
1832 return 0;
1833}
1834
1835static int
1836typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1837{
1838 int err;
1839
1840 err = typhoon_sleep_early(tp, events);
1841
1842 if (err)
1843 return err;
1844
1845 pci_enable_wake(tp->pdev, state, 1);
1846 pci_disable_device(tp->pdev);
1847 return pci_set_power_state(tp->pdev, state);
1848}
1849
/* Wake the 3XP from sleep.  If the card does not come back to the
 * WAITING_FOR_HOST state, or its capabilities say wakeup requires a
 * reset anyway, fall back to a full reset with the given wait_type.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	void __iomem *ioaddr = tp->ioaddr;

	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
	   (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1866
/* Bring the card from sleep to full running state: reset the rings,
 * download and boot the runtime image, then configure it (packet size,
 * MAC address, coalescing, transceiver, VLAN type, offloads, RX filter)
 * and finally enable TX/RX and interrupts.
 *
 * On any failure the card is reset and the rings are cleaned, leaving
 * the device quiesced; returns 0 or a negative errno.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if (err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* MAC address is split across parm1 (first 2 bytes) and parm2
	 * (last 4 bytes), in the card's expected byte order.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* parm1 = 0 disables interrupt coalescing. */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* Same offload mask is programmed for both directions. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* Publish the state change before interrupts can observe it. */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1958
/* Quiesce a running card: disable interrupts and RX, drain outstanding
 * TX, snapshot the statistics (lost across the halt), then halt and
 * reset the 3XP.  Returns 0, or -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Poll until the card reports it has consumed everything we
	 * queued on the low-priority TX ring.
	 */
	for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if (indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if (i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Save the statistics now -- the halt/reset wipes the card's
	 * counters, and typhoon_do_get_stats() needs a responsive card.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if (typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* If TX never fully drained, force the cleared index forward and
	 * reclaim whatever descriptors remain so their skbs are freed.
	 */
	if (indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2019
/* Watchdog TX-timeout handler: reset the card, reclaim all TX/RX
 * resources, and restart the runtime.  If either the reset or the
 * restart fails, leave the card held in reset and mark carrier off.
 */
static void
typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct typhoon *tp = netdev_priv(dev);

	if (typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
		netdev_warn(dev, "could not reset in tx timeout\n");
		goto truly_dead;
	}

	/* Free everything the card was holding: pending TX skbs (the
	 * shared cleared index is stale after reset) and all RX buffers.
	 */
	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
	typhoon_free_rx_rings(tp);

	if (typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "could not start runtime in tx timeout\n");
		goto truly_dead;
	}

	netif_wake_queue(dev);
	return;

truly_dead:
	/* Card is unrecoverable; keep it in reset. */
	typhoon_reset(tp->ioaddr, NoWait);
	netif_carrier_off(dev);
}
2047
/* ndo_open: load firmware if needed, power up and wake the card,
 * grab the IRQ, and start the runtime.  On failure, unwind back to the
 * sleep image and D3hot so a later open can retry.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	/* Card was left in D3hot by probe/close; bring it back to D0. */
	pci_set_power_state(tp->pdev, PCI_D0);
	pci_restore_state(tp->pdev);

	err = typhoon_wakeup(tp, WaitSleep);
	if (err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if (err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Try to leave the card in its low-power sleep image. */
	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2099
/* ndo_stop: stop the runtime, release the IRQ and all RX buffers, then
 * put the card back into its sleep image at D3hot.  Always returns 0;
 * failures along the way are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if (typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* stop_runtime disabled card interrupts, so this is safe now */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2125
/* PM resume callback: if the interface was up at suspend time, wake the
 * card and restart the runtime; on failure hold the card in reset and
 * report -EBUSY.  A down interface resumes lazily on the next open.
 */
static int __maybe_unused
typhoon_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, we're already "resumed" -- nothing to do. */
	if (!netif_running(dev))
		return 0;

	if (typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if (typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2154
/* PM suspend callback: stop the runtime, reboot into the sleep image,
 * program the MAC address and a directed+broadcast RX filter for
 * wake-on-LAN matching, then put the card to sleep with the configured
 * wake events.  On any failure, resume the card and return -EBUSY.
 */
static int __maybe_unused
typhoon_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* A down interface has nothing running to suspend. */
	if (!netif_running(dev))
		return 0;

	/* WAKE_MAGIC does not work with VLAN offloading enabled, and
	 * tp->offload always includes TYPHOON_OFFLOAD_VLAN -- warn only.
	 */
	if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");

	netif_device_detach(dev);

	if (typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* The sleep image needs the MAC address to match wake packets. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if (typhoon_sleep_early(tp, tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	device_wakeup_enable(dev_d);

	return 0;

need_resume:
	typhoon_resume(dev_d);
	return -EBUSY;
}
2215
/* Probe whether MMIO access to the card works by asking it to raise a
 * self-interrupt through the memory-mapped registers and checking that
 * the status bit appears.  Returns 1 if MMIO is usable, 0 to fall back
 * to port IO.
 */
static int
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if (!ioaddr)
		goto out;

	/* Only probe while the card is idle in its post-reset state. */
	if (ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* With everything masked and acked, trigger a self-interrupt and
	 * see if its status bit becomes visible over MMIO.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if ((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		/* flush the write, then give the card a moment */
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if (val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Leave interrupts masked, acked, and disabled again. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if (!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2261
/* net_device callbacks for typhoon interfaces. */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_rx_mode	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
2272
2273static int
2274typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2275{
2276 struct net_device *dev;
2277 struct typhoon *tp;
2278 int card_id = (int) ent->driver_data;
2279 void __iomem *ioaddr;
2280 void *shared;
2281 dma_addr_t shared_dma;
2282 struct cmd_desc xp_cmd;
2283 struct resp_desc xp_resp[3];
2284 int err = 0;
2285 const char *err_msg;
2286
2287 dev = alloc_etherdev(sizeof(*tp));
2288 if (dev == NULL) {
2289 err_msg = "unable to alloc new net device";
2290 err = -ENOMEM;
2291 goto error_out;
2292 }
2293 SET_NETDEV_DEV(dev, &pdev->dev);
2294
2295 err = pci_enable_device(pdev);
2296 if (err < 0) {
2297 err_msg = "unable to enable device";
2298 goto error_out_dev;
2299 }
2300
2301 err = pci_set_mwi(pdev);
2302 if (err < 0) {
2303 err_msg = "unable to set MWI";
2304 goto error_out_disable;
2305 }
2306
2307 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2308 if (err < 0) {
2309 err_msg = "No usable DMA configuration";
2310 goto error_out_mwi;
2311 }
2312
2313
2314
2315 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2316 err_msg = "region #1 not a PCI IO resource, aborting";
2317 err = -ENODEV;
2318 goto error_out_mwi;
2319 }
2320 if (pci_resource_len(pdev, 0) < 128) {
2321 err_msg = "Invalid PCI IO region size, aborting";
2322 err = -ENODEV;
2323 goto error_out_mwi;
2324 }
2325 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2326 err_msg = "region #1 not a PCI MMIO resource, aborting";
2327 err = -ENODEV;
2328 goto error_out_mwi;
2329 }
2330 if (pci_resource_len(pdev, 1) < 128) {
2331 err_msg = "Invalid PCI MMIO region size, aborting";
2332 err = -ENODEV;
2333 goto error_out_mwi;
2334 }
2335
2336 err = pci_request_regions(pdev, KBUILD_MODNAME);
2337 if (err < 0) {
2338 err_msg = "could not request regions";
2339 goto error_out_mwi;
2340 }
2341
2342
2343
2344 if (use_mmio != 0 && use_mmio != 1)
2345 use_mmio = typhoon_test_mmio(pdev);
2346
2347 ioaddr = pci_iomap(pdev, use_mmio, 128);
2348 if (!ioaddr) {
2349 err_msg = "cannot remap registers, aborting";
2350 err = -EIO;
2351 goto error_out_regions;
2352 }
2353
2354
2355
2356 shared = dma_alloc_coherent(&pdev->dev, sizeof(struct typhoon_shared),
2357 &shared_dma, GFP_KERNEL);
2358 if (!shared) {
2359 err_msg = "could not allocate DMA memory";
2360 err = -ENOMEM;
2361 goto error_out_remap;
2362 }
2363
2364 dev->irq = pdev->irq;
2365 tp = netdev_priv(dev);
2366 tp->shared = shared;
2367 tp->shared_dma = shared_dma;
2368 tp->pdev = pdev;
2369 tp->tx_pdev = pdev;
2370 tp->ioaddr = ioaddr;
2371 tp->tx_ioaddr = ioaddr;
2372 tp->dev = dev;
2373
2374
2375
2376
2377
2378
2379
2380
2381 err = typhoon_reset(ioaddr, WaitSleep);
2382 if (err < 0) {
2383 err_msg = "could not reset 3XP";
2384 goto error_out_dma;
2385 }
2386
2387
2388
2389
2390
2391 pci_set_master(pdev);
2392 pci_save_state(pdev);
2393
2394 typhoon_init_interface(tp);
2395 typhoon_init_rings(tp);
2396
2397 err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
2398 if (err < 0) {
2399 err_msg = "cannot boot 3XP sleep image";
2400 goto error_out_reset;
2401 }
2402
2403 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2404 err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
2405 if (err < 0) {
2406 err_msg = "cannot read MAC address";
2407 goto error_out_reset;
2408 }
2409
2410 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2411 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2412
2413 if (!is_valid_ether_addr(dev->dev_addr)) {
2414 err_msg = "Could not obtain valid ethernet address, aborting";
2415 err = -EIO;
2416 goto error_out_reset;
2417 }
2418
2419
2420
2421
2422 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2423 err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
2424 if (err < 0) {
2425 err_msg = "Could not get Sleep Image version";
2426 goto error_out_reset;
2427 }
2428
2429 tp->capabilities = typhoon_card_info[card_id].capabilities;
2430 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2431
2432
2433
2434
2435
2436
2437
2438 if (xp_resp[0].numDesc != 0)
2439 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2440
2441 err = typhoon_sleep(tp, PCI_D3hot, 0);
2442 if (err < 0) {
2443 err_msg = "cannot put adapter to sleep";
2444 goto error_out_reset;
2445 }
2446
2447
2448 dev->netdev_ops = &typhoon_netdev_ops;
2449 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2450 dev->watchdog_timeo = TX_TIMEOUT;
2451
2452 dev->ethtool_ops = &typhoon_ethtool_ops;
2453
2454
2455
2456
2457
2458
2459
2460
2461 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2462 NETIF_F_HW_VLAN_CTAG_TX;
2463 dev->features = dev->hw_features |
2464 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2465
2466 err = register_netdev(dev);
2467 if (err < 0) {
2468 err_msg = "unable to register netdev";
2469 goto error_out_reset;
2470 }
2471
2472 pci_set_drvdata(pdev, dev);
2473
2474 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2475 typhoon_card_info[card_id].name,
2476 use_mmio ? "MMIO" : "IO",
2477 (unsigned long long)pci_resource_start(pdev, use_mmio),
2478 dev->dev_addr);
2479
2480
2481
2482
2483 if (xp_resp[0].numDesc == 0) {
2484
2485
2486
2487 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2488 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2489 monthday >> 8, monthday & 0xff);
2490 } else if (xp_resp[0].numDesc == 2) {
2491
2492
2493 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2494 u8 *ver_string = (u8 *) &xp_resp[1];
2495 ver_string[25] = 0;
2496 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2497 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2498 sleep_ver & 0xfff, ver_string);
2499 } else {
2500 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2501 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2502 }
2503
2504 return 0;
2505
2506error_out_reset:
2507 typhoon_reset(ioaddr, NoWait);
2508
2509error_out_dma:
2510 dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared), shared,
2511 shared_dma);
2512error_out_remap:
2513 pci_iounmap(pdev, ioaddr);
2514error_out_regions:
2515 pci_release_regions(pdev);
2516error_out_mwi:
2517 pci_clear_mwi(pdev);
2518error_out_disable:
2519 pci_disable_device(pdev);
2520error_out_dev:
2521 free_netdev(dev);
2522error_out:
2523 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2524 return err;
2525}
2526
2527static void
2528typhoon_remove_one(struct pci_dev *pdev)
2529{
2530 struct net_device *dev = pci_get_drvdata(pdev);
2531 struct typhoon *tp = netdev_priv(dev);
2532
2533 unregister_netdev(dev);
2534 pci_set_power_state(pdev, PCI_D0);
2535 pci_restore_state(pdev);
2536 typhoon_reset(tp->ioaddr, NoWait);
2537 pci_iounmap(pdev, tp->ioaddr);
2538 dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared),
2539 tp->shared, tp->shared_dma);
2540 pci_release_regions(pdev);
2541 pci_clear_mwi(pdev);
2542 pci_disable_device(pdev);
2543 free_netdev(dev);
2544}
2545
/* Map typhoon_suspend/typhoon_resume onto the standard system-sleep PM
 * callbacks (suspend/resume, freeze/thaw, poweroff/restore).
 */
static SIMPLE_DEV_PM_OPS(typhoon_pm_ops, typhoon_suspend, typhoon_resume);

/* PCI driver glue: binds typhoon_pci_tbl IDs to the probe/remove/PM hooks. */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= typhoon_remove_one,
	.driver.pm	= &typhoon_pm_ops,
};
2555
2556static int __init
2557typhoon_init(void)
2558{
2559 return pci_register_driver(&typhoon_driver);
2560}
2561
static void __exit
typhoon_cleanup(void)
{
	/* Drop the cached firmware image (release_firmware() is NULL-safe,
	 * so this is fine even if the image was never loaded), then unbind
	 * all devices via the PCI core.
	 */
	release_firmware(typhoon_fw);
	pci_unregister_driver(&typhoon_driver);
}
2568
/* Module entry and exit points. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);
2571