1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
/* Received packets smaller than this are copied into a fresh skb and the
 * original buffer handed straight back to the NIC (see MODULE_PARM_DESC).
 */
static int rx_copybreak = 200;
48
49
50
51
52
53
/* Register access method: 0 = PIO, 1 = MMIO, 2 = try MMIO, fall back to PIO */
static unsigned int use_mmio = 2;
55
56
57
58
59
/* Above this many multicast addresses we stop hashing and just accept all
 * multicast frames (see typhoon_set_rx_mode()).
 */
static const int multicast_filter_limit = 32;
61
62
63
64
65
66
67
68
69
70
71
72
73
74
/* Ring geometry: entry counts for each ring in struct typhoon_shared */
#define TXHI_ENTRIES 2
#define TXLO_ENTRIES 128
#define RX_ENTRIES 32
#define COMMAND_ENTRIES 16
#define RESPONSE_ENTRIES 32

/* Command/response ring sizes in bytes */
#define COMMAND_RING_SIZE (COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE (RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The rx free ring keeps one slot unused to distinguish full from empty
 * (matching the "- 1" in typhoon_num_free()), so only RXENT_ENTRIES
 * buffers can be outstanding at once.
 */
#define RXFREE_ENTRIES 128
#define RXENT_ENTRIES (RXFREE_ENTRIES - 1)

/* netdev TX watchdog timeout */
#define TX_TIMEOUT (2*HZ)

/* Receive buffer size and firmware image to request */
#define PKT_BUF_SZ 1536
#define FIRMWARE_NAME "3com/typhoon.bin"
98
99#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
100
101#include <linux/module.h>
102#include <linux/kernel.h>
103#include <linux/sched.h>
104#include <linux/string.h>
105#include <linux/timer.h>
106#include <linux/errno.h>
107#include <linux/ioport.h>
108#include <linux/interrupt.h>
109#include <linux/pci.h>
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/skbuff.h>
113#include <linux/mm.h>
114#include <linux/init.h>
115#include <linux/delay.h>
116#include <linux/ethtool.h>
117#include <linux/if_vlan.h>
118#include <linux/crc32.h>
119#include <linux/bitops.h>
120#include <asm/processor.h>
121#include <asm/io.h>
122#include <linux/uaccess.h>
123#include <linux/in6.h>
124#include <linux/dma-mapping.h>
125#include <linux/firmware.h>
126
127#include "typhoon.h"
128
129MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
130MODULE_LICENSE("GPL");
131MODULE_FIRMWARE(FIRMWARE_NAME);
132MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
133MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and "
134 "the buffer given back to the NIC. Default "
135 "is 200.");
136MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. "
137 "Default is to try MMIO and fallback to PIO.");
138module_param(rx_copybreak, int, 0);
139module_param(use_mmio, int, 0);
140
141#if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
142#warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
143#undef NETIF_F_TSO
144#endif
145
146#if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
147#error TX ring too small!
148#endif
149
/* Human-readable board name plus capability flags (TYPHOON_* bits below)
 * for each supported card variant.
 */
struct typhoon_card_info {
	const char *name;
	const int capabilities;
};
154
/* Capability bits for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE 0x00
#define TYPHOON_CRYPTO_DES 0x01
#define TYPHOON_CRYPTO_3DES 0x02
#define TYPHOON_CRYPTO_VARIABLE 0x04
#define TYPHOON_FIBER 0x08
#define TYPHOON_WAKEUP_NEEDS_RESET 0x10

/* Indexes into typhoon_card_info[]; also used as driver_data in the PCI
 * device table, so the order here must match the array below.
 */
enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};
168
169
/* Indexed by enum typhoon_cards -- keep entries in the same order */
static struct typhoon_card_info typhoon_card_info[] = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
198
199
200
201
202
203
204
/* PCI IDs of supported boards; driver_data is the enum typhoon_cards
 * index.  3CR990B and 3CR990_FX variants are distinguished by subdevice
 * ID, so those entries must precede any wildcard for the same device.
 */
static const struct pci_device_id typhoon_pci_tbl[] = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
235
236
237
238
239
/* Memory block shared with the NIC (see tp->shared_dma).  The layout is
 * part of the device interface, hence __packed; each ring starts on its
 * own cache line to keep host and NIC index updates from false sharing.
 */
#define __3xp_aligned ____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface iface;
	struct typhoon_indexes indexes __3xp_aligned;
	struct tx_desc txLo[TXLO_ENTRIES] __3xp_aligned;
	struct rx_desc rxLo[RX_ENTRIES] __3xp_aligned;
	struct rx_desc rxHi[RX_ENTRIES] __3xp_aligned;
	struct cmd_desc cmd[COMMAND_ENTRIES] __3xp_aligned;
	struct resp_desc resp[RESPONSE_ENTRIES] __3xp_aligned;
	struct rx_free rxBuff[RXFREE_ENTRIES] __3xp_aligned;
	u32 zeroWord;
	struct tx_desc txHi[TXHI_ENTRIES];
} __packed;
253
/* Host bookkeeping for one posted receive buffer: the skb and the DMA
 * address it was mapped at.
 */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t dma_addr;
};
258
/* Per-adapter state.  Fields are grouped by cache line (note the
 * ____cacheline_aligned markers): TX-path fields first, then the
 * RX/general fields, then command/response state guarded by
 * command_lock, then rarely-touched configuration.
 */
struct typhoon {
	/* TX path */
	struct transmit_ring txLoRing ____cacheline_aligned;
	struct pci_dev * tx_pdev;
	void __iomem *tx_ioaddr;
	u32 txlo_dma_addr;

	/* RX path and general state */
	void __iomem *ioaddr ____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8 awaiting_resp;	/* set while typhoon_issue_command() polls */
	u8 duplex;		/* last reported duplex (0xff = unknown) */
	u8 speed;		/* last reported speed (0xff = unknown) */
	u8 card_state;		/* enum state_values */
	struct basic_ring rxLoRing;
	struct pci_dev * pdev;
	struct net_device * dev;
	struct napi_struct napi;
	struct basic_ring rxHiRing;
	struct basic_ring rxBuffRing;
	struct rxbuff_ent rxbuffers[RXENT_ENTRIES];

	/* command/response rings, guarded by command_lock */
	spinlock_t command_lock ____cacheline_aligned;
	struct basic_ring cmdRing;
	struct basic_ring respRing;
	struct net_device_stats stats_saved;	/* counters snapshotted before sleep */
	struct typhoon_shared * shared;
	dma_addr_t shared_dma;
	__le16 xcvr_select;
	__le16 wol_events;
	__le32 offload;

	/* configuration */
	int capabilities;
	struct transmit_ring txHiRing;
};
296
/* How typhoon_reset() may wait for the card: not at all, busy-waiting,
 * or sleeping (schedule_timeout).
 */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};
300
301
302
303
304
/* Values for tp->card_state */
enum state_values {
	Sleeping = 0, Running,
};
308
309
310
311
/* Flush posted PCI writes by reading back a harmless register.  Only
 * meaningful (and only safe) when using MMIO.
 */
#define typhoon_post_pci_writes(x) \
	do { if (likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while (0)

/* Polling interval and derived timeouts for NIC handshakes */
#define TYPHOON_UDELAY 50
#define TYPHOON_RESET_TIMEOUT_SLEEP (6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP ((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT ((1000000 / 2) / TYPHOON_UDELAY)

/* TSO support -- compiled down to no-ops when the stack lacks NETIF_F_TSO */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x) (skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS 2
#define TSO_OFFLOAD_ON TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 0
#define skb_tso_size(x) 0
#define TSO_NUM_DESCRIPTORS 0
#define TSO_OFFLOAD_ON 0
#endif
332
333static inline void
334typhoon_inc_index(u32 *index, const int count, const int num_entries)
335{
336
337
338
339
340 *index += count * sizeof(struct cmd_desc);
341 *index %= num_entries * sizeof(struct cmd_desc);
342}
343
/* Advance a command-ring byte-offset index */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
349
/* Advance a response-ring byte-offset index */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
355
/* Advance an rx-free-ring byte-offset index */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
361
/* Advance a TX-ring byte-offset index.  NOTE(review): hard-wired to the
 * txLo ring size; the txHi ring (TXHI_ENTRIES) must not use this helper.
 * Also assumes sizeof(struct tx_desc) == sizeof(struct cmd_desc) -- the
 * shared typhoon_inc_index() steps in cmd_desc units.
 */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}
368
369static inline void
370typhoon_inc_rx_index(u32 *index, const int count)
371{
372
373 *index += count * sizeof(struct rx_desc);
374 *index %= RX_ENTRIES * sizeof(struct rx_desc);
375}
376
/*
 * typhoon_reset - soft-reset the NIC and optionally wait for it to come
 * back up.
 * @ioaddr: mapped register base
 * @wait_type: NoWait, WaitNoSleep or WaitSleep (may we schedule?)
 *
 * Returns 0 on success, or -ETIMEDOUT if the card never reports
 * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if (wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack all interrupts before pulling the reset line */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse soft reset; the read-back in typhoon_post_pci_writes()
	 * ensures the assert reaches the card before we deassert
	 */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if (wait_type != NoWait) {
		for (i = 0; i < timeout; i++) {
			if (ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if (wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	/* re-mask and ack everything the reset may have raised */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* NOTE(review): extra settle time after reset even on success --
	 * the 5ms / 500us values look empirical; presumably the card is
	 * not immediately usable once it reports ready.  Confirm before
	 * changing.
	 */
	if (wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
431
432static int
433typhoon_wait_status(void __iomem *ioaddr, u32 wait_value)
434{
435 int i, err = 0;
436
437 for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
438 if (ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value)
439 goto out;
440 udelay(TYPHOON_UDELAY);
441 }
442
443 err = -ETIMEDOUT;
444
445out:
446 return err;
447}
448
449static inline void
450typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
451{
452 if (resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
453 netif_carrier_off(dev);
454 else
455 netif_carrier_on(dev);
456}
457
/*
 * typhoon_hello - answer the card's HELLO (apparently a host-alive
 * check; called when a TYPHOON_CMD_HELLO_RESP arrives on the response
 * ring).
 *
 * Only try-locks command_lock: if someone else holds it they are
 * already talking to the card, so skipping the reply is harmless.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	if (spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* descriptor must be visible in memory before the doorbell */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
478
/*
 * typhoon_process_response - drain the response ring.
 * @tp: adapter state
 * @resp_size: capacity of @resp_save, in descriptors
 * @resp_save: if non-NULL, buffer for the sequenced response a caller
 *	is waiting on
 *
 * Walks the ring from respCleared to respReady.  A response with a
 * non-zero seqNo is copied into @resp_save (handling ring wrap);
 * unsolicited media-status and hello responses are handled inline;
 * anything else is logged and discarded.
 *
 * Returns non-zero once the awaited response was captured (or when no
 * response was being awaited).
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
			struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while (cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if (resp_save && resp->seqNo) {
			/* too big for the caller's buffer: report an error
			 * but still consume the descriptors
			 */
			if (count > resp_size) {
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* the response may wrap past the ring's end */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if (unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if (unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited response as captured */
			resp_save = NULL;
		} else if (resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if (resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				  "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				  le16_to_cpu(resp->cmd),
				  resp->numDesc, resp->flags,
				  le16_to_cpu(resp->parm1),
				  le32_to_cpu(resp->parm2),
				  le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* publish the new cleared index for the NIC before returning */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return resp_save == NULL;
}
537
538static inline int
539typhoon_num_free(int lastWrite, int lastRead, int ringSize)
540{
541
542
543
544 lastWrite /= sizeof(struct cmd_desc);
545 lastRead /= sizeof(struct cmd_desc);
546 return (ringSize + lastRead - lastWrite - 1) % ringSize;
547}
548
/* Free slots in the command ring */
static inline int
typhoon_num_free_cmd(struct typhoon *tp)
{
	int lastWrite = tp->cmdRing.lastWrite;
	int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);

	return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
}
557
/* Free slots in the response ring */
static inline int
typhoon_num_free_resp(struct typhoon *tp)
{
	int respReady = le32_to_cpu(tp->indexes->respReady);
	int respCleared = le32_to_cpu(tp->indexes->respCleared);

	return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
}
566
/* Free slots in a TX ring.  NOTE(review): hard-wired to TXLO_ENTRIES,
 * and relies on TX indexes advancing in cmd_desc-sized units (see
 * typhoon_inc_tx_index) so the shared helper's division works.
 */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
	return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
573
/*
 * typhoon_issue_command - post command descriptor(s) and, for commands
 * flagged TYPHOON_CMD_RESPOND, busy-poll for the response.
 * @tp: adapter state
 * @num_cmd: number of descriptors at @cmd
 * @cmd: descriptor(s) to copy into the command ring
 * @num_resp: capacity of @resp in descriptors
 * @resp: response buffer, or NULL if the caller only needs the status
 *
 * Serialized by tp->command_lock.  Returns 0 on success, -ENOMEM when
 * the rings lack space, -ETIMEDOUT when no response arrives, or -EIO
 * when the card flags the response as an error.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		     int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if (freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			  freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if (cmd->flags & TYPHOON_CMD_RESPOND) {
		/* we poll for the reply ourselves; flag that so other
		 * response processing leaves it for us, and use a local
		 * buffer when the caller didn't supply one
		 */
		tp->awaiting_resp = 1;
		if (resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy into the command ring, splitting at the wrap point */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if (unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if (unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* descriptors must hit memory before the doorbell write */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if ((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* busy-poll the response ring until our sequenced response shows
	 * up (typhoon_process_response() returns non-zero once captured)
	 */
	got_resp = 0;
	for (i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if (indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
							  resp);
		udelay(TYPHOON_UDELAY);
	}

	if (!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* the card reports command failure in the response flags */
	if (resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if (tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* if responses arrived while we claimed response
		 * processing, nudge the card (self-interrupt register) so
		 * they get picked up by the normal interrupt path
		 */
		if (indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
688
/*
 * typhoon_tso_fill - write a TSO option descriptor into the TX ring.
 * @skb: packet being segmented
 * @txRing: ring to place the descriptor in (lastWrite is advanced)
 * @ring_dma: bus address of the start of the ring
 *
 * respAddrLo is pointed at this descriptor's own bytesTx field --
 * presumably where the card reports segmentation progress; confirm
 * against the firmware interface before relying on that.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
			u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
709
/*
 * typhoon_start_tx - queue an skb on the low-priority TX ring.
 *
 * Emits one first-descriptor (plus an optional TSO option descriptor),
 * then one fragment descriptor per data segment, and finally rings the
 * TX doorbell.  Always returns NETDEV_TX_OK.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* everything goes out on the low-priority ring */
	txRing = &tp->txLoRing;

	/* one first-descriptor, one per page fragment, and one more for
	 * the TSO option descriptor when segmenting
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* if the ring is momentarily full, spin until the completion path
	 * frees entries (NOTE(review): relies on the TX-complete code,
	 * not visible in this chunk, advancing lastRead).  The queue-stop
	 * logic at the bottom aims to keep this from ever triggering.
	 */
	while (unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* stash the skb pointer so completion can find and free it */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* request every checksum offload; presumably the card
		 * works out which apply to this packet
		 */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if (skb_vlan_tag_present(skb)) {
		first_txd->processFlags |=
		   TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		   cpu_to_le32(htons(skb_vlan_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* map the payload for DMA: the whole skb when linear, otherwise
	 * the head followed by one descriptor per page fragment
	 */
	if (skb_shinfo(skb)->nr_frags == 0) {
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
				         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = skb_frag_size(frag);
			frag_addr = skb_frag_address(frag);
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					       PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* descriptors must be visible in memory before the doorbell */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* stop the queue if a worst-case packet might not fit next time;
	 * the re-check closes the race with the completion path freeing
	 * entries between our check and the stop
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if (typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		if (typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
856
/*
 * typhoon_set_rx_mode - program the card's receive filter.
 *
 * Always accepts directed and broadcast frames.  Adds promiscuous or
 * all-multicast mode per dev->flags, or programs the 64-bit multicast
 * hash when the address list fits under multicast_filter_limit.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if (dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* too many addresses to hash -- accept all multicast */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* 64-bit hash: one bit per CRC-derived bucket */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
895
/*
 * typhoon_do_get_stats - read hardware counters into dev->stats.
 *
 * Each counter is added to the corresponding stats_saved value, so
 * totals accumulated before the last snapshot (presumably taken when
 * the card went to sleep -- see typhoon_get_stats) are preserved.
 * Also refreshes tp->speed/tp->duplex from the reported link status.
 * Returns 0 on success or the typhoon_issue_command() error.
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->dev->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if (err < 0)
		return err;

	/* note: tx_errors only reflects carrier loss, and rx_errors is a
	 * sum of FIFO overruns, bad SSDs and CRC errors
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
947
948static struct net_device_stats *
949typhoon_get_stats(struct net_device *dev)
950{
951 struct typhoon *tp = netdev_priv(dev);
952 struct net_device_stats *stats = &tp->dev->stats;
953 struct net_device_stats *saved = &tp->stats_saved;
954
955 smp_rmb();
956 if (tp->card_state == Sleeping)
957 return saved;
958
959 if (typhoon_do_get_stats(tp) < 0) {
960 netdev_err(dev, "error getting stats\n");
961 return saved;
962 }
963
964 return stats;
965}
966
/*
 * typhoon_get_drvinfo - ethtool .get_drvinfo hook.
 *
 * Queries the running firmware version from the card; a sleeping card
 * cannot answer, so a placeholder string is reported instead.
 */
static void
typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct typhoon *tp = netdev_priv(dev);
	struct pci_dev *pci_dev = tp->pdev;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[3];

	smp_rmb();
	if (tp->card_state == Sleeping) {
		strlcpy(info->fw_version, "Sleep image",
			sizeof(info->fw_version));
	} else {
		INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
		if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
			strlcpy(info->fw_version, "Unknown runtime",
				sizeof(info->fw_version));
		} else {
			/* NOTE(review): the name "sleep_ver" notwithstanding,
			 * this branch queries a running card -- confirm which
			 * image parm2 describes before renaming anything
			 */
			u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
			snprintf(info->fw_version, sizeof(info->fw_version),
				"%02x.%03x.%03x", sleep_ver >> 24,
				(sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
		}
	}

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
995
/*
 * typhoon_get_link_ksettings - ethtool .get_link_ksettings hook.
 *
 * Maps the stored transceiver selection to advertised modes, reports
 * fibre vs TP from the card capabilities, and refreshes speed/duplex
 * from the hardware before filling in the result.  Always returns 0.
 */
static int
typhoon_get_link_ksettings(struct net_device *dev,
			  struct ethtool_link_ksettings *cmd)
{
	struct typhoon *tp = netdev_priv(dev);
	u32 supported, advertising = 0;

	supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
			SUPPORTED_Autoneg;

	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;
		break;
	}

	if (tp->capabilities & TYPHOON_FIBER) {
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
	} else {
		supported |= SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
	}

	/* refresh tp->speed / tp->duplex from the card's link status */
	typhoon_do_get_stats(tp);
	cmd->base.speed = tp->speed;
	cmd->base.duplex = tp->duplex;
	cmd->base.phy_address = 0;
	if (tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->base.autoneg = AUTONEG_ENABLE;
	else
		cmd->base.autoneg = AUTONEG_DISABLE;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
1057
/*
 * typhoon_set_link_ksettings - ethtool .set_link_ksettings hook.
 *
 * Translates the requested autoneg/speed/duplex combination to a
 * transceiver selection and sends it to the card.  Returns -EINVAL for
 * unsupported combinations, otherwise the command's result; on success
 * the cached xcvr/speed/duplex are updated (0xff = unknown while
 * autonegotiating).
 */
static int
typhoon_set_link_ksettings(struct net_device *dev,
			  const struct ethtool_link_ksettings *cmd)
{
	struct typhoon *tp = netdev_priv(dev);
	u32 speed = cmd->base.speed;
	struct cmd_desc xp_cmd;
	__le16 xcvr;
	int err;

	err = -EINVAL;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		xcvr = TYPHOON_XCVR_AUTONEG;
	} else {
		if (cmd->base.duplex == DUPLEX_HALF) {
			if (speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10HALF;
			else if (speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100HALF;
			else
				goto out;
		} else if (cmd->base.duplex == DUPLEX_FULL) {
			if (speed == SPEED_10)
				xcvr = TYPHOON_XCVR_10FULL;
			else if (speed == SPEED_100)
				xcvr = TYPHOON_XCVR_100FULL;
			else
				goto out;
		} else
			goto out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = xcvr;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto out;

	tp->xcvr_select = xcvr;
	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		tp->speed = 0xff;	/* unknown until negotiation completes */
		tp->duplex = 0xff;
	} else {
		tp->speed = speed;
		tp->duplex = cmd->base.duplex;
	}

out:
	return err;
}
1108
1109static void
1110typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1111{
1112 struct typhoon *tp = netdev_priv(dev);
1113
1114 wol->supported = WAKE_PHY | WAKE_MAGIC;
1115 wol->wolopts = 0;
1116 if (tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
1117 wol->wolopts |= WAKE_PHY;
1118 if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
1119 wol->wolopts |= WAKE_MAGIC;
1120 memset(&wol->sopass, 0, sizeof(wol->sopass));
1121}
1122
1123static int
1124typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1125{
1126 struct typhoon *tp = netdev_priv(dev);
1127
1128 if (wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
1129 return -EINVAL;
1130
1131 tp->wol_events = 0;
1132 if (wol->wolopts & WAKE_PHY)
1133 tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
1134 if (wol->wolopts & WAKE_MAGIC)
1135 tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
1136
1137 return 0;
1138}
1139
1140static void
1141typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
1142{
1143 ering->rx_max_pending = RXENT_ENTRIES;
1144 ering->tx_max_pending = TXLO_ENTRIES - 1;
1145
1146 ering->rx_pending = RXENT_ENTRIES;
1147 ering->tx_pending = TXLO_ENTRIES - 1;
1148}
1149
/* ethtool entry points implemented by this driver */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_drvinfo = typhoon_get_drvinfo,
	.get_wol = typhoon_get_wol,
	.set_wol = typhoon_set_wol,
	.get_link = ethtool_op_get_link,
	.get_ringparam = typhoon_get_ringparam,
	.get_link_ksettings = typhoon_get_link_ksettings,
	.set_link_ksettings = typhoon_set_link_ksettings,
};
1159
1160static int
1161typhoon_wait_interrupt(void __iomem *ioaddr)
1162{
1163 int i, err = 0;
1164
1165 for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1166 if (ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) &
1167 TYPHOON_INTR_BOOTCMD)
1168 goto out;
1169 udelay(TYPHOON_UDELAY);
1170 }
1171
1172 err = -ETIMEDOUT;
1173
1174out:
1175 iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1176 return err;
1177}
1178
1179#define shared_offset(x) offsetof(struct typhoon_shared, x)
1180
/*
 * typhoon_init_interface - build the NIC-visible interface block.
 *
 * Zeroes the shared area, records the bus address and byte size of every
 * ring in tp->shared->iface, wires up the host-side ring base pointers,
 * and sets the default offload configuration.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* each ring's bus address is its offset within the shared block */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	/* checksum and VLAN offload on by default; TSO when configured in */
	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
	tp->offload |= TYPHOON_OFFLOAD_VLAN;

	spin_lock_init(&tp->command_lock);

	/* make sure the interface block is in memory before the card can
	 * be pointed at it
	 */
	wmb();
}
1250
1251static void
1252typhoon_init_rings(struct typhoon *tp)
1253{
1254 memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1255
1256 tp->txLoRing.lastWrite = 0;
1257 tp->txHiRing.lastWrite = 0;
1258 tp->rxLoRing.lastWrite = 0;
1259 tp->rxHiRing.lastWrite = 0;
1260 tp->rxBuffRing.lastWrite = 0;
1261 tp->cmdRing.lastWrite = 0;
1262 tp->respRing.lastWrite = 0;
1263
1264 tp->txLoRing.lastRead = 0;
1265 tp->txHiRing.lastRead = 0;
1266}
1267
/* Firmware image cached for the lifetime of the module; fetched on first
 * open via typhoon_request_firmware() and reused by every adapter. */
static const struct firmware *typhoon_fw;
1269
1270static int
1271typhoon_request_firmware(struct typhoon *tp)
1272{
1273 const struct typhoon_file_header *fHdr;
1274 const struct typhoon_section_header *sHdr;
1275 const u8 *image_data;
1276 u32 numSections;
1277 u32 section_len;
1278 u32 remaining;
1279 int err;
1280
1281 if (typhoon_fw)
1282 return 0;
1283
1284 err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
1285 if (err) {
1286 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
1287 FIRMWARE_NAME);
1288 return err;
1289 }
1290
1291 image_data = typhoon_fw->data;
1292 remaining = typhoon_fw->size;
1293 if (remaining < sizeof(struct typhoon_file_header))
1294 goto invalid_fw;
1295
1296 fHdr = (struct typhoon_file_header *) image_data;
1297 if (memcmp(fHdr->tag, "TYPHOON", 8))
1298 goto invalid_fw;
1299
1300 numSections = le32_to_cpu(fHdr->numSections);
1301 image_data += sizeof(struct typhoon_file_header);
1302 remaining -= sizeof(struct typhoon_file_header);
1303
1304 while (numSections--) {
1305 if (remaining < sizeof(struct typhoon_section_header))
1306 goto invalid_fw;
1307
1308 sHdr = (struct typhoon_section_header *) image_data;
1309 image_data += sizeof(struct typhoon_section_header);
1310 section_len = le32_to_cpu(sHdr->len);
1311
1312 if (remaining < section_len)
1313 goto invalid_fw;
1314
1315 image_data += section_len;
1316 remaining -= section_len;
1317 }
1318
1319 return 0;
1320
1321invalid_fw:
1322 netdev_err(tp->dev, "Invalid firmware image\n");
1323 release_firmware(typhoon_fw);
1324 typhoon_fw = NULL;
1325 return -EINVAL;
1326}
1327
/* Stream the (already validated) runtime image to the 3XP, one PAGE_SIZE
 * chunk at a time, through a single coherent DMA bounce page. The card is
 * handed each chunk's length, Internet checksum, destination address, and
 * the bounce page's bus address, then told a segment is available. Runs
 * with the boot-command interrupt enabled but masked; original IRQ state
 * is restored on all exit paths.
 *
 * Returns 0 on success, -ENOMEM if the bounce page can't be allocated, or
 * -ETIMEDOUT if the card misses any handshake.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Single page used as a bounce buffer for every chunk; each chunk
	 * is copied in (with checksum computed on the fly) and then DMA'd
	 * by the card from dpage_dma.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if (!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* Enable (but keep masked) the boot-command interrupt so we can
	 * poll for segment-done acknowledgements; saved values are
	 * restored at err_out_irq.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Ack any stale boot-cmd interrupt, then hand the card the entry
	 * point and the five HMAC digest words from the file header before
	 * announcing a runtime image download.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* Per section: read its load address and length from the section
	 * header, then push the payload in PAGE_SIZE chunks, waiting for
	 * the card to request each segment.
	 */
	for (i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while (section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if (typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Copy the chunk into the bounce page, folding an
			 * Internet checksum as we go; the card verifies the
			 * chunk against this value. The final 0 is the
			 * initial checksum seed.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
					ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
					ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			/* Flush posted writes before the doorbell. */
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
					ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the last segment to drain before declaring completion. */
	if (typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the interrupt enable/mask state we found on entry. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1468
/* Boot the 3XP once its image (sleep or runtime) has been downloaded.
 * Waits for the card to reach initial_status, registers the shared-memory
 * boot record (its bus address), waits for the card to report RUNNING,
 * clears the stale tx/command doorbell registers, and issues the final
 * BOOT command.
 *
 * Returns 0 on success or -ETIMEDOUT if either status wait fails.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if (typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* Hand the card the 64-bit bus address of the boot record; the
	 * high half is always 0 (32-bit DMA mask on this device).
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
				ioaddr + TYPHOON_REG_COMMAND);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Zero the doorbell registers so no stale producer indexes are
	 * seen by the freshly booted image, then start it.
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1504
/* Reclaim completed transmit descriptors from txRing, from the ring's
 * lastRead cursor up to the NIC-written completion index (*index). Frees
 * the skb for packet descriptors and unmaps the DMA buffer for fragment
 * descriptors. Returns the new read cursor; the caller is responsible
 * for storing it back into txRing->lastRead.
 *
 * Called from IRQ/NAPI context (uses dev_kfree_skb_irq).
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
		volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while (lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if (type == TYPHOON_TX_DESC) {
			/* Packet descriptor: tx_addr carries the skb
			 * pointer stashed at transmit time (presumably by
			 * the start_xmit path, which is outside this chunk
			 * -- verify against the full file).
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if (type == TYPHOON_FRAG_DESC) {
			/* Fragment descriptor: release its DMA mapping. */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1540
1541static void
1542typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1543 volatile __le32 * index)
1544{
1545 u32 lastRead;
1546 int numDesc = MAX_SKB_FRAGS + 1;
1547
1548
1549 lastRead = typhoon_clean_tx(tp, txRing, index);
1550 if (netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1551 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1552 netif_wake_queue(tp->dev);
1553
1554 txRing->lastRead = lastRead;
1555 smp_wmb();
1556}
1557
/* Return the rx buffer at slot idx to the free-buffer ring so the NIC can
 * reuse it (keeping its existing DMA mapping). If the free ring is full,
 * the skb is dropped instead and the slot left empty for a later refill.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* Ring full: advancing lastWrite by one entry would collide with
	 * the NIC's cleared index. Drop the buffer rather than overwrite.
	 */
	if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Descriptor contents must be visible before the NIC sees the
	 * updated producer index.
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1584
/* Allocate a fresh PKT_BUF_SZ skb for rx slot idx, DMA-map it, and post
 * it on the free-buffer ring for the NIC. Returns 0 on success, -ENOMEM
 * if the free ring is full or the allocation fails (slot left empty).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* Free ring full -- same collision test as typhoon_recycle_rx_skb. */
	if ((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = netdev_alloc_skb(tp->dev, PKT_BUF_SZ);
	if (!skb)
		return -ENOMEM;

#if 0
	/* Deliberately disabled: would 2-byte-align the IP header, but is
	 * kept out of the active path -- leave as-is.
	 */
	skb_reserve(skb, 2);
#endif

	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Publish the buffer on the free ring, then make the descriptor
	 * visible before bumping the producer index the NIC reads.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Order descriptor writes before the index update. */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1630
/* Drain received packets from rxRing, between the NIC's producer index
 * (*ready) and our consumer index (*cleared), up to budget packets.
 * Small packets (< rx_copybreak) are copied into a fresh skb so the
 * original buffer can be recycled; larger ones are handed up directly
 * and the slot refilled. Applies hardware checksum and VLAN results.
 * Returns the number of packets delivered and updates *cleared.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while (rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* rx->addr holds the rxbuffers[] slot index we posted in
		 * the rx_free descriptor's virtAddr.
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if (rx->flags & TYPHOON_RX_ERROR) {
			/* Bad frame: give the buffer straight back. */
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if (pkt_len < rx_copybreak &&
		   (new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
			/* Copybreak path: 2-byte reserve aligns the IP
			 * header; sync the mapping around the CPU copy and
			 * recycle the original buffer.
			 */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* Hand the mapped buffer up as-is and refill the
			 * slot with a fresh skb.
			 */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* Only trust the checksum when the IP check AND exactly one
		 * of TCP/UDP check passed.
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			   TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if (csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		if (rx->rxStatus & TYPHOON_RX_VLAN)
			__vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q),
					       ntohl(rx->vlanTag) & 0xffff);
		netif_receive_skb(new_skb);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1707
1708static void
1709typhoon_fill_free_ring(struct typhoon *tp)
1710{
1711 u32 i;
1712
1713 for (i = 0; i < RXENT_ENTRIES; i++) {
1714 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1715 if (rxb->skb)
1716 continue;
1717 if (typhoon_alloc_rx_skb(tp, i) < 0)
1718 break;
1719 }
1720}
1721
/* NAPI poll handler: process command responses and tx completions, then
 * drain both rx rings (high priority first) within budget, refill the
 * free-buffer ring if the NIC has emptied it, and re-enable interrupts
 * once all work is done.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* Make sure we see the NIC's index updates before reading them. */
	rmb();
	/* Skip response processing if a synchronous command is waiting on
	 * these responses elsewhere.
	 */
	if (!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if (le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if (indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
	}

	if (indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if (le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* NIC has consumed every posted buffer -- refill now. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* All caught up: leave polling mode and unmask interrupts. */
		napi_complete_done(napi, work_done);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1762
/* Interrupt handler (shared line): ack the interrupt, mask further
 * interrupts, and kick NAPI to do the real work.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	/* Not ours -- shared IRQ line. */
	if (!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* Ack exactly the bits we observed. */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		/* Mask all interrupts until typhoon_poll() finishes. */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1786
1787static void
1788typhoon_free_rx_rings(struct typhoon *tp)
1789{
1790 u32 i;
1791
1792 for (i = 0; i < RXENT_ENTRIES; i++) {
1793 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1794 if (rxb->skb) {
1795 pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1796 PCI_DMA_FROMDEVICE);
1797 dev_kfree_skb(rxb->skb);
1798 rxb->skb = NULL;
1799 }
1800 }
1801}
1802
/* Program the requested wake events and command the 3XP into its sleep
 * state, waiting for it to report SLEEPING. Does not touch PCI power
 * state -- typhoon_sleep() layers that on top. Marks the carrier off so
 * userspace doesn't see a stale link while asleep.
 *
 * Returns 0, a command error (< 0), or -ETIMEDOUT.
 */
static int
typhoon_sleep_early(struct typhoon *tp, __le16 events)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* The card is asleep; report the link as down. */
	netif_carrier_off(tp->dev);

	return 0;
}
1836
1837static int
1838typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
1839{
1840 int err;
1841
1842 err = typhoon_sleep_early(tp, events);
1843
1844 if (err)
1845 return err;
1846
1847 pci_enable_wake(tp->pdev, state, 1);
1848 pci_disable_device(tp->pdev);
1849 return pci_set_power_state(tp->pdev, state);
1850}
1851
/* Wake the 3XP from sleep. If the wakeup handshake times out, or this
 * card is flagged as needing a reset after wakeup, fall back to a full
 * reset using the given wait_type.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	void __iomem *ioaddr = tp->ioaddr;

	/* Ask the sleep image to wake; some cards (per capabilities) must
	 * always be reset instead of trusting the handshake.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
	   (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1868
/* Bring the card from sleep to full operation: reset rings, download and
 * boot the runtime image, then configure it (packet size, MAC address,
 * interrupt coalescing, transceiver, VLAN ethertype, offloads, rx mode)
 * and enable tx/rx. On any failure the card is reset and the rings are
 * reinitialized before returning the error.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if (err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* MAC is split as a 16-bit word plus a 32-bit word, each sent in
	 * host order after byte-swapping from the address bytes.
	 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* parm1 == 0 disables interrupt coalescing. */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if (err < 0)
		goto error_out;

	/* Publish the running state before unmasking interrupts. */
	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	/* Leave the card quiesced and the rings in a clean state. */
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1960
/* Quiesce the running card: mask interrupts, disable rx, wait for the
 * tx-low ring to drain, disable tx, snapshot the statistics, halt the
 * image, and reset the chip. Any tx descriptors still outstanding after
 * the reset are reclaimed so their skbs/DMA mappings are not leaked.
 *
 * Returns 0, or -ETIMEDOUT if the final reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupt generation while we take things down. */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Busy-wait (bounded) for the card to finish transmitting what is
	 * already queued on the low-priority ring.
	 */
	for (i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if (indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if (i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Publish the sleeping state, then save the final statistics the
	 * runtime image reported before it is halted.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->dev->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if (typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if (typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* Force-reclaim anything the card never acknowledged. */
	if (indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2021
2022static void
2023typhoon_tx_timeout(struct net_device *dev, unsigned int txqueue)
2024{
2025 struct typhoon *tp = netdev_priv(dev);
2026
2027 if (typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
2028 netdev_warn(dev, "could not reset in tx timeout\n");
2029 goto truly_dead;
2030 }
2031
2032
2033 typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2034 typhoon_free_rx_rings(tp);
2035
2036 if (typhoon_start_runtime(tp) < 0) {
2037 netdev_err(dev, "could not start runtime in tx timeout\n");
2038 goto truly_dead;
2039 }
2040
2041 netif_wake_queue(dev);
2042 return;
2043
2044truly_dead:
2045
2046 typhoon_reset(tp->ioaddr, NoWait);
2047 netif_carrier_off(dev);
2048}
2049
/* ndo_open: load firmware (first time only), wake the card, claim the
 * IRQ, enable NAPI, and start the runtime image. On failure, attempts to
 * reboot into the sleep image and put the card back to sleep.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	/* The card was left in D3hot at probe time; bring it back. */
	pci_set_power_state(tp->pdev, PCI_D0);
	pci_restore_state(tp->pdev);

	err = typhoon_wakeup(tp, WaitSleep);
	if (err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if (err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* Best effort: reboot into the sleep image and power down again. */
	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2101
/* ndo_stop: stop the queue and NAPI, shut down the runtime image, free
 * the IRQ and all rx buffers, then reboot into the sleep image and put
 * the card into D3hot. Errors are logged but the close always succeeds.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if (typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Safe to release the IRQ now that the card is quiesced. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if (typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2127
2128static int __maybe_unused
2129typhoon_resume(struct device *dev_d)
2130{
2131 struct net_device *dev = dev_get_drvdata(dev_d);
2132 struct typhoon *tp = netdev_priv(dev);
2133
2134
2135
2136 if (!netif_running(dev))
2137 return 0;
2138
2139 if (typhoon_wakeup(tp, WaitNoSleep) < 0) {
2140 netdev_err(dev, "critical: could not wake up in resume\n");
2141 goto reset;
2142 }
2143
2144 if (typhoon_start_runtime(tp) < 0) {
2145 netdev_err(dev, "critical: could not start runtime in resume\n");
2146 goto reset;
2147 }
2148
2149 netif_device_attach(dev);
2150 return 0;
2151
2152reset:
2153 typhoon_reset(tp->ioaddr, NoWait);
2154 return -EBUSY;
2155}
2156
/* PM suspend hook: shut the runtime down, reboot into the sleep image,
 * re-program the MAC address and a minimal rx filter (directed +
 * broadcast) so wake-on-LAN matching works, then arm the configured wake
 * events and enable device wakeup. On any failure, resumes the device
 * and returns -EBUSY.
 */
static int __maybe_unused
typhoon_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* Nothing to do if the interface isn't up. */
	if (!netif_running(dev))
		return 0;

	/* Known limitation: magic-packet wake conflicts with the VLAN
	 * offload configuration -- warn, but continue.
	 */
	if (tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");

	netif_device_detach(dev);

	if (typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if (typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* Same MAC split as typhoon_start_runtime: 16-bit + 32-bit words. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if (typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if (typhoon_sleep_early(tp, tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	device_wakeup_enable(dev_d);

	return 0;

need_resume:
	typhoon_resume(dev_d);
	return -EBUSY;
}
2217
/* Probe whether MMIO access (BAR 1) works on this card by raising a
 * self-interrupt through the mapped registers and checking that it is
 * reflected in the status register. Returns 1 if MMIO is usable, 0 to
 * fall back to port I/O. Leaves the interrupt registers acked/disabled.
 */
static int
typhoon_test_mmio(struct pci_dev *pdev)
{
	void __iomem *ioaddr = pci_iomap(pdev, 1, 128);
	int mode = 0;
	u32 val;

	if (!ioaddr)
		goto out;

	/* Only meaningful while the sleep image is waiting for the host. */
	if (ioread32(ioaddr + TYPHOON_REG_STATUS) !=
				TYPHOON_STATUS_WAITING_FOR_HOST)
		goto out_unmap;

	/* Mask everything, clear pending status, then enable so the
	 * self-interrupt below can latch into the status register.
	 */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);

	/* Trigger a self-interrupt and see whether it shows up; the
	 * intermediate read flushes the posted write before the delay.
	 */
	val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if ((val & TYPHOON_INTR_SELF) == 0) {
		iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT);
		ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		udelay(50);
		val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
		if (val & TYPHOON_INTR_SELF)
			mode = 1;
	}

	/* Restore a quiescent interrupt state: masked, acked, disabled. */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);
	ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);

out_unmap:
	pci_iounmap(pdev, ioaddr);

out:
	if (!mode)
		pr_info("%s: falling back to port IO\n", pci_name(pdev));
	return mode;
}
2263
/* Net-device operations; typhoon_start_tx/typhoon_set_rx_mode/
 * typhoon_get_stats are defined earlier in the file (outside this chunk).
 */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_rx_mode	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
2274
2275static int
2276typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2277{
2278 struct net_device *dev;
2279 struct typhoon *tp;
2280 int card_id = (int) ent->driver_data;
2281 void __iomem *ioaddr;
2282 void *shared;
2283 dma_addr_t shared_dma;
2284 struct cmd_desc xp_cmd;
2285 struct resp_desc xp_resp[3];
2286 int err = 0;
2287 const char *err_msg;
2288
2289 dev = alloc_etherdev(sizeof(*tp));
2290 if (dev == NULL) {
2291 err_msg = "unable to alloc new net device";
2292 err = -ENOMEM;
2293 goto error_out;
2294 }
2295 SET_NETDEV_DEV(dev, &pdev->dev);
2296
2297 err = pci_enable_device(pdev);
2298 if (err < 0) {
2299 err_msg = "unable to enable device";
2300 goto error_out_dev;
2301 }
2302
2303 err = pci_set_mwi(pdev);
2304 if (err < 0) {
2305 err_msg = "unable to set MWI";
2306 goto error_out_disable;
2307 }
2308
2309 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2310 if (err < 0) {
2311 err_msg = "No usable DMA configuration";
2312 goto error_out_mwi;
2313 }
2314
2315
2316
2317 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2318 err_msg = "region #1 not a PCI IO resource, aborting";
2319 err = -ENODEV;
2320 goto error_out_mwi;
2321 }
2322 if (pci_resource_len(pdev, 0) < 128) {
2323 err_msg = "Invalid PCI IO region size, aborting";
2324 err = -ENODEV;
2325 goto error_out_mwi;
2326 }
2327 if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2328 err_msg = "region #1 not a PCI MMIO resource, aborting";
2329 err = -ENODEV;
2330 goto error_out_mwi;
2331 }
2332 if (pci_resource_len(pdev, 1) < 128) {
2333 err_msg = "Invalid PCI MMIO region size, aborting";
2334 err = -ENODEV;
2335 goto error_out_mwi;
2336 }
2337
2338 err = pci_request_regions(pdev, KBUILD_MODNAME);
2339 if (err < 0) {
2340 err_msg = "could not request regions";
2341 goto error_out_mwi;
2342 }
2343
2344
2345
2346 if (use_mmio != 0 && use_mmio != 1)
2347 use_mmio = typhoon_test_mmio(pdev);
2348
2349 ioaddr = pci_iomap(pdev, use_mmio, 128);
2350 if (!ioaddr) {
2351 err_msg = "cannot remap registers, aborting";
2352 err = -EIO;
2353 goto error_out_regions;
2354 }
2355
2356
2357
2358 shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2359 &shared_dma);
2360 if (!shared) {
2361 err_msg = "could not allocate DMA memory";
2362 err = -ENOMEM;
2363 goto error_out_remap;
2364 }
2365
2366 dev->irq = pdev->irq;
2367 tp = netdev_priv(dev);
2368 tp->shared = shared;
2369 tp->shared_dma = shared_dma;
2370 tp->pdev = pdev;
2371 tp->tx_pdev = pdev;
2372 tp->ioaddr = ioaddr;
2373 tp->tx_ioaddr = ioaddr;
2374 tp->dev = dev;
2375
2376
2377
2378
2379
2380
2381
2382
2383 err = typhoon_reset(ioaddr, WaitSleep);
2384 if (err < 0) {
2385 err_msg = "could not reset 3XP";
2386 goto error_out_dma;
2387 }
2388
2389
2390
2391
2392
2393 pci_set_master(pdev);
2394 pci_save_state(pdev);
2395
2396 typhoon_init_interface(tp);
2397 typhoon_init_rings(tp);
2398
2399 err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
2400 if (err < 0) {
2401 err_msg = "cannot boot 3XP sleep image";
2402 goto error_out_reset;
2403 }
2404
2405 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2406 err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
2407 if (err < 0) {
2408 err_msg = "cannot read MAC address";
2409 goto error_out_reset;
2410 }
2411
2412 *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
2413 *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
2414
2415 if (!is_valid_ether_addr(dev->dev_addr)) {
2416 err_msg = "Could not obtain valid ethernet address, aborting";
2417 err = -EIO;
2418 goto error_out_reset;
2419 }
2420
2421
2422
2423
2424 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
2425 err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
2426 if (err < 0) {
2427 err_msg = "Could not get Sleep Image version";
2428 goto error_out_reset;
2429 }
2430
2431 tp->capabilities = typhoon_card_info[card_id].capabilities;
2432 tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2433
2434
2435
2436
2437
2438
2439
2440 if (xp_resp[0].numDesc != 0)
2441 tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
2442
2443 err = typhoon_sleep(tp, PCI_D3hot, 0);
2444 if (err < 0) {
2445 err_msg = "cannot put adapter to sleep";
2446 goto error_out_reset;
2447 }
2448
2449
2450 dev->netdev_ops = &typhoon_netdev_ops;
2451 netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
2452 dev->watchdog_timeo = TX_TIMEOUT;
2453
2454 dev->ethtool_ops = &typhoon_ethtool_ops;
2455
2456
2457
2458
2459
2460
2461
2462
2463 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2464 NETIF_F_HW_VLAN_CTAG_TX;
2465 dev->features = dev->hw_features |
2466 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
2467
2468 err = register_netdev(dev);
2469 if (err < 0) {
2470 err_msg = "unable to register netdev";
2471 goto error_out_reset;
2472 }
2473
2474 pci_set_drvdata(pdev, dev);
2475
2476 netdev_info(dev, "%s at %s 0x%llx, %pM\n",
2477 typhoon_card_info[card_id].name,
2478 use_mmio ? "MMIO" : "IO",
2479 (unsigned long long)pci_resource_start(pdev, use_mmio),
2480 dev->dev_addr);
2481
2482
2483
2484
2485 if (xp_resp[0].numDesc == 0) {
2486
2487
2488
2489 u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
2490 netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
2491 monthday >> 8, monthday & 0xff);
2492 } else if (xp_resp[0].numDesc == 2) {
2493
2494
2495 u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
2496 u8 *ver_string = (u8 *) &xp_resp[1];
2497 ver_string[25] = 0;
2498 netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
2499 sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
2500 sleep_ver & 0xfff, ver_string);
2501 } else {
2502 netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
2503 xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
2504 }
2505
2506 return 0;
2507
2508error_out_reset:
2509 typhoon_reset(ioaddr, NoWait);
2510
2511error_out_dma:
2512 pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2513 shared, shared_dma);
2514error_out_remap:
2515 pci_iounmap(pdev, ioaddr);
2516error_out_regions:
2517 pci_release_regions(pdev);
2518error_out_mwi:
2519 pci_clear_mwi(pdev);
2520error_out_disable:
2521 pci_disable_device(pdev);
2522error_out_dev:
2523 free_netdev(dev);
2524error_out:
2525 pr_err("%s: %s\n", pci_name(pdev), err_msg);
2526 return err;
2527}
2528
/*
 * typhoon_remove_one - PCI removal callback; undo typhoon_init_one().
 *
 * Resources are torn down in reverse order of acquisition.  The probe
 * path leaves the adapter asleep in D3hot (typhoon_sleep(tp, PCI_D3hot, 0)),
 * so the device must be returned to D0 and have its saved config space
 * restored before the 3XP is reset; the reset in turn must happen while
 * the register mapping and the shared DMA area are still valid.
 */
static void
typhoon_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* Stop new opens/transmits before touching the hardware. */
	unregister_netdev(dev);
	/* Wake the card and restore the config space captured by
	 * pci_save_state() during probe.
	 */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* Quiesce the 3XP; NoWait since we are discarding the device. */
	typhoon_reset(tp->ioaddr, NoWait);
	pci_iounmap(pdev, tp->ioaddr);
	pci_free_consistent(pdev, sizeof(struct typhoon_shared),
			    tp->shared, tp->shared_dma);
	pci_release_regions(pdev);
	pci_clear_mwi(pdev);
	pci_disable_device(pdev);
	free_netdev(dev);
}
2547
/* Suspend/resume hooks; handlers are defined earlier in this file. */
static SIMPLE_DEV_PM_OPS(typhoon_pm_ops, typhoon_suspend, typhoon_resume);

/* PCI driver glue: device table plus probe/remove/PM callbacks. */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= typhoon_remove_one,
	.driver.pm	= &typhoon_pm_ops,
};
2557
2558static int __init
2559typhoon_init(void)
2560{
2561 return pci_register_driver(&typhoon_driver);
2562}
2563
/*
 * Module exit: release the cached firmware image (release_firmware() is
 * documented to accept NULL, so this is safe if no device ever loaded
 * it), then unregister the PCI driver, which runs typhoon_remove_one()
 * for every still-bound device.
 */
static void __exit
typhoon_cleanup(void)
{
	release_firmware(typhoon_fw);
	pci_unregister_driver(&typhoon_driver);
}
2570
/* Register module load/unload entry points. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);
2573