/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * Author:
 *     Linux qlge network device driver by
 *     Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		 "Option to enable MPI firmware dump. Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		 "Option to allow force of firmware core dump. Default is OFF - Do not allow.");

static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);	/* 0 == acquired */
}
138
139int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
140{
141 unsigned int wait_count = 30;
142
143 do {
144 if (!ql_sem_trylock(qdev, sem_mask))
145 return 0;
146 udelay(100);
147 } while (--wait_count);
148 return -ETIMEDOUT;
149}
150
151void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
152{
153 ql_write32(qdev, SEM, sem_mask);
154 ql_read32(qdev, SEM);
155}
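
/* Typical usage of the semaphore helpers above (an illustrative sketch of
 * the pattern used throughout this file, not a new code path): acquire,
 * touch the protected registers, release.
 *
 *	if (ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK))
 *		return -ETIMEDOUT;
 *	status = ql_set_mac_addr_reg(qdev, addr, MAC_ADDR_TYPE_CAM_MAC, idx);
 *	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
 */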

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count;

	for (count = 0; count < UDELAY_COUNT; count++) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit) {
			return 0;
		}
		udelay(UDELAY_DELAY);
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count;
	u32 temp;

	for (count = 0; count < UDELAY_COUNT; count++) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
	}
	return -ETIMEDOUT;
}

/* Used to transfer a control block (e.g. an initialization control block)
 * between host memory and the chip via the CFG register.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	if (bit & (CFG_LRQ | CFG_LR | CFG_LCQ))
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	map = dma_map_single(&qdev->pdev->dev, ptr, size, direction);
	if (dma_mapping_error(&qdev->pdev->dev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32)map);
	ql_write32(qdev, ICB_H, (u32)(map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	dma_unmap_single(&qdev->pdev->dev, map, size, direction);
	return status;
}
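
/* Illustrative sketch of a caller of ql_write_cfg(): loading a completion
 * queue control block (the icb pointer and cq_id here are hypothetical
 * stand-ins for what the ring-setup code passes in):
 *
 *	err = ql_write_cfg(qdev, icb, sizeof(*icb), CFG_LCQ, cq_id);
 *	if (err)
 *		netif_err(qdev, ifup, qdev->ndev,
 *			  "Failed to load completion queue ICB.\n");
 */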

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC: {
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   MAC_ADDR_ADR | MAC_ADDR_RS |
			   type);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			break;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   MAC_ADDR_ADR | MAC_ADDR_RS |
			   type);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			break;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						 MAC_ADDR_MW, 0);
			if (status)
				break;
			ql_write32(qdev, MAC_ADDR_IDX,
				   (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   MAC_ADDR_ADR | MAC_ADDR_RS |
				   type);
			status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						 MAC_ADDR_MR, 0);
			if (status)
				break;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		}
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC: {
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
			   MAC_ADDR_E);
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type |
			   MAC_ADDR_E);

		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		break;
	}
	case MAC_ADDR_TYPE_CAM_MAC: {
		u32 cam_output;
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type);
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type);
		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   (offset) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type);
		/* This field should also include the queue id
		 * and possibly the function id.  Right now we hardcode
		 * the route field to NIC core.
		 */
		cam_output = (CAM_OUT_ROUTE_NIC |
			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
			cam_output |= CAM_OUT_RV;
		/* route to NIC core */
		ql_write32(qdev, MAC_ADDR_DATA, cam_output);
		break;
	}
	case MAC_ADDR_TYPE_VLAN: {
		u32 enable_bit = *((u32 *)&addr[0]);

		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing.  It's either MAC_ADDR_E on or off.
		 */
		status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			break;
		ql_write32(qdev, MAC_ADDR_IDX,
			   offset |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type |
			   enable_bit);
		break;
	}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
	return status;
}

/* Set or clear the CAM MAC address for this function. */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		eth_zero_addr(zero_mac_addr);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *)addr,
				     MAC_ADDR_TYPE_CAM_MAC,
				     qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast,
 * multicast and error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
	{
		value = RT_IDX_DST_CAM_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_ERR:	/* Pass up all error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_IP_CSUM_ERR:	/* Pass up IP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_IP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_TU_CSUM_ERR:	/* Pass up TCP/UDP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
			 RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_BCAST:	/* Pass up broadcast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_MCAST:	/* Pass up all multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
	{
		value = RT_IDX_DST_RSS |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	case 0:	/* Clear the E-bit on an entry. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(index << RT_IDX_IDX_SHIFT);	/* index */
		break;
	}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
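
/* Illustrative sketch of a caller (this mirrors how the routing table is
 * initialized elsewhere in the driver): send all broadcast frames to the
 * default queue:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
 *	if (status)
 *		netif_err(qdev, ifup, qdev->ndev,
 *			  "Failed to init routing register for broadcast packets.\n");
 */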

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	struct intr_context *ctx = &qdev->intr_context[intr];

	ql_write32(qdev, INTR_EN, ctx->intr_en_mask);
}

static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	struct intr_context *ctx = &qdev->intr_context[intr];

	ql_write32(qdev, INTR_EN, ctx->intr_dis_mask);
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++)
		ql_enable_completion_interrupt(qdev, i);
}

/* Validate the flash signature and checksum; "size" is the image length
 * in 16-bit words.
 */
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
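
/* The checksum above is a simple 16-bit sum over the whole image; a valid
 * image sums to zero.  Illustrative caller-side sketch (this is the
 * pattern ql_get_8012_flash_params() below uses):
 *
 *	status = ql_validate_flash(qdev,
 *				   sizeof(struct flash_params_8012) /
 *				   sizeof(u16), "8012");
 *	if (status)
 *		return -EINVAL;
 */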

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) /
				   sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i + offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) /
				   sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64)lo | ((u64)hi << 32);

exit:
	return status;
}
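
/* Illustrative sketch: a 64-bit XGMAC statistic is fetched as two 32-bit
 * reads, low dword at "reg" and high dword at "reg + 4".  A hypothetical
 * caller gathering one counter would look like:
 *
 *	u64 val;
 *	if (!ql_read_xgmac_reg64(qdev, some_stat_reg, &val))
 *		stats_entry = val;
 *
 * where some_stat_reg stands in for whichever statistics register offset
 * the caller cares about.
 */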

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;

	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Schedule the port configuration worker. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;	/* Enable TX stats. */
	data |= GLOBAL_CFG_RX_STAT_EN;	/* Enable RX stats. */
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Return the next buffer in the queue and bump the consumer index. */
static struct qlge_bq_desc *qlge_get_curr_buf(struct qlge_bq *bq)
{
	struct qlge_bq_desc *bq_desc;

	bq_desc = &bq->queue[bq->next_to_clean];
	bq->next_to_clean = QLGE_BQ_WRAP(bq->next_to_clean + 1);

	return bq_desc;
}
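
/* QLGE_BQ_WRAP() masks the running index back into the power-of-two ring.
 * Illustrative numbers, assuming a 512-entry ring (the real QLGE_BQ_LEN
 * comes from qlge.h):
 *
 *	QLGE_BQ_WRAP(511 + 1) == 0	// wraps to the start
 *	QLGE_BQ_WRAP(57)      == 57	// in-range indexes pass through
 */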

static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					       struct rx_ring *rx_ring)
{
	struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq);

	dma_sync_single_for_cpu(&qdev->pdev->dev, lbq_desc->dma_addr,
				qdev->lbq_buf_size, DMA_FROM_DEVICE);

	if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) ==
	    ql_lbq_block_size(qdev)) {
		/* last chunk of the master page */
		dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
			       ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
	}

	return lbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static const char * const bq_type_name[] = {
	[QLGE_SB] = "sbq",
	[QLGE_LB] = "lbq",
};

/* return 0 or negative error */
static int qlge_refill_sb(struct rx_ring *rx_ring,
			  struct qlge_bq_desc *sbq_desc, gfp_t gfp)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	struct sk_buff *skb;

	if (sbq_desc->p.skb)
		return 0;

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "ring %u sbq: getting new skb for index %d.\n",
		     rx_ring->cq_id, sbq_desc->index);

	skb = __netdev_alloc_skb(qdev->ndev, SMALL_BUFFER_SIZE, gfp);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, QLGE_SB_PAD);

	sbq_desc->dma_addr = dma_map_single(&qdev->pdev->dev, skb->data,
					    SMALL_BUF_MAP_SIZE,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(&qdev->pdev->dev, sbq_desc->dma_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "PCI mapping failed.\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}
	*sbq_desc->buf_ptr = cpu_to_le64(sbq_desc->dma_addr);

	sbq_desc->p.skb = skb;
	return 0;
}

/* return 0 or negative error */
static int qlge_refill_lb(struct rx_ring *rx_ring,
			  struct qlge_bq_desc *lbq_desc, gfp_t gfp)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk;

	if (!master_chunk->page) {
		struct page *page;
		dma_addr_t dma_addr;

		page = alloc_pages(gfp | __GFP_COMP, qdev->lbq_buf_order);
		if (unlikely(!page))
			return -ENOMEM;
		dma_addr = dma_map_page(&qdev->pdev->dev, page, 0,
					ql_lbq_block_size(qdev),
					DMA_FROM_DEVICE);
		if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) {
			__free_pages(page, qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -EIO;
		}
		master_chunk->page = page;
		master_chunk->va = page_address(page);
		master_chunk->offset = 0;
		rx_ring->chunk_dma_addr = dma_addr;
	}

	lbq_desc->p.pg_chunk = *master_chunk;
	lbq_desc->dma_addr = rx_ring->chunk_dma_addr;
	*lbq_desc->buf_ptr = cpu_to_le64(lbq_desc->dma_addr +
					 lbq_desc->p.pg_chunk.offset);

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	master_chunk->offset += qdev->lbq_buf_size;
	if (master_chunk->offset == ql_lbq_block_size(qdev)) {
		master_chunk->page = NULL;
	} else {
		master_chunk->va += qdev->lbq_buf_size;
		get_page(master_chunk->page);
	}

	return 0;
}
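
/* Sketch of the chunk carving done above: one order-N "master" page is
 * sliced into lbq_buf_size chunks that are handed to consecutive
 * descriptors.  With PAGE_SIZE == 4096, lbq_buf_order == 1 and
 * lbq_buf_size == 2048 (illustrative numbers, not the only configuration):
 *
 *	block size = 4096 << 1   = 8192 bytes
 *	chunks     = 8192 / 2048 = 4 descriptors per master page
 *
 * Every chunk except the last takes an extra page reference via
 * get_page(); the DMA mapping is torn down only when the final chunk is
 * consumed in ql_get_curr_lchunk().
 */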

/* return 0 or negative error */
static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp)
{
	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct qlge_bq_desc *bq_desc;
	int refill_count;
	int retval;
	int i;

	refill_count = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(bq->next_to_clean - 1) -
				    bq->next_to_use);
	if (!refill_count)
		return 0;

	/* Run "i" from next_to_use - QLGE_BQ_LEN up to zero so that hitting
	 * zero coincides exactly with wrapping around the ring.
	 */
	i = bq->next_to_use;
	bq_desc = &bq->queue[i];
	i -= QLGE_BQ_LEN;
	do {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "ring %u %s: try cleaning idx %d\n",
			     rx_ring->cq_id, bq_type_name[bq->type], i);

		if (bq->type == QLGE_SB)
			retval = qlge_refill_sb(rx_ring, bq_desc, gfp);
		else
			retval = qlge_refill_lb(rx_ring, bq_desc, gfp);
		if (retval < 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "ring %u %s: Could not get a page chunk, idx %d\n",
				  rx_ring->cq_id, bq_type_name[bq->type], i);
			break;
		}

		bq_desc++;
		i++;
		if (unlikely(!i)) {
			bq_desc = &bq->queue[0];
			i -= QLGE_BQ_LEN;
		}
		refill_count--;
	} while (refill_count);
	i += QLGE_BQ_LEN;

	if (bq->next_to_use != i) {
		if (QLGE_BQ_ALIGN(bq->next_to_use) != QLGE_BQ_ALIGN(i)) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "ring %u %s: updating prod idx = %d.\n",
				     rx_ring->cq_id, bq_type_name[bq->type],
				     i);
			ql_write_db_reg(i, bq->prod_idx_db_reg);
		}
		bq->next_to_use = i;
	}

	return retval;
}
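
/* The refill target above stops at the last aligned index before
 * next_to_clean, so the producer never quite catches the consumer.
 * Worked example with illustrative values (a 512-entry ring and a
 * 16-entry alignment from QLGE_BQ_ALIGN()):
 *
 *	next_to_clean = 100, next_to_use = 32
 *	refill_count  = QLGE_BQ_WRAP(QLGE_BQ_ALIGN(99) - 32)
 *	              = QLGE_BQ_WRAP(96 - 32) = 64 buffers to post
 */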

static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp,
				    unsigned long delay)
{
	bool sbq_fail, lbq_fail;

	sbq_fail = !!qlge_refill_bq(&rx_ring->sbq, gfp);
	lbq_fail = !!qlge_refill_bq(&rx_ring->lbq, gfp);

	/* Minimum number of buffers needed to be able to receive at least one
	 * frame of any format:
	 * sbq: 1 for header + 1 for data
	 * lbq: mtu 9000 / lb size
	 * Below this, the queue might stall.
	 */
	if ((sbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->sbq) < 2) ||
	    (lbq_fail && QLGE_BQ_HW_OWNED(&rx_ring->lbq) <
	     DIV_ROUND_UP(9000, LARGE_BUFFER_MAX_SIZE)))
		/* Allocations can take a long time in certain cases (ex.
		 * reclaim).  Therefore, use a workqueue for long-running
		 * work items.
		 */
		queue_delayed_work_on(smp_processor_id(), system_long_wq,
				      &rx_ring->refill_work, delay);
}

static void qlge_slow_refill(struct work_struct *work)
{
	struct rx_ring *rx_ring = container_of(work, struct rx_ring,
					       refill_work.work);
	struct napi_struct *napi = &rx_ring->napi;

	napi_disable(napi);
	ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2);
	napi_enable(napi);

	local_bh_disable();
	/* napi_disable() might have prevented incomplete napi work from being
	 * rescheduled.
	 */
	napi_schedule(napi);
	/* trigger softirq processing */
	local_bh_enable();
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;

	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			dma_unmap_single(&qdev->pdev->dev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 DMA_TO_DEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			dma_unmap_page(&qdev->pdev->dev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), DMA_TO_DEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);

	err = dma_mapping_error(&qdev->pdev->dev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When the last fragment is mapped, then the OAL's address
	 * descriptor will be used.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];

		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     DMA_TO_DEVICE);
			err = dma_mapping_error(&qdev->pdev->dev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of remaining fragments
			 * times the size of a tx buffer descriptor, with
			 * the continuation bit set.
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));
	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes terminal failure of the frame.
	 * If the 2nd frag mapping failed, then this causes the
	 * unwinding of the 1st frag mapping, and so on.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
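
/* Worked example for the OAL hand-off above: a 10-fragment skb uses
 * iocb->seg[0] for skb->data, seg[1..6] for frags 0-5, and seg[7] as a
 * pointer to the external OAL, whose entries then receive frags 6-9.
 * The seg[7] length field is
 *
 *	sizeof(struct tx_buf_desc) * (frag_cnt - frag_idx) | TX_DESC_C
 *
 * i.e. the byte size of the descriptors that live in the external list,
 * with the continuation bit set so the chip follows the pointer.
 */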

/* Categorizing receive firmware frame errors */
static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
				 struct rx_ring *rx_ring)
{
	struct nic_stats *stats = &qdev->nic_stats;

	stats->rx_err_count++;
	rx_ring->rx_errors++;

	switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
	case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
		stats->rx_code_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
		stats->rx_oversize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
		stats->rx_undersize_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
		stats->rx_preamble_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
		stats->rx_frame_len_err++;
		break;
	case IB_MAC_IOCB_RSP_ERR_CRC:
		stats->rx_crc_err++;
		break;
	default:
		break;
	}
}

/* Update the mac header length based on the presence of vlan tags. */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
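
/* Illustrative effect of the helper above: for a single-tagged frame the
 * MAC header grows from ETH_HLEN (14) to 14 + VLAN_HLEN (4) bytes; for a
 * stacked (QinQ) frame it grows by 2 * VLAN_HLEN.  tags[6] is the outer
 * ethertype and tags[8] the inner one when both are 802.1Q.
 */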

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length, u16 vlan_id)
{
	struct sk_buff *skb;
	struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length, u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length. */
	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Frame too long, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	skb_put_data(skb, addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen, length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length, u16 vlan_id)
{
	struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb, *new_skb;

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (!new_skb) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);

	dma_sync_single_for_cpu(&qdev->pdev->dev, sbq_desc->dma_addr,
				SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);

	skb_put_data(new_skb, skb->data, length);

	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	memmove(skb->data, temp_addr, len);
}

/*
 * This function builds an skb for the given inbound
 * completion.  It will be rewritten for readability in the near
 * future, but for now it works well.
 */
static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
	struct qlge_bq_desc *lbq_desc, *sbq_desc;
	struct sk_buff *skb = NULL;
	size_t hlen = ETH_HLEN;

	/*
	 * Handle the header buffer if present.
	 */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Header of %d bytes in small buffer.\n", hdr_len);
		/*
		 * Headers fit nicely into a small buffer.
		 */
		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		skb = sbq_desc->p.skb;
		ql_realign_skb(skb, hdr_len);
		skb_put(skb, hdr_len);
		sbq_desc->p.skb = NULL;
	}

	/*
	 * Handle the data buffer(s).
	 */
	if (unlikely(!length)) {	/* Is there data too? */
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No Data buffer in this packet.\n");
		return skb;
	}

	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Headers in small, data of %d bytes in small, combine them.\n",
				     length);
			/*
			 * Data is less than small buffer size so it's
			 * stuffed in a small buffer.
			 * For this case we append the data
			 * from the "data" small buffer to the "header" small
			 * buffer.
			 */
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			dma_sync_single_for_cpu(&qdev->pdev->dev,
						sbq_desc->dma_addr,
						SMALL_BUF_MAP_SIZE,
						DMA_FROM_DEVICE);
			skb_put_data(skb, sbq_desc->p.skb->data, length);
		} else {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes in a single small buffer.\n",
				     length);
			sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
			skb = sbq_desc->p.skb;
			ql_realign_skb(skb, length);
			skb_put(skb, length);
			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
					 SMALL_BUF_MAP_SIZE,
					 DMA_FROM_DEVICE);
			sbq_desc->p.skb = NULL;
		}
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Header in small, %d bytes in large. Chain large to small!\n",
				     length);
			/*
			 * The data is in a single large buffer.  We
			 * chain it to the header buffer's skb and let
			 * it rip.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Chaining page at offset = %d, for %d bytes to skb.\n",
				     lbq_desc->p.pg_chunk.offset, length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
		} else {
			/*
			 * The headers and data are in a single large buffer. We
			 * copy it to a new skb and let it go. This can happen with
			 * jumbo mtu on a non-TCP/UDP frame.
			 */
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			skb = netdev_alloc_skb(qdev->ndev, length);
			if (!skb) {
				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
					     "No skb available, drop the packet.\n");
				return NULL;
			}
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
				       qdev->lbq_buf_size,
				       DMA_FROM_DEVICE);
			skb_reserve(skb, NET_IP_ALIGN);
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
				     length);
			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset,
					   length);
			skb->len += length;
			skb->data_len += length;
			skb->truesize += length;
			ql_update_mac_hdr_len(qdev, ib_mac_rsp,
					      lbq_desc->p.pg_chunk.va,
					      &hlen);
			__pskb_pull_tail(skb, hlen);
		}
	} else {
		/*
		 * The data is in a chain of large buffers
		 * pointed to by a small buffer.  We loop
		 * thru and chain them to our small header
		 * buffer's skb.
		 * frags:  There are 18 max frags and our small
		 *         buffer will hold 32 of them. The thing is,
		 *         we'll use 3 max for our 9000 byte jumbo
		 *         and 32 for our 2000 byte chip.  We loop
		 *         thru and chain them to the header buffer's
		 *         skb.  It seems like a waste of cycles...
		 */
		int size, i = 0;

		sbq_desc = qlge_get_curr_buf(&rx_ring->sbq);
		dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
				 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE);
		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
			/*
			 * This is a non TCP/UDP IP frame, so
			 * the headers aren't split into a small
			 * buffer.  We have to use the small buffer
			 * that contains our sg list as our skb to
			 * send upstairs. Copy the sg list here to
			 * a local buffer and use it to find the
			 * pages to chain.
			 */
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "%d bytes of headers & data in chain of large.\n",
				     length);
			skb = sbq_desc->p.skb;
			sbq_desc->p.skb = NULL;
			skb_reserve(skb, NET_IP_ALIGN);
		}
		do {
			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
			size = min(length, qdev->lbq_buf_size);

			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Adding page %d to skb for %d bytes.\n",
				     i, size);
			skb_fill_page_desc(skb, i,
					   lbq_desc->p.pg_chunk.page,
					   lbq_desc->p.pg_chunk.offset, size);
			skb->len += size;
			skb->data_len += size;
			skb->truesize += size;
			length -= size;
			i++;
		} while (length > 0);
		ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
				      &hlen);
		__pskb_pull_tail(skb, hlen);
	}
	return skb;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
					 struct rx_ring *rx_ring,
					 struct ib_mac_iocb_rsp *ib_mac_rsp,
					 u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
	if (unlikely(!skb)) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "No skb available, drop packet.\n");
		rx_ring->rx_dropped++;
		return;
	}

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
		rx_ring->rx_multicast++;
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");
	}

	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (!(iph->frag_off &
			      htons(IP_MF | IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}

/* Process an inbound completion from an rx ring. */
static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
					    struct rx_ring *rx_ring,
					    struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
	u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
		       (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
		((le16_to_cpu(ib_mac_rsp->vlan_id) &
		  IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;

	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);

	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		/* The data and headers are split into
		 * separate buffers.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
		/* The data fit in a single small buffer.
		 * Allocate a new skb, copy the data and
		 * return the buffer to the free pool.
		 */
		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length,
				      vlan_id);
	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
		   !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
		   (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
		/* TCP packet in a page chunk that's been checksummed.
		 * Tack it on to our GRO skb and let it go.
		 */
		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length,
					   vlan_id);
	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
		/* Non-TCP packet in a page chunk. Allocate an
		 * skb, tack it on frags, and send it up.
		 */
		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length,
				       vlan_id);
	} else {
		/* Non-TCP/UDP large frames that span multiple buffers
		 * can be processed correctly by the split frame logic.
		 */
		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
					     vlan_id);
	}

	return (unsigned long)length;
}
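
/* Dispatch summary for ql_process_mac_rx_intr(), derived from the checks
 * it makes, in order:
 *
 *	RSP_HV set                 -> header/data split path
 *	RSP_DS set                 -> whole frame in one small buffer
 *	RSP_DL + TCP + csum OK     -> GRO page path
 *	RSP_DL otherwise           -> plain page path
 *	anything else              -> split-frame path as a catch-all
 */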

/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;

	QL_DUMP_OB_MAC_RSP(mac_rsp);
	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
	tx_ring->tx_packets++;
	dev_kfree_skb(tx_ring_desc->skb);
	tx_ring_desc->skb = NULL;

	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
					OB_MAC_IOCB_RSP_S |
					OB_MAC_IOCB_RSP_L |
					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Total descriptor length did not match transfer length.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too short to be valid, not sent.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "Frame too long, but sent anyway.\n");
		}
		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
			netif_warn(qdev, tx_done, qdev->ndev,
				   "PCI backplane error. Frame not sent.\n");
		}
	}
	atomic_inc(&tx_ring->tx_count);
}

/* Fire up a handler to reset the MPI processor. */
void ql_queue_fw_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
}

void ql_queue_asic_error(struct ql_adapter *qdev)
{
	ql_link_off(qdev);
	ql_disable_interrupts(qdev);
	/* Clear adapter up bit to signal the recovery
	 * process that it shouldn't kill the reset worker
	 * thread.
	 */
	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	/* Set asic recovery bit to indicate reset process that we are
	 * in fatal error recovery process rather than normal close.
	 */
	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}

static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
				    struct ib_ae_iocb_rsp *ib_ae_rsp)
{
	switch (ib_ae_rsp->event) {
	case MGMT_ERR_EVENT:
		netif_err(qdev, rx_err, qdev->ndev,
			  "Management Processor Fatal Error.\n");
		ql_queue_fw_error(qdev);
		return;

	case CAM_LOOKUP_ERR_EVENT:
		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
		ql_queue_asic_error(qdev);
		return;

	case SOFT_ECC_ERROR_EVENT:
		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
		ql_queue_asic_error(qdev);
		break;

	case PCI_ERR_ANON_BUF_RD:
		netdev_err(qdev->ndev,
			   "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
			   ib_ae_rsp->q_id);
		ql_queue_asic_error(qdev);
		break;

	default:
		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
			  ib_ae_rsp->event);
		ql_queue_asic_error(qdev);
		break;
	}
}

static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ob_mac_iocb_rsp *net_rsp = NULL;
	int count = 0;

	struct tx_ring *tx_ring;
	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_TSO_IOCB:
		case OPCODE_OB_MAC_IOCB:
			ql_process_mac_tx_intr(qdev, net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	}
	if (!net_rsp)
		return 0;
	ql_write_cq_idx(rx_ring);
	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}

	return count;
}
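
/* Wake-up threshold sketch for the code above: with a 256-entry work
 * queue (an illustrative wq_len), a stopped subqueue is restarted once
 * more than 256 / 4 = 64 descriptors are free again, which keeps the
 * queue from bouncing between stopped and started on every completion.
 */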

static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
{
	struct ql_adapter *qdev = rx_ring->qdev;
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
	struct ql_net_rsp_iocb *net_rsp;
	int count = 0;

	/* While there are entries in the completion queue. */
	while (prod != rx_ring->cnsmr_idx) {

		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "cq_id = %d, prod = %d, cnsmr = %d\n",
			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);

		net_rsp = rx_ring->curr_entry;
		rmb();
		switch (net_rsp->opcode) {
		case OPCODE_IB_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, rx_ring,
					       (struct ib_mac_iocb_rsp *)
					       net_rsp);
			break;

		case OPCODE_IB_AE_IOCB:
			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
						net_rsp);
			break;
		default:
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
				     net_rsp->opcode);
			break;
		}
		count++;
		ql_update_cq(rx_ring);
		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
		if (count == budget)
			break;
	}
	ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0);
	ql_write_cq_idx(rx_ring);
	return count;
}

static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
{
	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct rx_ring *trx_ring;
	int i, work_done = 0;
	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];

	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);

	/* Service the TX rings first.  They start
	 * right after the RSS rings.
	 */
	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
		trx_ring = &qdev->rx_ring[i];
		/* If this TX completion ring belongs to this vector and
		 * it's not empty then service it.
		 */
		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
		    (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
		     trx_ring->cnsmr_idx)) {
			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
				     "%s: Servicing TX completion ring %d.\n",
				     __func__, trx_ring->cq_id);
			ql_clean_outbound_rx_ring(trx_ring);
		}
	}

	/*
	 * Now service the RSS ring if it's active.
	 */
	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
	    rx_ring->cnsmr_idx) {
		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
			     "%s: Servicing RX completion ring %d.\n",
			     __func__, rx_ring->cq_id);
		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ql_enable_completion_interrupt(qdev, rx_ring->irq);
	}
	return work_done;
}

static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
			   NIC_RCV_CFG_VLAN_MATCH_AND_NON);
	} else {
		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
	}
}

/*
 * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
 * based on the features to enable/disable hardware vlan accel
 */
static int qlge_update_hw_vlan_features(struct net_device *ndev,
					netdev_features_t features)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status = 0;
	bool need_restart = netif_running(ndev);

	if (need_restart) {
		status = ql_adapter_down(qdev);
		if (status) {
			netif_err(qdev, link, qdev->ndev,
				  "Failed to bring down the adapter\n");
			return status;
		}
	}

	/* update the features with the recent change */
	ndev->features = features;

	if (need_restart) {
		status = ql_adapter_up(qdev);
		if (status) {
			netif_err(qdev, link, qdev->ndev,
				  "Failed to bring up the adapter\n");
			return status;
		}
	}

	return status;
}

static int qlge_set_features(struct net_device *ndev,
			     netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	int err;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		/* Update the behavior of vlan accel in the adapter */
		err = qlge_update_hw_vlan_features(ndev, features);
		if (err)
			return err;

		qlge_vlan_mode(ndev, features);
	}

	return 0;
}
2299
2300static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2301{
2302 u32 enable_bit = MAC_ADDR_E;
2303 int err;
2304
2305 err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2306 MAC_ADDR_TYPE_VLAN, vid);
2307 if (err)
2308 netif_err(qdev, ifup, qdev->ndev,
2309 "Failed to init vlan address.\n");
2310 return err;
2311}
2312
2313static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
2314{
2315 struct ql_adapter *qdev = netdev_priv(ndev);
2316 int status;
2317 int err;
2318
2319 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2320 if (status)
2321 return status;
2322
2323 err = __qlge_vlan_rx_add_vid(qdev, vid);
2324 set_bit(vid, qdev->active_vlans);
2325
2326 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2327
2328 return err;
2329}
2330
2331static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2332{
2333 u32 enable_bit = 0;
2334 int err;
2335
2336 err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit,
2337 MAC_ADDR_TYPE_VLAN, vid);
2338 if (err)
2339 netif_err(qdev, ifup, qdev->ndev,
2340 "Failed to clear vlan address.\n");
2341 return err;
2342}
2343
2344static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
2345{
2346 struct ql_adapter *qdev = netdev_priv(ndev);
2347 int status;
2348 int err;
2349
2350 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2351 if (status)
2352 return status;
2353
2354 err = __qlge_vlan_rx_kill_vid(qdev, vid);
2355 clear_bit(vid, qdev->active_vlans);
2356
2357 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2358
2359 return err;
2360}
2361
2362static void qlge_restore_vlan(struct ql_adapter *qdev)
2363{
2364 int status;
2365 u16 vid;
2366
2367 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2368 if (status)
2369 return;
2370
2371 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2372 __qlge_vlan_rx_add_vid(qdev, vid);
2373
2374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2375}
2376
2377
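/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */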
static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;

	napi_schedule(&rx_ring->napi);
	return IRQ_HANDLED;
}

/* This handles a fatal error, MPI activity, and the default
 * rx_ring in an MSI-X multiple vector environment.
 * In MSI/Legacy environment it also process the rest of
 * the rx_rings.
 */
static irqreturn_t qlge_isr(int irq, void *dev_id)
{
	struct rx_ring *rx_ring = dev_id;
	struct ql_adapter *qdev = rx_ring->qdev;
	struct intr_context *intr_context = &qdev->intr_context[0];
	u32 var;
	int work_done = 0;

	/* Experience shows that when using INTx interrupts, interrupts
	 * must be masked manually.
	 * When using MSI mode, INTR_EN_EN must be explicitly disabled
	 * (even though it is auto-masked), otherwise a later command to
	 * enable it is not effective.
	 */
	if (!test_bit(QL_MSIX_ENABLED, &qdev->flags))
		ql_disable_completion_interrupt(qdev, 0);

	var = ql_read32(qdev, STS);

	/*
	 * Check for fatal error.
	 */
	if (var & STS_FE) {
		ql_disable_completion_interrupt(qdev, 0);
		ql_queue_asic_error(qdev);
		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
		var = ql_read32(qdev, ERR_STS);
		netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n",
			   var);
		return IRQ_HANDLED;
	}

	/*
	 * Check MPI processor activity.
	 */
	if ((var & STS_PI) &&
	    (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
		/*
		 * We've got an async event or mailbox completion.
		 * Handle it and clear the source of the interrupt.
		 */
		netif_err(qdev, intr, qdev->ndev,
			  "Got MPI processor interrupt.\n");
		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
		queue_delayed_work_on(smp_processor_id(),
				      qdev->workqueue, &qdev->mpi_work, 0);
		work_done++;
	}

	/*
	 * Get the bit-mask that shows the active queues for this
	 * pass.  Compare it to the queues that this irq services
	 * and call napi if there's a match.
	 */
	var = ql_read32(qdev, ISR1);
	if (var & intr_context->irq_mask) {
		netif_info(qdev, intr, qdev->ndev,
			   "Waking handler for rx_ring[0].\n");
		napi_schedule(&rx_ring->napi);
		work_done++;
	} else {
		/*
		 * No completion activity for the rings this vector
		 * services, so just re-enable the interrupt that was
		 * disabled (or auto-masked) on entry.
		 */
		ql_enable_completion_interrupt(qdev, 0);
	}

	return work_done ? IRQ_HANDLED : IRQ_NONE;
}

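/* Set up a TSO IOCB for a GSO skb.  Returns 1 if TSO was set up,
 * 0 if the frame does not need TSO, or a negative errno on failure.
 */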
static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	if (skb_is_gso(skb)) {
		int err;
		__be16 l3_proto = vlan_get_protocol(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return err;

		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
		mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
		mac_iocb_ptr->net_trans_offset =
			cpu_to_le16(skb_network_offset(skb) |
				    skb_transport_offset(skb)
				    << OB_MAC_TRANSPORT_HDR_SHIFT);
		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
		if (likely(l3_proto == htons(ETH_P_IP))) {
			struct iphdr *iph = ip_hdr(skb);

			iph->check = 0;
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
		}
		return 1;
	}
	return 0;
}

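/* Set up the IOCB for hardware TX checksum of a TCP or UDP over IPv4
 * frame.  The hardware inserts the checksum computed over the pseudo
 * header that is seeded below.
 */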
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
{
	int len;
	struct iphdr *iph = ip_hdr(skb);
	__sum16 *check;

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
	mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
	mac_iocb_ptr->net_trans_offset =
		cpu_to_le16(skb_network_offset(skb) |
			    skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);

	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
	if (likely(iph->protocol == IPPROTO_TCP)) {
		check = &(tcp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    (tcp_hdr(skb)->doff << 2));
	} else {
		check = &(udp_hdr(skb)->check);
		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
		mac_iocb_ptr->total_hdrs_len =
			cpu_to_le16(skb_transport_offset(skb) +
				    sizeof(struct udphdr));
	}
	*check = ~csum_tcpudp_magic(iph->saddr,
				    iph->daddr, len, iph->protocol, 0);
}

static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct tx_ring_desc *tx_ring_desc;
	struct ob_mac_iocb_req *mac_iocb_ptr;
	struct ql_adapter *qdev = netdev_priv(ndev);
	int tso;
	struct tx_ring *tx_ring;
	u32 tx_ring_idx = (u32)skb->queue_mapping;

	tx_ring = &qdev->tx_ring[tx_ring_idx];

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_info(qdev, tx_queued, qdev->ndev,
			   "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
			   __func__, tx_ring_idx);
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
	mac_iocb_ptr = tx_ring_desc->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));

	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
	mac_iocb_ptr->tid = tx_ring_desc->index;
	/* We use the upper 32-bits to store the tx queue for this IO.
	 * When we get the completion we can use it to establish the context.
	 */
	mac_iocb_ptr->txq_idx = tx_ring_idx;
	tx_ring_desc->skb = skb;

	mac_iocb_ptr->frame_len = cpu_to_le16((u16)skb->len);

	if (skb_vlan_tag_present(skb)) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
		mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
	}
	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
		ql_hw_csum_setup(skb,
				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
	}
	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
	    NETDEV_TX_OK) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "Could not map the segments.\n");
		tx_ring->tx_errors++;
		return NETDEV_TX_BUSY;
	}
	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
	tx_ring->prod_idx++;
	if (tx_ring->prod_idx == tx_ring->wq_len)
		tx_ring->prod_idx = 0;
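	/* Make sure the IOCB contents are visible to the device
	 * before the producer-index doorbell write below.
	 */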
	wmb();

	ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
		     "tx queued, slot %d, len %d\n",
		     tx_ring->prod_idx, skb->len);

	atomic_dec(&tx_ring->tx_count);

	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
		netif_stop_subqueue(ndev, tx_ring->wq_id);
		if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
			/*
			 * The queue got stopped because the tx_ring was full.
			 * Wake it up, because it's now at least 25% empty.
			 */
			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
	}
	return NETDEV_TX_OK;
}

static void ql_free_shadow_space(struct ql_adapter *qdev)
{
	if (qdev->rx_ring_shadow_reg_area) {
		dma_free_coherent(&qdev->pdev->dev,
				  PAGE_SIZE,
				  qdev->rx_ring_shadow_reg_area,
				  qdev->rx_ring_shadow_reg_dma);
		qdev->rx_ring_shadow_reg_area = NULL;
	}
	if (qdev->tx_ring_shadow_reg_area) {
		dma_free_coherent(&qdev->pdev->dev,
				  PAGE_SIZE,
				  qdev->tx_ring_shadow_reg_area,
				  qdev->tx_ring_shadow_reg_dma);
		qdev->tx_ring_shadow_reg_area = NULL;
	}
}

static int ql_alloc_shadow_space(struct ql_adapter *qdev)
{
	qdev->rx_ring_shadow_reg_area =
		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
				   &qdev->rx_ring_shadow_reg_dma, GFP_ATOMIC);
	if (!qdev->rx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of RX shadow space failed.\n");
		return -ENOMEM;
	}

	qdev->tx_ring_shadow_reg_area =
		dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
				   &qdev->tx_ring_shadow_reg_dma, GFP_ATOMIC);
	if (!qdev->tx_ring_shadow_reg_area) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Allocation of TX shadow space failed.\n");
		goto err_wqp_sh_area;
	}
	return 0;

err_wqp_sh_area:
	dma_free_coherent(&qdev->pdev->dev,
			  PAGE_SIZE,
			  qdev->rx_ring_shadow_reg_area,
			  qdev->rx_ring_shadow_reg_dma);
	return -ENOMEM;
}

static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct tx_ring_desc *tx_ring_desc;
	int i;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	mac_iocb_ptr = tx_ring->wq_base;
	tx_ring_desc = tx_ring->q;
	for (i = 0; i < tx_ring->wq_len; i++) {
		tx_ring_desc->index = i;
		tx_ring_desc->skb = NULL;
		tx_ring_desc->queue_entry = mac_iocb_ptr;
		mac_iocb_ptr++;
		tx_ring_desc++;
	}
	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
}

static void ql_free_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	if (tx_ring->wq_base) {
		dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
				  tx_ring->wq_base, tx_ring->wq_base_dma);
		tx_ring->wq_base = NULL;
	}
	kfree(tx_ring->q);
	tx_ring->q = NULL;
}

static int ql_alloc_tx_resources(struct ql_adapter *qdev,
				 struct tx_ring *tx_ring)
{
	tx_ring->wq_base =
		dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size,
				   &tx_ring->wq_base_dma, GFP_ATOMIC);

	if (!tx_ring->wq_base ||
	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
		goto pci_alloc_err;

	tx_ring->q =
		kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc),
			      GFP_KERNEL);
	if (!tx_ring->q)
		goto err;

	return 0;
err:
	dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size,
			  tx_ring->wq_base, tx_ring->wq_base_dma);
	tx_ring->wq_base = NULL;
pci_alloc_err:
	netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
	return -ENOMEM;
}

static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct qlge_bq *lbq = &rx_ring->lbq;
	unsigned int last_offset;

	last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size;
	while (lbq->next_to_clean != lbq->next_to_use) {
		struct qlge_bq_desc *lbq_desc =
			&lbq->queue[lbq->next_to_clean];

		if (lbq_desc->p.pg_chunk.offset == last_offset)
			dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr,
				       ql_lbq_block_size(qdev),
				       DMA_FROM_DEVICE);
		put_page(lbq_desc->p.pg_chunk.page);

		lbq->next_to_clean = QLGE_BQ_WRAP(lbq->next_to_clean + 1);
	}

	if (rx_ring->master_chunk.page) {
		dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr,
			       ql_lbq_block_size(qdev), DMA_FROM_DEVICE);
		put_page(rx_ring->master_chunk.page);
		rx_ring->master_chunk.page = NULL;
	}
}

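/* Unmap and free any skbs still sitting in the small buffer queue. */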
static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	int i;

	for (i = 0; i < QLGE_BQ_LEN; i++) {
		struct qlge_bq_desc *sbq_desc = &rx_ring->sbq.queue[i];

		if (!sbq_desc) {
			netif_err(qdev, ifup, qdev->ndev,
				  "sbq_desc %d is NULL.\n", i);
			return;
		}
		if (sbq_desc->p.skb) {
			dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr,
					 SMALL_BUF_MAP_SIZE,
					 DMA_FROM_DEVICE);
			dev_kfree_skb(sbq_desc->p.skb);
			sbq_desc->p.skb = NULL;
		}
	}
}

/* Free all large and small rx buffers associated
 * with the completion queues for this device.
 */
static void ql_free_rx_buffers(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		struct rx_ring *rx_ring = &qdev->rx_ring[i];

		if (rx_ring->lbq.queue)
			ql_free_lbq_buffers(qdev, rx_ring);
		if (rx_ring->sbq.queue)
			ql_free_sbq_buffers(qdev, rx_ring);
	}
}

static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->rss_ring_count; i++)
		ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL,
					HZ / 2);
}

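/* Allocate the DMA ring and per-entry bookkeeping for one small or
 * large buffer queue.
 */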
static int qlge_init_bq(struct qlge_bq *bq)
{
	struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq);
	struct ql_adapter *qdev = rx_ring->qdev;
	struct qlge_bq_desc *bq_desc;
	__le64 *buf_ptr;
	int i;

	bq->base = dma_alloc_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				      &bq->base_dma, GFP_ATOMIC);
	if (!bq->base) {
		netif_err(qdev, ifup, qdev->ndev,
			  "ring %u %s allocation failed.\n", rx_ring->cq_id,
			  bq_type_name[bq->type]);
		return -ENOMEM;
	}

	bq->queue = kmalloc_array(QLGE_BQ_LEN, sizeof(struct qlge_bq_desc),
				  GFP_KERNEL);
	if (!bq->queue)
		return -ENOMEM;

	buf_ptr = bq->base;
	bq_desc = &bq->queue[0];
	for (i = 0; i < QLGE_BQ_LEN; i++, buf_ptr++, bq_desc++) {
		bq_desc->p.skb = NULL;
		bq_desc->index = i;
		bq_desc->buf_ptr = buf_ptr;
	}

	return 0;
}

static void ql_free_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/* Free the small buffer queue. */
	if (rx_ring->sbq.base) {
		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				  rx_ring->sbq.base, rx_ring->sbq.base_dma);
		rx_ring->sbq.base = NULL;
	}

	/* Free the small buffer queue control blocks. */
	kfree(rx_ring->sbq.queue);
	rx_ring->sbq.queue = NULL;

	/* Free the large buffer queue. */
	if (rx_ring->lbq.base) {
		dma_free_coherent(&qdev->pdev->dev, QLGE_BQ_SIZE,
				  rx_ring->lbq.base, rx_ring->lbq.base_dma);
		rx_ring->lbq.base = NULL;
	}

	/* Free the large buffer queue control blocks. */
	kfree(rx_ring->lbq.queue);
	rx_ring->lbq.queue = NULL;

	/* Free the rx queue. */
	if (rx_ring->cq_base) {
		dma_free_coherent(&qdev->pdev->dev,
				  rx_ring->cq_size,
				  rx_ring->cq_base, rx_ring->cq_base_dma);
		rx_ring->cq_base = NULL;
	}
}

/* Allocate queues and buffers for this completions queue based
 * on the values in the parameter structure.
 */
static int ql_alloc_rx_resources(struct ql_adapter *qdev,
				 struct rx_ring *rx_ring)
{
	/*
	 * Allocate the completion queue for this rx_ring.
	 */
	rx_ring->cq_base =
		dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size,
				   &rx_ring->cq_base_dma, GFP_ATOMIC);

	if (!rx_ring->cq_base) {
		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
		return -ENOMEM;
	}

	if (rx_ring->cq_id < qdev->rss_ring_count &&
	    (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) {
		ql_free_rx_resources(qdev, rx_ring);
		return -ENOMEM;
	}

	return 0;
}

static void ql_tx_ring_clean(struct ql_adapter *qdev)
{
	struct tx_ring *tx_ring;
	struct tx_ring_desc *tx_ring_desc;
	int i, j;

	/*
	 * Loop through all queues and free
	 * any resources.
	 */
	for (j = 0; j < qdev->tx_ring_count; j++) {
		tx_ring = &qdev->tx_ring[j];
		for (i = 0; i < tx_ring->wq_len; i++) {
			tx_ring_desc = &tx_ring->q[i];
			if (tx_ring_desc && tx_ring_desc->skb) {
				netif_err(qdev, ifdown, qdev->ndev,
					  "Freeing lost SKB %p, from queue %d, index %d.\n",
					  tx_ring_desc->skb, j,
					  tx_ring_desc->index);
				ql_unmap_send(qdev, tx_ring_desc,
					      tx_ring_desc->map_cnt);
				dev_kfree_skb(tx_ring_desc->skb);
				tx_ring_desc->skb = NULL;
			}
		}
	}
}

static void ql_free_mem_resources(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->tx_ring_count; i++)
		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
	for (i = 0; i < qdev->rx_ring_count; i++)
		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
	ql_free_shadow_space(qdev);
}

static int ql_alloc_mem_resources(struct ql_adapter *qdev)
{
	int i;

	/* Allocate space for our shadow registers */
	if (ql_alloc_shadow_space(qdev))
		return -ENOMEM;

	for (i = 0; i < qdev->rx_ring_count; i++) {
		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "RX resource allocation failed.\n");
			goto err_mem;
		}
	}

	for (i = 0; i < qdev->tx_ring_count; i++) {
		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
			netif_err(qdev, ifup, qdev->ndev,
				  "TX resource allocation failed.\n");
			goto err_mem;
		}
	}
	return 0;

err_mem:
	ql_free_mem_resources(qdev);
	return -ENOMEM;
}

/* Set up the rx ring control block and pass it to the chip.
 * The control block is defined as
 * "Completion Queue Initialization Control Block", or cqicb.
 */
static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	struct cqicb *cqicb = &rx_ring->cqicb;
	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
	int err = 0;
	u64 tmp;
	__le64 *base_indirect_ptr;
	int page_entries;

	/* Set up the shadow registers for this ring. */
	rx_ring->prod_idx_sh_reg = shadow_reg;
	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
	*rx_ring->prod_idx_sh_reg = 0;
	shadow_reg += sizeof(u64);
	shadow_reg_dma += sizeof(u64);
	rx_ring->lbq.base_indirect = shadow_reg;
	rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
	rx_ring->sbq.base_indirect = shadow_reg;
	rx_ring->sbq.base_indirect_dma = shadow_reg_dma;

	/* PCI doorbell mem area + 0x00 for consumer index register */
	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *)doorbell_area;
	rx_ring->cnsmr_idx = 0;
	rx_ring->curr_entry = rx_ring->cq_base;

	/* PCI doorbell mem area + 0x04 for valid register */
	rx_ring->valid_db_reg = doorbell_area + 0x04;

	/* PCI doorbell mem area + 0x18 for large buffer consumer */
	rx_ring->lbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x18);

	/* PCI doorbell mem area + 0x1c */
	rx_ring->sbq.prod_idx_db_reg = (u32 __iomem *)(doorbell_area + 0x1c);

	memset((void *)cqicb, 0, sizeof(struct cqicb));
	cqicb->msix_vect = rx_ring->irq;

	cqicb->len = cpu_to_le16(QLGE_FIT16(rx_ring->cq_len) | LEN_V |
				 LEN_CPP_CONT);

	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);

	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);

	/*
	 * Set up the control block load flags.
	 */
	cqicb->flags = FLAGS_LC |	/* Load coalescing values */
		FLAGS_LV |		/* Load MSI-X vector */
		FLAGS_LI;		/* Load irq delay values */
	if (rx_ring->cq_id < qdev->rss_ring_count) {
		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
		tmp = (u64)rx_ring->lbq.base_dma;
		base_indirect_ptr = rx_ring->lbq.base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
		cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
		cqicb->lbq_buf_size =
			cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
		cqicb->lbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
		rx_ring->lbq.next_to_use = 0;
		rx_ring->lbq.next_to_clean = 0;

		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
		tmp = (u64)rx_ring->sbq.base_dma;
		base_indirect_ptr = rx_ring->sbq.base_indirect;
		page_entries = 0;
		do {
			*base_indirect_ptr = cpu_to_le64(tmp);
			tmp += DB_PAGE_SIZE;
			base_indirect_ptr++;
			page_entries++;
		} while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
		cqicb->sbq_addr =
			cpu_to_le64(rx_ring->sbq.base_indirect_dma);
		cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
		cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
		rx_ring->sbq.next_to_use = 0;
		rx_ring->sbq.next_to_clean = 0;
	}
	if (rx_ring->cq_id < qdev->rss_ring_count) {
		/* Inbound completion handling rx_rings run in
		 * separate NAPI contexts.
		 */
		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
			       64);
		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
	} else {
		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
	}
	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
			   CFG_LCQ, rx_ring->cq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
		return err;
	}
	return err;
}

static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
{
	struct wqicb *wqicb = (struct wqicb *)tx_ring;
	void __iomem *doorbell_area =
		qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
		(tx_ring->wq_id * sizeof(u64));
	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
		(tx_ring->wq_id * sizeof(u64));
	int err = 0;

	/*
	 * Assign doorbell registers for this tx_ring.
	 */
	/* TX PCI doorbell mem area for tx producer index */
	tx_ring->prod_idx_db_reg = (u32 __iomem *)doorbell_area;
	tx_ring->prod_idx = 0;
	/* TX PCI doorbell mem area + 0x04 */
	tx_ring->valid_db_reg = doorbell_area + 0x04;

	/*
	 * Assign shadow registers for this tx_ring.
	 */
	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;

	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
	wqicb->rid = 0;
	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);

	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);

	ql_init_tx_ring(qdev, tx_ring);

	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
			   (u16)tx_ring->wq_id);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
		return err;
	}
	return err;
}

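/* Tear down MSI-X or MSI, whichever was enabled. */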
static void ql_disable_msix(struct ql_adapter *qdev)
{
	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
		pci_disable_msix(qdev->pdev);
		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
		kfree(qdev->msi_x_entry);
		qdev->msi_x_entry = NULL;
	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		pci_disable_msi(qdev->pdev);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
	}
}

/* Try to get the number of vectors stored in qdev->intr_count.
 * If we can't get that many we take whatever the range allocation
 * gives us, falling back to MSI and then legacy INTx if necessary.
 */
static void ql_enable_msix(struct ql_adapter *qdev)
{
	int i, err;

	/* Get the MSIX vectors. */
	if (qlge_irq_type == MSIX_IRQ) {
		/* Try to alloc space for the msix struct,
		 * if it fails then go to MSI/legacy.
		 */
		qdev->msi_x_entry = kcalloc(qdev->intr_count,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!qdev->msi_x_entry) {
			qlge_irq_type = MSI_IRQ;
			goto msi;
		}

		for (i = 0; i < qdev->intr_count; i++)
			qdev->msi_x_entry[i].entry = i;

		err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
					    1, qdev->intr_count);
		if (err < 0) {
			kfree(qdev->msi_x_entry);
			qdev->msi_x_entry = NULL;
			netif_warn(qdev, ifup, qdev->ndev,
				   "MSI-X Enable failed, trying MSI.\n");
			qlge_irq_type = MSI_IRQ;
		} else {
			qdev->intr_count = err;
			set_bit(QL_MSIX_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "MSI-X Enabled, got %d vectors.\n",
				   qdev->intr_count);
			return;
		}
	}
msi:
	qdev->intr_count = 1;
	if (qlge_irq_type == MSI_IRQ) {
		if (pci_alloc_irq_vectors(qdev->pdev, 1, 1, PCI_IRQ_MSI) >= 0) {
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			netif_info(qdev, ifup, qdev->ndev,
				   "Running with MSI interrupts.\n");
			return;
		}
	}
	qlge_irq_type = LEG_IRQ;
	set_bit(QL_LEGACY_ENABLED, &qdev->flags);
	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "Running with legacy interrupts.\n");
}

/* Each vector services 1 RSS ring and 1 or more
 * TX completion rings.  This function loops through
 * the TX completion rings and assigns the vector that
 * will service it.  An example would be if there are
 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
 * This would mean that vector 0 would service RSS ring 0
 * and TX completion rings 0,1,2 and 3.  Vector 1 would
 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
 */
static void ql_set_tx_vect(struct ql_adapter *qdev)
{
	int i, j, vect;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Assign irq vectors to TX rx_rings. */
		for (vect = 0, j = 0, i = qdev->rss_ring_count;
		     i < qdev->rx_ring_count; i++) {
			if (j == tx_rings_per_vector) {
				vect++;
				j = 0;
			}
			qdev->rx_ring[i].irq = vect;
			j++;
		}
	} else {
		/* For single vector all rings have an irq
		 * of 0.
		 */
		for (i = 0; i < qdev->rx_ring_count; i++)
			qdev->rx_ring[i].irq = 0;
	}
}

/* Set the interrupt mask for this vector.  Each vector
 * will service 1 RSS ring and 1 or more TX completion
 * rings.  This function sets up a bit mask per vector
 * in the irq_mask field of the intr_context struct.
 */
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
{
	int j, vect = ctx->intr;
	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Add the RSS ring serviced by this vector
		 * to the mask.
		 */
		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
		/* Add the TX ring(s) serviced by this vector
		 * to the mask.
		 */
		for (j = 0; j < tx_rings_per_vector; j++) {
			ctx->irq_mask |=
				(1 << qdev->rx_ring[qdev->rss_ring_count +
				 (vect * tx_rings_per_vector) + j].cq_id);
		}
	} else {
		/* For single vector we just shift each queue's
		 * ID into the mask.
		 */
		for (j = 0; j < qdev->rx_ring_count; j++)
			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
	}
}

/*
 * Here we set up the intr_context for each vector.  With
 * MSI-X we get one vector (and one intr_context) per rx_ring;
 * otherwise a single vector services all of the rings and
 * queues.
 */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
{
	int i = 0;
	struct intr_context *intr_context = &qdev->intr_context[0];

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its own intr_context since
		 * we have separate vectors for each queue.
		 */
		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
			qdev->rx_ring[i].irq = i;
			intr_context->intr = i;
			intr_context->qdev = qdev;
			/* Set up this vector's bit-mask that indicates
			 * which queues it services.
			 */
			ql_set_irq_mask(qdev, intr_context);
			/*
			 * We set up each vectors enable/disable/read bits
			 * so there's no bit/mask calculations in the
			 * critical path.
			 */
			intr_context->intr_en_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
				| i;
			intr_context->intr_dis_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
				INTR_EN_IHD | i;
			intr_context->intr_read_mask =
				INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
				INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
				i;
			if (i == 0) {
				/* The first vector/queue handles
				 * broadcast/multicast, fatal errors,
				 * and firmware events.  This in addition
				 * to normal inbound NAPI processing.
				 */
				intr_context->handler = qlge_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			} else {
				/*
				 * Inbound queues handle unicast frames only.
				 */
				intr_context->handler = qlge_msix_rx_isr;
				sprintf(intr_context->name, "%s-rx-%d",
					qdev->ndev->name, i);
			}
		}
	} else {
		/*
		 * All rx_rings use the same intr_context since
		 * there is only one vector.
		 */
		intr_context->intr = 0;
		intr_context->qdev = qdev;
		/*
		 * We set up each vectors enable/disable/read bits
		 * so there's no bit/mask calculations in the critical path.
		 */
		intr_context->intr_en_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
		intr_context->intr_dis_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
			INTR_EN_TYPE_DISABLE;
		if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) {
			/* Experience shows that when using INTx interrupts,
			 * the device does not always auto-mask INTR_EN_EN.
			 * Moreover, masking INTR_EN_EN manually does not
			 * immediately prevent interrupt generation.
			 */
			intr_context->intr_en_mask |= INTR_EN_EI << 16 |
				INTR_EN_EI;
			intr_context->intr_dis_mask |= INTR_EN_EI << 16;
		}
		intr_context->intr_read_mask =
			INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
		/*
		 * Single interrupt means one handler for all rings.
		 */
		intr_context->handler = qlge_isr;
		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
		/*
		 * Set up this vector's bit-mask that indicates
		 * which queues it services. In this case there is
		 * a single vector so it will service all RSS and
		 * TX completion rings.
		 */
		ql_set_irq_mask(qdev, intr_context);
	}
	/* Tell the TX completion rings which MSIx vector
	 * they will be using.
	 */
	ql_set_tx_vect(qdev);
}

static void ql_free_irq(struct ql_adapter *qdev)
{
	int i;
	struct intr_context *intr_context = &qdev->intr_context[0];

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (intr_context->hooked) {
			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
				free_irq(qdev->msi_x_entry[i].vector,
					 &qdev->rx_ring[i]);
			} else {
				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
			}
		}
	}
	ql_disable_msix(qdev);
}

static int ql_request_irq(struct ql_adapter *qdev)
{
	int i;
	int status = 0;
	struct pci_dev *pdev = qdev->pdev;
	struct intr_context *intr_context = &qdev->intr_context[0];

	ql_resolve_queues_to_irqs(qdev);

	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
			status = request_irq(qdev->msi_x_entry[i].vector,
					     intr_context->handler,
					     0,
					     intr_context->name,
					     &qdev->rx_ring[i]);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed request for MSIX interrupt %d.\n",
					  i);
				goto err_irq;
			}
		} else {
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "trying msi or legacy interrupts.\n");
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: irq = %d.\n", __func__, pdev->irq);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: context->name = %s.\n", __func__,
				     intr_context->name);
			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "%s: dev_id = 0x%p.\n", __func__,
				     &qdev->rx_ring[0]);
			status =
				request_irq(pdev->irq, qlge_isr,
					    test_bit(QL_MSI_ENABLED, &qdev->flags)
					    ? 0
					    : IRQF_SHARED,
					    intr_context->name, &qdev->rx_ring[0]);
			if (status)
				goto err_irq;

			netif_err(qdev, ifup, qdev->ndev,
				  "Hooked intr 0, queue type RX_Q, with name %s.\n",
				  intr_context->name);
		}
		intr_context->hooked = 1;
	}
	return status;
err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
	ql_free_irq(qdev);
	return status;
}

static int ql_start_rss(struct ql_adapter *qdev)
{
	static const u8 init_hash_seed[] = {
		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
	};
	struct ricb *ricb = &qdev->ricb;
	int status = 0;
	int i;
	u8 *hash_id = (u8 *)ricb->hash_cq_id;

	memset((void *)ricb, 0, sizeof(*ricb));

	ricb->base_cq = RSS_L4K;
	ricb->flags =
		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
	ricb->mask = cpu_to_le16((u16)(0x3ff));

	/*
	 * Fill out the Indirection Table.
	 */
	for (i = 0; i < 1024; i++)
		hash_id[i] = (i & (qdev->rss_ring_count - 1));

	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
		return status;
	}
	return status;
}

static int ql_clear_routing_entries(struct ql_adapter *qdev)
{
	int i, status = 0;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	/* Clear all the entries in the routing table. */
	for (i = 0; i < 16; i++) {
		status = ql_set_routing_reg(qdev, i, 0, 0);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for CAM packets.\n");
			break;
		}
	}
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

/* Initialize the frame-to-queue routing. */
static int ql_route_initialize(struct ql_adapter *qdev)
{
	int status = 0;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status)
		return status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return status;

	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
				    RT_IDX_IP_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for IP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
				    RT_IDX_TU_CSUM_ERR, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for TCP/UDP CSUM error packets.\n");
		goto exit;
	}
	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for broadcast packets.\n");
		goto exit;
	}
	/* If we have more than one inbound queue, then turn on RSS in the
	 * routing block.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
					    RT_IDX_RSS_MATCH, 1);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to init routing register for MATCH RSS packets.\n");
			goto exit;
		}
	}

	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
				    RT_IDX_CAM_HIT, 1);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init routing register for CAM packets.\n");
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
	return status;
}

int ql_cam_route_initialize(struct ql_adapter *qdev)
{
	int status, set;

	/*
	 * Check if the link is up, and use that to determine if we
	 * are setting or clearing the MAC address in the CAM.
	 */
	set = ql_read32(qdev, STS);
	set &= qdev->port_link_up;
	status = ql_set_mac_addr(qdev, set);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
		return status;
	}

	status = ql_route_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");

	return status;
}

static int ql_adapter_initialize(struct ql_adapter *qdev)
{
	u32 value, mask;
	int i;
	int status = 0;

	/*
	 * Set up the System register to halt on errors.
	 */
	value = SYS_EFE | SYS_FAE;
	mask = value << 16;
	ql_write32(qdev, SYS, mask | value);

	/* Set the default queue, and VLAN behavior. */
	value = NIC_RCV_CFG_DFQ;
	mask = NIC_RCV_CFG_DFQ_MASK;
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		value |= NIC_RCV_CFG_RV;
		mask |= (NIC_RCV_CFG_RV << 16);
	}
	ql_write32(qdev, NIC_RCV_CFG, (mask | value));

	/* Set the MPI interrupt to enabled. */
	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);

	/* Enable the function, set pagesize, enable error checking. */
	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
		FSC_EC | FSC_VM_PAGE_4K;
	value |= SPLT_SETTING;

	/* Set/clear header splitting. */
	mask = FSC_VM_PAGESIZE_MASK |
		FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
	ql_write32(qdev, FSC, mask | value);

	ql_write32(qdev, SPLT_HDR, SPLT_LEN);

	/* Set RX packet routing to use port/pci function on which the
	 * packet arrived on in addition to usual frame routing.
	 * This is helpful on bonding where both interfaces can have
	 * the same MAC address.
	 */
	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

	/* Reroute all packets to our Interface.
	 * They may have been routed to MPI firmware
	 * due to WOL.
	 */
	value = ql_read32(qdev, MGMT_RCV_CFG);
	value &= ~MGMT_RCV_CFG_RM;
	mask = 0xffff0000;

	/* Sticky reg needs clearing due to WOL. */
	ql_write32(qdev, MGMT_RCV_CFG, mask);
	ql_write32(qdev, MGMT_RCV_CFG, mask | value);

	/* Default WOL is enable on Mezz cards */
	if (qdev->pdev->subsystem_device == 0x0068 ||
	    qdev->pdev->subsystem_device == 0x0180)
		qdev->wol = WAKE_MAGIC;

	/* Start up the rx queues. */
	for (i = 0; i < qdev->rx_ring_count; i++) {
		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start rx ring[%d].\n", i);
			return status;
		}
	}

	/* If there is more than one inbound completion queue
	 * then download a RICB to configure RSS.
	 */
	if (qdev->rss_ring_count > 1) {
		status = ql_start_rss(qdev);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
			return status;
		}
	}

	/* Start up the tx queues. */
	for (i = 0; i < qdev->tx_ring_count; i++) {
		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Failed to start tx ring[%d].\n", i);
			return status;
		}
	}

	/* Initialize the port and set the max framesize. */
	status = qdev->nic_ops->port_initialize(qdev);
	if (status)
		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");

	/* Set up the MAC address and frame routing filter. */
	status = ql_cam_route_initialize(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init CAM/Routing tables.\n");
		return status;
	}

	/* Start NAPI for the RSS queues. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_enable(&qdev->rx_ring[i].napi);

	return status;
}

/* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
{
	u32 value;
	int status = 0;
	unsigned long end_jiffies;

	/* Clear all the entries in the routing table. */
	status = ql_clear_routing_entries(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
		return status;
	}

	/* Check if bit is set then skip the mailbox command and
	 * clear the bit, else we are in normal reset process.
	 */
	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
		/* Stop management traffic. */
		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

		/* Wait for the NIC and MGMNT FIFOs to empty. */
		ql_wait_fifo_empty(qdev);
	} else {
		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
	}

	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

	end_jiffies = jiffies + usecs_to_jiffies(30);
	do {
		value = ql_read32(qdev, RST_FO);
		if ((value & RST_FO_FR) == 0)
			break;
		cpu_relax();
	} while (time_before(jiffies, end_jiffies));

	if (value & RST_FO_FR) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
		status = -ETIMEDOUT;
	}

	/* Resume management traffic. */
	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
	return status;
}

static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	netif_info(qdev, probe, qdev->ndev,
		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
		   "XG Roll = %d, XG Rev = %d.\n",
		   qdev->func,
		   qdev->port,
		   qdev->chip_rev_id & 0x0000000f,
		   qdev->chip_rev_id >> 4 & 0x0000000f,
		   qdev->chip_rev_id >> 8 & 0x0000000f,
		   qdev->chip_rev_id >> 12 & 0x0000000f);
	netif_info(qdev, probe, qdev->ndev,
		   "MAC address %pM\n", ndev->dev_addr);
}

static int ql_wol(struct ql_adapter *qdev)
{
	int status = 0;
	u32 wol = MB_WOL_DISABLE;

	/* The CAM is still intact after a reset, but if we
	 * are doing WOL, then we may need to program the
	 * routing regs. We would also need to issue the mailbox
	 * command to instruct the MPI what to do per the ethtool
	 * settings.
	 */
	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
			 WAKE_MCAST | WAKE_BCAST)) {
		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
			  qdev->wol);
		return -EINVAL;
	}

	if (qdev->wol & WAKE_MAGIC) {
		status = ql_mb_wol_set_magic(qdev, 1);
		if (status) {
			netif_err(qdev, ifdown, qdev->ndev,
				  "Failed to set magic packet on %s.\n",
				  qdev->ndev->name);
			return status;
		}
		netif_info(qdev, drv, qdev->ndev,
			   "Enabled magic packet successfully on %s.\n",
			   qdev->ndev->name);

		wol |= MB_WOL_MAGIC_PKT;
	}

	if (qdev->wol) {
		wol |= MB_WOL_MODE_ON;
		status = ql_mb_wol_mode(qdev, wol);
		netif_err(qdev, drv, qdev->ndev,
			  "WOL %s (wol code 0x%x) on %s\n",
			  (status == 0) ? "Successfully set" : "Failed",
			  wol, qdev->ndev->name);
	}

	return status;
}

static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
{
	/* Don't kill the reset worker thread if we
	 * are in the process of recovery.
	 */
	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
}

static int ql_adapter_down(struct ql_adapter *qdev)
{
	int i, status = 0;

	ql_link_off(qdev);

	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		napi_disable(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);

	ql_disable_interrupts(qdev);

	ql_tx_ring_clean(qdev);

	/* Call netif_napi_del() from common point. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	status = ql_adapter_reset(qdev);
	if (status)
		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
			  qdev->func);
	ql_free_rx_buffers(qdev);

	return status;
}

static int ql_adapter_up(struct ql_adapter *qdev)
{
	int err = 0;

	err = ql_adapter_initialize(qdev);
	if (err) {
		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
		goto err_init;
	}
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_alloc_rx_buffers(qdev);

	/* If the port is initialized and the
	 * link is up then turn on the carrier.
	 */
	if ((ql_read32(qdev, STS) & qdev->port_init) &&
	    (ql_read32(qdev, STS) & qdev->port_link_up))
		ql_link_on(qdev);

	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	/* Restore vlan setting. */
	qlge_restore_vlan(qdev);

	ql_enable_interrupts(qdev);
	ql_enable_all_completion_interrupts(qdev);
	netif_tx_start_all_queues(qdev->ndev);

	return 0;
err_init:
	ql_adapter_reset(qdev);
	return err;
}

static void ql_release_adapter_resources(struct ql_adapter *qdev)
{
	ql_free_mem_resources(qdev);
	ql_free_irq(qdev);
}

static int ql_get_adapter_resources(struct ql_adapter *qdev)
{
	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
		return -ENOMEM;
	}
	return ql_request_irq(qdev);
}

static int qlge_close(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int i;

	/* If we hit pci_channel_io_perm_failure
	 * failure condition, then we already
	 * brought the adapter down.
	 */
	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
		clear_bit(QL_EEH_FATAL, &qdev->flags);
		return 0;
	}

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(1);

	/* Make sure refill_work doesn't re-enable napi. */
	for (i = 0; i < qdev->rss_ring_count; i++)
		cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work);

	ql_adapter_down(qdev);
	ql_release_adapter_resources(qdev);
	return 0;
}

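/* Pick the large-buffer size for the current MTU: standard frames use
 * the minimum size, jumbo frames the maximum.
 */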
static void qlge_set_lb_size(struct ql_adapter *qdev)
{
	if (qdev->ndev->mtu <= 1500)
		qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE;
	else
		qdev->lbq_buf_size = LARGE_BUFFER_MAX_SIZE;
	qdev->lbq_buf_order = get_order(qdev->lbq_buf_size);
}

static int ql_configure_rings(struct ql_adapter *qdev)
{
	int i;
	struct rx_ring *rx_ring;
	struct tx_ring *tx_ring;
	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());

	/* In a perfect world we have one RSS ring for each CPU
	 * and each has it's own vector.  To do that we ask for
	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
	 * vector count to what we actually get.  We then
	 * allocate an RSS ring for each.
	 * This limitation can be removed when requested.
	 */
	qdev->intr_count = cpu_cnt;
	ql_enable_msix(qdev);
	/* Adjust the RSS ring count to the actual vector count. */
	qdev->rss_ring_count = qdev->intr_count;
	qdev->tx_ring_count = cpu_cnt;
	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;

	for (i = 0; i < qdev->tx_ring_count; i++) {
		tx_ring = &qdev->tx_ring[i];
		memset((void *)tx_ring, 0, sizeof(*tx_ring));
		tx_ring->qdev = qdev;
		tx_ring->wq_id = i;
		tx_ring->wq_len = qdev->tx_ring_size;
		tx_ring->wq_size =
			tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);

		/*
		 * The completion queue ID for the tx rings start
		 * immediately after the rss rings.
		 */
		tx_ring->cq_id = qdev->rss_ring_count + i;
	}

	for (i = 0; i < qdev->rx_ring_count; i++) {
		rx_ring = &qdev->rx_ring[i];
		memset((void *)rx_ring, 0, sizeof(*rx_ring));
		rx_ring->qdev = qdev;
		rx_ring->cq_id = i;
		rx_ring->cpu = i % cpu_cnt;
		if (i < qdev->rss_ring_count) {
			/*
			 * Inbound (RSS) queues.
			 */
			rx_ring->cq_len = qdev->rx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
			rx_ring->lbq.type = QLGE_LB;
			rx_ring->sbq.type = QLGE_SB;
			INIT_DELAYED_WORK(&rx_ring->refill_work,
					  &qlge_slow_refill);
		} else {
			/*
			 * Outbound queue handles outbound completions only.
			 * The outbound cq is the same size as the tx_ring
			 * it services.
			 */
			rx_ring->cq_len = qdev->tx_ring_size;
			rx_ring->cq_size =
				rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
		}
	}
	return 0;
}

static int qlge_open(struct net_device *ndev)
{
	int err = 0;
	struct ql_adapter *qdev = netdev_priv(ndev);

	err = ql_adapter_reset(qdev);
	if (err)
		return err;

	qlge_set_lb_size(qdev);
	err = ql_configure_rings(qdev);
	if (err)
		return err;

	err = ql_get_adapter_resources(qdev);
	if (err)
		goto error_up;

	err = ql_adapter_up(qdev);
	if (err)
		goto error_up;

	return err;

error_up:
	ql_release_adapter_resources(qdev);
	return err;
}

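/* Bring the adapter down and back up to pick up the new large-buffer
 * sizing after an MTU change.
 */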
static int ql_change_rx_buffers(struct ql_adapter *qdev)
{
	int status;

	/* Wait for an outstanding reset to complete. */
	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
		int i = 4;

		while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Waiting for adapter UP...\n");
			ssleep(1);
		}

		if (!i) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Timed out waiting for adapter UP\n");
			return -ETIMEDOUT;
		}
	}

	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	qlge_set_lb_size(qdev);

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	return status;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device.\n");
	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	return status;
}

static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int status;

	if (ndev->mtu == 1500 && new_mtu == 9000)
		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
	else if (ndev->mtu == 9000 && new_mtu == 1500)
		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
	else
		return -EINVAL;

	queue_delayed_work(qdev->workqueue,
			   &qdev->mpi_port_cfg_work, 3 * HZ);

	ndev->mtu = new_mtu;

	if (!netif_running(qdev->ndev))
		return 0;

	status = ql_change_rx_buffers(qdev);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Changing MTU failed.\n");
	}

	return status;
}

static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct rx_ring *rx_ring = &qdev->rx_ring[0];
	struct tx_ring *tx_ring = &qdev->tx_ring[0];
	unsigned long pkts, mcast, dropped, errors, bytes;
	int i;

	/* Get RX stats. */
	pkts = mcast = dropped = errors = bytes = 0;
	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
		pkts += rx_ring->rx_packets;
		bytes += rx_ring->rx_bytes;
		dropped += rx_ring->rx_dropped;
		errors += rx_ring->rx_errors;
		mcast += rx_ring->rx_multicast;
	}
	ndev->stats.rx_packets = pkts;
	ndev->stats.rx_bytes = bytes;
	ndev->stats.rx_dropped = dropped;
	ndev->stats.rx_errors = errors;
	ndev->stats.multicast = mcast;

	/* Get TX stats. */
	pkts = errors = bytes = 0;
	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
		pkts += tx_ring->tx_packets;
		bytes += tx_ring->tx_bytes;
		errors += tx_ring->tx_errors;
	}
	ndev->stats.tx_packets = pkts;
	ndev->stats.tx_bytes = bytes;
	ndev->stats.tx_errors = errors;
	return &ndev->stats;
}

static void qlge_set_multicast_list(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	int i, status;

	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
	if (status)
		return;
	/*
	 * Set or clear promiscuous mode if a
	 * transition is taking place.
	 */
	if (ndev->flags & IFF_PROMISC) {
		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set promiscuous mode.\n");
			} else {
				set_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear promiscuous mode.\n");
			} else {
				clear_bit(QL_PROMISCUOUS, &qdev->flags);
			}
		}
	}

	/*
	 * Set or clear all multicast mode if a
	 * transition is taking place.
	 */
	if ((ndev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to set all-multi mode.\n");
			} else {
				set_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	} else {
		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
			if (ql_set_routing_reg
			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to clear all-multi mode.\n");
			} else {
				clear_bit(QL_ALLMULTI, &qdev->flags);
			}
		}
	}

	if (!netdev_mc_empty(ndev)) {
		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
		if (status)
			goto exit;
		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr,
						MAC_ADDR_TYPE_MULTI_MAC, i)) {
				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
				goto exit;
			}
			i++;
		}
		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
		if (ql_set_routing_reg
		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
			netif_err(qdev, hw, qdev->ndev,
				  "Failed to set multicast match mode.\n");
		} else {
			set_bit(QL_ALLMULTI, &qdev->flags);
		}
	}
exit:
	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
}

static int qlge_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	struct sockaddr *addr = p;
	int status;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
	/* Update local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
				     MAC_ADDR_TYPE_CAM_MAC,
				     qdev->func * MAX_CQ);
	if (status)
		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	return status;
}

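/* Treat a TX timeout as a fatal error and schedule an asic reset,
 * which brings the adapter down and back up.
 */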
4253static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue)
4254{
4255 struct ql_adapter *qdev = netdev_priv(ndev);
4256
4257 ql_queue_asic_error(qdev);
4258}
4259
4260static void ql_asic_reset_work(struct work_struct *work)
4261{
4262 struct ql_adapter *qdev =
4263 container_of(work, struct ql_adapter, asic_reset_work.work);
4264 int status;
4265
4266 rtnl_lock();
4267 status = ql_adapter_down(qdev);
4268 if (status)
4269 goto error;
4270
4271 status = ql_adapter_up(qdev);
4272 if (status)
4273 goto error;
4274
4275
4276 clear_bit(QL_ALLMULTI, &qdev->flags);
4277 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4278 qlge_set_multicast_list(qdev->ndev);
4279
4280 rtnl_unlock();
4281 return;
4282error:
4283 netif_alert(qdev, ifup, qdev->ndev,
4284 "Driver up/down cycle failed, closing device\n");
4285
4286 set_bit(QL_ADAPTER_UP, &qdev->flags);
4287 dev_close(qdev->ndev);
4288 rtnl_unlock();
4289}
4290
4291static const struct nic_operations qla8012_nic_ops = {
4292 .get_flash = ql_get_8012_flash_params,
4293 .port_initialize = ql_8012_port_initialize,
4294};
4295
4296static const struct nic_operations qla8000_nic_ops = {
4297 .get_flash = ql_get_8000_flash_params,
4298 .port_initialize = ql_8000_port_initialize,
4299};
4300
4301
4302
4303
4304
4305
4306
4307
4308static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4309{
4310 int status = 0;
4311 u32 temp;
4312 u32 nic_func1, nic_func2;
4313
4314 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4315 &temp);
4316 if (status)
4317 return status;
4318
4319 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4320 MPI_TEST_NIC_FUNC_MASK);
4321 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4322 MPI_TEST_NIC_FUNC_MASK);
4323
4324 if (qdev->func == nic_func1)
4325 qdev->alt_func = nic_func2;
4326 else if (qdev->func == nic_func2)
4327 qdev->alt_func = nic_func1;
4328 else
4329 status = -EIO;
4330
4331 return status;
4332}
4333
4334static int ql_get_board_info(struct ql_adapter *qdev)
4335{
4336 int status;
4337
4338 qdev->func =
4339 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4340 if (qdev->func > 3)
4341 return -EIO;
4342
4343 status = ql_get_alt_pcie_func(qdev);
4344 if (status)
4345 return status;
4346
4347 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4348 if (qdev->port) {
4349 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4350 qdev->port_link_up = STS_PL1;
4351 qdev->port_init = STS_PI1;
4352 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4353 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4354 } else {
4355 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4356 qdev->port_link_up = STS_PL0;
4357 qdev->port_init = STS_PI0;
4358 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4359 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4360 }
4361 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4362 qdev->device_id = qdev->pdev->device;
4363 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4364 qdev->nic_ops = &qla8012_nic_ops;
4365 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4366 qdev->nic_ops = &qla8000_nic_ops;
4367 return status;
4368}
4369
4370static void ql_release_all(struct pci_dev *pdev)
4371{
4372 struct net_device *ndev = pci_get_drvdata(pdev);
4373 struct ql_adapter *qdev = netdev_priv(ndev);
4374
4375 if (qdev->workqueue) {
4376 destroy_workqueue(qdev->workqueue);
4377 qdev->workqueue = NULL;
4378 }
4379
4380 if (qdev->reg_base)
4381 iounmap(qdev->reg_base);
4382 if (qdev->doorbell_area)
4383 iounmap(qdev->doorbell_area);
4384 vfree(qdev->mpi_coredump);
4385 pci_release_regions(pdev);
4386}
4387
4388static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
4389 int cards_found)
4390{
4391 struct ql_adapter *qdev = netdev_priv(ndev);
4392 int err = 0;
4393
4394 memset((void *)qdev, 0, sizeof(*qdev));
4395 err = pci_enable_device(pdev);
4396 if (err) {
4397 dev_err(&pdev->dev, "PCI device enable failed.\n");
4398 return err;
4399 }
4400
4401 qdev->ndev = ndev;
4402 qdev->pdev = pdev;
4403 pci_set_drvdata(pdev, ndev);
4404
4405
4406 err = pcie_set_readrq(pdev, 4096);
4407 if (err) {
4408 dev_err(&pdev->dev, "Set readrq failed.\n");
4409 goto err_out1;
4410 }
4411
4412 err = pci_request_regions(pdev, DRV_NAME);
4413 if (err) {
4414 dev_err(&pdev->dev, "PCI region request failed.\n");
4415 return err;
4416 }
4417
4418 pci_set_master(pdev);
4419 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
4420 set_bit(QL_DMA64, &qdev->flags);
4421 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4422 } else {
4423 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4424 if (!err)
4425 err = dma_set_coherent_mask(&pdev->dev,
4426 DMA_BIT_MASK(32));
4427 }
4428
4429 if (err) {
4430 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4431 goto err_out2;
4432 }
4433
4434
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
		ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
		ioremap(pci_resource_start(pdev, 3), pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (!qdev->mpi_coredump) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}

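	/* Make sure the EEPROM is good. */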
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

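	/* Keep a local copy of the current MAC address. */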
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

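	/* Set up the default ring sizes. */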
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

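	/* Set up the default interrupt coalescing parameters. */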
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

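	/* The adapter's deferred work runs on a single ordered workqueue. */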
	qdev->workqueue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM,
						  ndev->name);
	if (!qdev->workqueue) {
		err = -ENOMEM;
		goto err_out2;
	}

	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

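/* Read a hardware register every five seconds.  If the PCI channel has
 * gone offline, the read lets EEH detect the failure and the timer is
 * not rearmed.
 */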
static void ql_timer(struct timer_list *t)
{
	struct ql_adapter *qdev = from_timer(qdev, t, timer);
	u32 var;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
}

static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;

	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

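	/* Set up the rest of the net_device structure. */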
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

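	/* MTU range: this hardware only supports 1500 or 9000, so these
	 * bounds just filter out-of-range values and qlge_change_mtu()
	 * enforces that exactly 1500 or 9000 is used.
	 */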
	ndev->min_mtu = ETH_DATA_LEN;
	ndev->max_mtu = 9000;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}

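	/* Start up the timer to trigger EEH if the bus goes dead. */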
	timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE);
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

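/* Entry points used by the ethtool loopback self-test. */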
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

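/* Clean up resources without touching hardware. */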
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

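	/* Cancel all outstanding delayed work. */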
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

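/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */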
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		del_timer_sync(&qdev->timer);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		del_timer_sync(&qdev->timer);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

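	/* Request a slot reset. */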
	return PCI_ERS_RESULT_NEED_RESET;
}

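/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch;
 * it is a shortened version of the device probe path.
 */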
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);
}

static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

static int __maybe_unused qlge_suspend(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);

	return 0;
}

static int __maybe_unused qlge_resume(struct device *dev_d)
{
	struct net_device *ndev = dev_get_drvdata(dev_d);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_master(to_pci_dev(dev_d));

	device_wakeup_disable(dev_d);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5 * HZ));
	netif_device_attach(ndev);

	return 0;
}

static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(&pdev->dev);
}

static SIMPLE_DEV_PM_OPS(qlge_pm_ops, qlge_suspend, qlge_resume);

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
	.driver.pm = &qlge_pm_ops,
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

module_pci_driver(qlge_driver);